text stringlengths 38 1.54M |
|---|
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
# %%
def normalise(values: pd.Series)-> pd.Series:
    """Min-max scale a series onto the [0, 1] interval (min -> 0, max -> 1)."""
    lo, hi = values.min(), values.max()
    return (values - lo) / (hi - lo)
def plot_normalised_trends(df,columns,labels,ax):
    """Plot two min-max normalised columns of df against 'Date' on one axis.

    df      : DataFrame with a 'Date' column plus the two value columns.
    columns : (col_a, col_b) names of the columns to normalise and plot.
    labels  : optional (label_a, label_b) legend labels; falsy -> no legend.
    ax      : matplotlib Axes, or falsy to draw via the pyplot module.
    """
    if not ax:
        import matplotlib.pyplot as plt
        ax = plt
    column_1, column_2 = columns
    # work on a copy so the caller's DataFrame is not silently overwritten
    df = df.copy()
    df[column_1] = normalise(df[column_1])
    df[column_2] = normalise(df[column_2])
    df = df.sort_values('Date', ascending=True)
    if labels:
        l1, l2 = labels
        ax.plot(df['Date'], df[column_1], label=l1)
        ax.plot(df['Date'], df[column_2], label=l2)
        ax.legend(loc="lower right")
    else:
        ax.plot(df['Date'], df[column_1], df['Date'], df[column_2])
    # Axes objects expose set_ylim while the pyplot module exposes ylim;
    # the original called ax.ylim unconditionally, which raises on an Axes.
    if hasattr(ax, 'set_ylim'):
        ax.set_ylim(-0.2, 1.2)
    else:
        ax.ylim(-0.2, 1.2)
def plot_multiple_normalised_trends(df,base_col,columns,labels):
    '''Function plots a grid of normalised line graphs, each comparing base_col
    against one numerical column of the dataframe (3 subplots per row).
    Input:
        df: pandas DataFrame (must contain 'Date', base_col and all columns)
        base_col: reference column drawn in every subplot
        columns: name of columns in DataFrame to compare against base_col
        labels: display label for each entry of columns'''
    n_plots = len(columns)
    n_rows = max(n_plots // 3, 2)
    n_col = 3
    # single figure (the original also created - and leaked - an unused
    # 200x40-inch figure before this call) — subplot size controlled here
    fig, ax = plt.subplots(n_rows, n_col, figsize=(16, 8), sharex='col', sharey='row')
    plt.subplots_adjust(left=0.30, bottom=0.20)
    for i in range(n_rows):
        for j in range(n_col):
            if i * 3 + j >= n_plots:
                break  # past the last requested plot in this row
            plot_normalised_trends(df, (base_col, columns[i * 3 + j]),
                                   (base_col, labels[i * 3 + j]), ax[i, j])
    plt.show()
# %%
def plot_corr(df,size=10):
    '''Function plots a graphical correlation matrix for each pair of columns in the dataframe.
    Input:
        df: pandas DataFrame
        size: vertical and horizontal size of the plot'''
    corr = df.corr()
    fig, ax = plt.subplots(figsize=(size, size))
    ax.matshow(corr)
    # annotate every cell with its correlation coefficient (2 decimals)
    for (i, j), z in np.ndenumerate(corr):
        ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
    # column names as tick labels on both axes
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=60, horizontalalignment='left');
    plt.yticks(range(len(corr.columns)), corr.columns);
# %%
def plot_confusion_matrix(cm):
    """Render a confusion matrix as a heat map with per-cell counts.

    cm : square numpy array of counts. Tick labels are hard-coded to the
         binary classes (0, 1), as in the original.
    """
    import matplotlib.pyplot as plt
    from itertools import product
    fig, ax = plt.subplots()
    im_ = ax.imshow(cm, interpolation='nearest', cmap='Blues')
    xlen, ylen = cm.shape
    # midpoint of the value range decides light-vs-dark text colour
    # (original divided by xlen, which equals the midpoint only for 2x2 matrices)
    thresh = (cm.max() + cm.min()) / 2.0
    display_labels = (0, 1)
    # extreme colormap colours: 0.0 -> lightest, 1.0 -> darkest
    # (original used cmap(256), one past the last valid integer index)
    cmap_min, cmap_max = im_.cmap(0.0), im_.cmap(1.0)
    for i, j in product(range(xlen), range(xlen)):
        # dark text on light cells, light text on dark cells
        color = cmap_max if cm[i, j] < thresh else cmap_min
        ax.text(j, i, format(cm[i, j], '.0f'), ha="center", va="center", color=color)
    fig.colorbar(im_, ax=ax)
    ax.set(xticks=np.arange(xlen),
           yticks=np.arange(ylen),
           xticklabels=display_labels,
           yticklabels=display_labels,
           ylabel="True label",
           xlabel="Predicted label")
    # invert y so row 0 is drawn at the top (sized to the matrix, not hard-coded 2)
    ax.set_ylim((xlen - 0.5, -0.5))
    plt.show()
    return None
# %%
if __name__ == "__main__":
    # 1. load the combined market dataset and show the class balance of 'Increase'
    df = pd.read_csv( os.path.join(os.getcwd(),"combined_data.csv"))
    df['Date'] = pd.to_datetime(df['Date'], infer_datetime_format=True)
    # value counts of the target variable, plotted as a bar chart
    df1 = df[['Increase']].apply(pd.value_counts)
    df1.plot.bar(rot=0)
    # 2. trend: normalised closes (and volumes) of each instrument vs the target 'Close'
    closings = ("Close_10year_treasury", "Close_copper", "Close_gold","Close_hk_index" ,"Close_oil", "Close_s&p", "Value_us_sgd")
    labels = ("10year_treasury", "Copper", "Gold","HK_index" ,"Crude Oil", "S&P", "SGD v USD")
    plot_multiple_normalised_trends(df,"Close",closings,labels)
    volume = ( "Volume_copper", "Volume_gold","Volume_hk_index" ,"Volume_oil", "Volume_s&p")
    labels = ( "Copper", "Gold","HK_index" ,"Crude Oil", "S&P")
    plot_multiple_normalised_trends(df,"Close",volume,labels)
    # 3. correlation plot across all numeric columns
    plot_corr(df)
pass |
import serial
import RPi.GPIO as GPIO
import time
ser=serial.Serial("/dev/ttyACM0",9600)  # microcontroller on USB serial, 9600 baud
start_time = time.time()
imu = open("IMU.txt","w")  # output log file
# discard readings for the first second (warm-up / possibly partial first line)
while time.time() - start_time <= 1:
    ser.readline()
# then log roughly seven more seconds of IMU samples
while time.time() - start_time <= 8:
    read_ser=ser.readline()
    # NOTE(review): on Python 3, readline() returns bytes, so float(read_ser)
    # and .strip('\n') would raise TypeError -- this script presumably targets
    # Python 2 (or expects decoded input); confirm before reuse.
    if float(read_ser) == 0.00:
        pass  # skip zero readings
    else:
        read = read_ser.strip('\n')
        imu.write(read)
        imu.write('\n')
imu.close()
|
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import bz2
import os
import pickle
import queue
import threading
import urllib.request as urllib2
import pandas as pd
from bs4 import BeautifulSoup
class DatasetCovidUnemploymentUS:
    """
    Class to load COVID-19 unemployment data for the US states.
    Source: https://www.bls.gov/lau/
    """
    def __init__(self, data_dir="", download_latest_data=True):
        """Load (or scrape and cache) monthly state-level unemployment rates.

        data_dir: directory holding the bz2-pickled cache file.
        download_latest_data: when True (or when no cache exists yet),
            scrape BLS and overwrite the cache; otherwise load the cache.
        Result: self.data maps state name -> {year: {month: rate}}.
        """
        if not os.path.exists(data_dir):
            print(
                "Creating a dynamic data directory to store COVID-19 "
                "unemployment data: {}".format(data_dir)
            )
            os.makedirs(data_dir)
        filename = "monthly_us_unemployment.bz2"
        if download_latest_data or filename not in os.listdir(data_dir):
            # Construct the U.S. state to FIPS code mapping from the Census
            # geocodes workbook (data rows start after a 5-line header)
            state_fips_df = pd.read_excel(
                "https://www2.census.gov/programs-surveys/popest/geographies/2017/"
                "state-geocodes-v2017.xlsx",
                header=5,
            )
            # remove all statistical areas and cities (FIPS 0 rows are regions/divisions)
            state_fips_df = state_fips_df.loc[state_fips_df["State (FIPS)"] != 0]
            self.us_state_to_fips_dict = pd.Series(
                state_fips_df["State (FIPS)"].values, index=state_fips_df.Name
            ).to_dict()
            print(
                "Fetching the U.S. unemployment data from "
                "Bureau of Labor and Statistics, and saving it in {}".format(data_dir)
            )
            self.data = self.scrape_bls_data()
            # cache the scraped dict as a bz2-compressed pickle
            fp = bz2.BZ2File(os.path.join(data_dir, filename), "wb")
            pickle.dump(self.data, fp)
            fp.close()
        else:
            print(
                "Not fetching the U.S. unemployment data from Bureau of Labor and"
                " Statistics. Using whatever was saved earlier in {}!!".format(data_dir)
            )
            assert filename in os.listdir(data_dir)
            with bz2.BZ2File(os.path.join(data_dir, filename), "rb") as fp:
                self.data = pickle.load(fp)
                fp.close()  # redundant inside the with-block; kept as-is
    # Scrape monthly unemployment from the Bureau of Labor Statistics website
    def get_monthly_bls_unemployment_rates(self, state_fips):
        """Scrape one state's monthly unemployment-rate table from data.bls.gov.

        state_fips: numeric state FIPS code (zero-padded into the LASST series id).
        Returns {year: {month_number: rate}}.
        """
        with urllib2.urlopen(
            "https://data.bls.gov/timeseries/LASST{:02d}0000000000003".format(
                state_fips
            )
        ) as response:
            html_doc = response.read()
        soup = BeautifulSoup(html_doc, "html.parser")
        # the second <table> on the page holds the data series
        table = soup.find_all("table")[1]
        table_rows = table.find_all("tr")
        unemployment_dict = {}
        # month abbreviation -> month number
        mth2idx = {
            "Jan": 1,
            "Feb": 2,
            "Mar": 3,
            "Apr": 4,
            "May": 5,
            "Jun": 6,
            "Jul": 7,
            "Aug": 8,
            "Sep": 9,
            "Oct": 10,
            "Nov": 11,
            "Dec": 12,
        }
        # skip the header row and the trailing footer row
        for tr in table_rows[1:-1]:
            td = tr.find_all("td")[-1]
            # keep only digits and the decimal point (cells may carry footnote markers)
            unemp = float("".join([c for c in td.text if c.isdigit() or c == "."]))
            th = tr.find_all("th")
            year = int(th[0].text)
            month = mth2idx[th[1].text]
            if year not in unemployment_dict:
                unemployment_dict[year] = {}
            unemployment_dict[year][month] = unemp
        return unemployment_dict
    def scrape_bls_data(self):
        """Scrape every state's series concurrently (one thread per state).

        Returns {state_name: {year: {month: rate}}}.
        """
        def do_scrape(us_state, fips, queue_obj):
            # worker: fetch one state's table and queue the (state, data) pair
            out = self.get_monthly_bls_unemployment_rates(fips)
            queue_obj.put([us_state, out])
        print("Getting BLS Data. This might take a minute...")
        result = queue.Queue()
        threads = [
            threading.Thread(target=do_scrape, args=(us_state, fips, result))
            for us_state, fips in self.us_state_to_fips_dict.items()
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # drain the queue into a plain dict keyed by state name
        monthly_unemployment = {}
        while not result.empty():
            us_state, data = result.get()
            monthly_unemployment[us_state] = data
        return monthly_unemployment
|
import requests
from bs4 import BeautifulSoup
import lxml
import smtplib
BUY_PRICE = 75.00  # alert threshold in USD
URL = "https://www.amazon.com/SanDisk-1TB-Extreme-Portable-SDSSDE61-1T00-G25/dp/B08GTYFC37/ref=sr_1_38?dchild=1&qid=1631216238&s=computers-intl-ship&sr=1-38"
test_url = "https://www.amazon.com/Instant-Pot-Duo-Evo-Plus/dp/B07W55DDFB/ref=sr_1_1?qid=1597662463"
# browser-like headers so Amazon serves the normal page instead of a bot block
params = {
    "Accept-Language": "en-US,en;q=0.9,mn-MN;q=0.8,mn;q=0.7,ko-KR;q=0.6,ko;q=0.5",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36"
}
response = requests.get(url=URL, headers=params)
# NOTE(review): `parser=lxml` passes the lxml module as an unrecognised kwarg;
# the features="lxml" argument is what actually selects the parser
soup = BeautifulSoup(response.text, parser=lxml, features="lxml")
price = soup.select(".priceBlockBuyingPriceString")
# first matched element's text, part after the '$' sign
price = [p.getText().split("$")[1] for p in price][0]
price_as_float = float(price)
print(type(price))
title = "Go Buy it! from Future YOU!"
# email an alert (via SMTP with STARTTLS) when the price drops below the threshold;
# the SMTP host / credentials are placeholders to be filled in
if price_as_float < BUY_PRICE:
    message = f"{title} is now {price}"
    with smtplib.SMTP("YOUR_SMTP_ADDRESS", port=587) as connection:
        connection.starttls()
        result = connection.login("YOUR_EMAIL", "YOUR_PASSWORD")
        connection.sendmail(
            from_addr="YOUR_EMAIL",
            to_addrs="YOUR_EMAIL",
            msg=f"Subject:Amazon Price Alert!\n\n{message}\n{URL}"
        )
|
#from normalization import normalize_corpus
from flask import Flask, jsonify, request
from flasgger import Swagger
from sklearn.externals import joblib
import numpy as np
from flask_cors import CORS
app = Flask(__name__)
Swagger(app)  # serve Swagger UI generated from the YAML in view docstrings
CORS(app)  # allow cross-origin requests from any front end
@app.route('/input/task', methods=['POST'])
def predict():
    """
    Ini Adalah Endpoint Untuk Mengklasifikasi Lirik Lagu
    ---
    tags:
      - Rest Controller
    parameters:
      - name: body
        in: body
        required: true
        schema:
          id: Lirik
          required:
            - text
          properties:
            text:
              type: string
              description: Please input with valid text.
              default: 0
    responses:
      200:
        description: Success Input
    """
    # The docstring above is flasgger's API spec and is kept verbatim; it
    # documents (in Indonesian) a POST endpoint that classifies song lyrics
    # into a genre and an emotion.
    new_task = request.get_json()
    text = new_task['text']
    X_New = np.array([text])
    #X_New=normalize_corpus(X_New)
    # NOTE(review): both models are re-loaded from disk on every request;
    # consider loading them once at import time for latency
    pipe = joblib.load('neuralNetworkClassifier.pkl')
    pipe2 = joblib.load('naiveBayesClassifier.pkl')
    resultGenrePredict = pipe[0].predict(X_New)
    resultEmosiPredict = pipe2[0].predict(X_New)
    return jsonify({'genre': format(resultGenrePredict),'emosi' : format(resultEmosiPredict)})
if __name__ == '__main__':
    app.run() # debug=True: only for development, not deployment (original comment in Indonesian)
|
#!/VND_TSP/virtual/bin python3.6
# -*- coding: utf-8 -*-
import csv
import sys
import json
def grafo(vertices, distancias, output):
    """Append `vertices` to the adjacency list stored under key 0, then dump
    the whole dict to `output`, one 'key:value' line per entry."""
    distancias[0].append(vertices)
    with open(output, 'w') as out_file:
        out_file.writelines('%s:%s\n' % (k, v) for k, v in distancias.items())
def Main(iteracao, algoritmo, n_arq, solucao):
    """Append one benchmark row (iteration, algorithm, instance file, solution)
    to ./saida.csv. `solucao` is rounded to 4 decimal places before writing."""
    filename = "./saida.csv"
    # newline='' prevents the csv module from emitting blank rows on Windows
    with open(filename, 'a', newline='') as output:
        csvwriter = csv.writer(output)
        rows = [[iteracao, algoritmo, n_arq, round(solucao, 4)]]
        csvwriter.writerows(rows)
##############################################################################
# This example will create a derived result for each time step asynchronously
##############################################################################
import rips
import time
# Internal function for creating a result from a small chunk of soil and porv results
# The return value of the function is a generator for the results rather than the result itself.
def create_result(soil_chunks, porv_chunks):
    """Generator yielding, chunk by chunk, the elementwise product
    soil * porv (oil-saturated pore volume) as a plain list of floats.

    Both arguments are iterables of chunk objects exposing a `.values` sequence.
    """
    for soil_chunk, porv_chunk in zip(soil_chunks, porv_chunks):
        # lazily hand back one derived chunk per iteration
        yield [soil * porv for soil, porv in zip(soil_chunk.values, porv_chunk.values)]
resinsight = rips.Instance.find()  # connect to a running ResInsight instance
start = time.time()
case = resinsight.project.cases()[0]  # first loaded case in the project
timeStepInfo = case.time_steps()
# Get a generator for the porv results. The generator will provide a chunk each time it is iterated
porv_chunks = case.active_cell_property_async("STATIC_NATIVE", "PORV", 0)
# Read the static result into an array, so we don't have to transfer it for each iteration
# Note we use the async method even if we synchronise here, because we need the values chunked
# ... to match the soil chunks
porv_array = []
for porv_chunk in porv_chunks:
    porv_array.append(porv_chunk)
# one derived result per time step, streamed back chunk-wise
for i in range(0, len(timeStepInfo)):
    # Get a generator object for the SOIL property for time step i
    soil_chunks = case.active_cell_property_async("DYNAMIC_NATIVE", "SOIL", i)
    # Create the generator object for the SOIL * PORV derived result
    result_generator = create_result(soil_chunks, iter(porv_array))
    # Send back the result asynchronously with a generator object
    case.set_active_cell_property_async(
        result_generator, "GENERATED", "SOILPORVAsync", i
    )
end = time.time()
print("Time elapsed: ", end - start)
print("Transferred all results back")
# display the newly generated result in the first view
view = case.views()[0].apply_cell_result("GENERATED", "SOILPORVAsync")
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)  # default webcam
type = ".jpeg"  # NOTE(review): shadows the builtin `type`
front_dir = "faceNew\\"  # Windows-style relative output folder
file = open(front_dir+"currNum.txt","r")  # persisted image counter
pics = open(front_dir+"myPics.txt","a")  # running list of captured image paths
imNum=int(file.read())
# grab a frame roughly every 200 ms until 'q' is pressed, saving each to disk
while True:
    _, frame = cap.read()
    imNum += 1
    path=front_dir + "im_{}".format(imNum) + type
    cv2.imwrite(path, frame)
    cv2.imshow('frame', frame)
    pics.write(path+"\n")
    # Python 2 print statement -- this script targets Python 2
    print imNum
    if cv2.waitKey(200) & 0xFF == ord('q'):
        # persist the counter so numbering resumes on the next run
        file=open(front_dir + "currNum.txt","w")
        file.write(str(imNum))
        break
|
###OPTIMISATION OF ALGORITHMIC TRADING STRATEGIES (ATS)
## ALGO TRADING STRATEGIES ARE FIXED AND DEFINED IN PROJECT_LIB2.PY IN SIGNAL()
## EXAMPLES: MACD(50,200), RSI(14), BOLLINGER BANDS ETC
# PROGRAM WILL OPTIMISE THE WEIGHTS BETWEEN STRATEGIES PER ASSET, AND THEN OPTIMIZE THE WEIGHTS BETWEEN ASSETS
# Most functions in project_lib2.py
# Training from 2000 to 2010, testing from 2010 to today (50% in sample, 50% out of sample)
# portfolio of Brent futures, Sp500 futures, 10-year Treasuries futures, Copper futures, Nasdaq futures, Gold futures
#imports
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pdblp #for Bloomberg API
import matplotlib.pyplot as plt
import pdblp #for Bloomberg API
import os
import sys
from scipy.optimize import minimize #For minimizing and finding weights
from scipy.optimize import minimize #For minimizing and finding weights
global insamplefactor  # no-op at module level; kept from the original
answer='Y'
assets=[]  # Bloomberg tickers entered by the user
lot_size = []  # contract multiplier (USD per point of price move) per ticker
print("####Entering assets for the portfolio to test and trade####")
# interactive loop: collect tickers and lot sizes until the user says N
while answer=='Y':
    ticker = input("Enter a Bloomberg ticker to test and trade: ")
    assets.append(ticker)
    lotsize = int(input("Lot size? (integer): "))
    lot_size.append(lotsize)
    answer = input("Would you like to enter another ticker? (Y/N): ")
start_date = input("Enter start date of training (YYYMMDD): ")
end_date = input("Enter end date of testing (YYYMMDD): ")
insamplefactor = float(input("Enter percentage of training data (float): "))
aum = int(input("Amount of money to manage (int): "))
##################FUNCTIONS from project_lib2.py################
# global variables
BBDATACACHEPATH = 'bbcache/'; # MUST END IN /
a = os.makedirs(BBDATACACHEPATH, exist_ok=1 )
# global variables for the optimizer
F = {} # will hold our backtest data and run data. It gets overwritten everytime we run a new contract
PF = {} # will hold portfolio PNL
BBCACHE = {} # mem cache of BB loads, to limit loads
# for output redirection to avoid scipy optimizer output excess
SYSNORMALSTDOUT = sys.stdout
# contains variables helping with risk management constraints
def myvars():
    """Return the dict of risk-management / sampling constants.

    Reads the module-level global `insamplefactor` (set from user input at
    startup), so it must only be called after the prompts have run.
    """
    global insamplefactor
    v = {}
    #v['maxleverage'] = 1.75 #max leverage per ATS - passed in the constraint function of the optimizer
    v['leveragecutoffthreshold'] = 2. #For any contract other than 10-year treasuries, max |leverage| per asset cannot be more than 2
    v['leveragecutoffthresholdTY1'] = 4. #For 10-year Treasuries, max |leverage| of 4 as it is less volatile
    v['insamplefactor'] = insamplefactor #fraction of total data which is training data (from start_date)
    return v
# load data from bloomberg
def bbload(ticker, start_date, end_date):
    """Load daily closes for `ticker` between start_date and end_date (YYYYMMDD).

    Lookup order: in-memory cache (BBCACHE) -> CSV cache on disk -> Bloomberg
    via pdblp. Side effects: records the ticker and its short name in the
    global F dict. Returns a date-indexed DataFrame with a 'close' column.
    """
    global BBCACHE
    global BBDATACACHEPATH
    name = ticker[:3]  # short name, e.g. 'CO1' from 'CO1 COMDTY'
    CSVcachefilename = BBDATACACHEPATH + ticker + '.' + start_date + end_date + '.csv'
    if ticker in BBCACHE:
        a = BBCACHE[ticker]
        print('USING CACHED')
    else:
        # try to load CSV first, it is easier than BB and good for those without BB
        try:
            a = pd.read_csv(CSVcachefilename, index_col = "date" )
            print('Loaded from CSV ' + CSVcachefilename)
        # If that fails, load from BB
        except:
            con = pdblp.BCon(debug=False, port=8194, timeout=5000)
            con.start()
            a = con.bdh(ticker, ['PX_LAST'], start_date, end_date )
            a.columns=['close']
            #save as csv
            #a.to_csv(CSVcachefilename)
            #print('Loaded from BB and Saved to '+CSVcachefilename)
        #cache
        BBCACHE[ticker] = a
    # save in global
    global F
    F['ticker'] = ticker #keep the ticker
    F['name'] = name #give it a short name without spaces
    return a
#adjusted brent returns for futures rolls
def adjBrentreturns(start_date,end_date):
    """Brent log-returns adjusted for futures rolls.

    Uses CORollindex.csv (column 'rollix': 0 -> use front-month CO1,
    otherwise use CO2) to splice the two contracts' daily log returns.
    NOTE(review): assumes CORollindex.csv rows line up 1:1 by position with
    the CO1 history -- confirm before reuse.
    """
    a=pd.read_csv('CORollindex.csv')
    a['rollix']=a['rollix'].astype(int)
    co2=bbload('CO2 COMDTY',start_date,end_date)
    co1=bbload('CO1 COMDTY',start_date,end_date)
    ret=pd.DataFrame()
    ret['co1']=np.log(co1.close/co1.close.shift(1))
    ret['co2']=np.log(co2.close/co2.close.shift(1))
    ret.index=co1.index
    # pick the front or second contract's return depending on the roll flag
    ret['adjret']=[ret['co1'][i] if a['rollix'][i]==0 else ret['co2'][i] for i in range(len(ret['co1'])) ]
    return ret
#creating features for each asset
def feature(df,start_date,end_date):
    """Append technical-analysis feature columns to df in place and register it in F.

    Adds log returns, 8/20/50/200-day moving averages, 20-day std, Bollinger
    bands, 14-day RSI and a normalised 20-day volatility ('std20norm') used
    later to volatility-weight the signals. Also stores df and the
    out-of-sample start index in the global F dict. Returns df.
    """
    #object df gets modified (features are appended to df)
    global F
    # returns. Can be adjusted later for futures rolls (overwritten)
    df['ret']=np.log(df.close/df.close.shift(1))
    #if F['name']=='CO1':
    #    df['ret']=adjBrentreturns(start_date, end_date)
    #    print('******** BRENT USING ADJ RETURNS ******')
    # more features
    df['ma50']=df.close.rolling(50).mean()
    df['ma20']=df.close.rolling(20).mean()
    df['ma200']=df.close.rolling(200).mean()
    df['ma8']=df.close.rolling(8).mean()
    df['std20'] = df.close.rolling(20).std()
    # NOTE(review): Bollinger bands are conventionally ma +/- 2*std; the extra
    # multiplication by ma20 looks unintentional -- confirm before relying on it
    df['boll20u'] = df['ma20'] + 2.*df['std20']*df['ma20']
    df['boll20d'] = df['ma20'] - 2.*df['std20']*df['ma20']
    df['rsi'] = rsi(df.close,14)
    #"risk manager:" 20-day historical volatility
    fact1 = df.ret.rolling(20).std()*np.sqrt(252)
    #volatility weighting: we will divide positions by df['std20norm']. If vols pick up we reduce positions and vice versa
    if F['name']=='TY1': #For 10-year Treasuries
        df['std20norm'] = fact1/0.1
    elif F['name']=='SP1' or F['name']=='NQ1': #For Sp500 or Nasdaq futures
        df['std20norm'] = fact1/0.2
    else:
        df['std20norm'] = fact1/0.3 #For all else
    v = myvars()
    F['d'] = df
    F['oosStart'] = int(len(df.ret)*v['insamplefactor']) #index where out of sample starts
    return df
#RSI function
def rsi(prices, n=14):
    """Relative Strength Index over an n-period simple average of up/down moves.

    prices : pd.Series of closes (any index).
    Returns a pd.Series aligned to prices.index with values in [0, 100];
    the first n entries are NaN until the rolling window fills.

    Vectorized replacement for the original list-comprehension version, which
    relied on positional integer indexing (breaks on non-RangeIndex series)
    and built deprecated dtype-less empty pd.Series objects.
    """
    pricediff = prices - prices.shift(1)
    # NaN and non-positive moves count as 0, matching the original's
    # `x if x > 0 else 0` logic (NaN > 0 is False -> 0)
    upmove = pricediff.where(pricediff > 0, 0.0)
    downmove = (-pricediff).where(pricediff < 0, 0.0)
    RS = upmove.rolling(n).mean() / downmove.rolling(n).mean()
    RSI = 100 - 100 / (1 + RS)
    RSI.index = prices.index
    return RSI
#creating signals for each asset
def signal(df):
    """Build the matrix of ATS signals (one column per strategy).

    Each raw signal is +1 (long), -1 (short) or 0, then divided by the
    normalised 20-day volatility so positions shrink when vol rises.
    Stores the result in F['s'] and returns it.
    """
    s=pd.DataFrame()
    # trend signals: long when price is above the moving average
    s['ma50']=-np.array(df.ma50>df.close).astype(int)+np.array(df.ma50<df.close).astype(int)
    s['ma20']=-np.array(df.ma20>df.close).astype(int)+np.array(df.ma20<df.close).astype(int)
    s['ma200']=-np.array(df.ma200>df.close).astype(int)+np.array(df.ma200<df.close).astype(int)
    # moving-average crossover signals
    s['ma50_200']=np.array(df.ma50>df.ma200).astype(int)-np.array(df.ma50<df.ma200).astype(int)
    s['ma8_20']=np.array(df.ma8>df.ma20).astype(int)-np.array(df.ma8<df.ma20).astype(int)
    #s['1d']=np.array(df.close>df.close.shift(1)).astype(int)-np.array(df.close<df.close.shift(1)).astype(int)
    # mean-reversion signals: buy below the lower band / oversold RSI
    s['Bollinger']=np.array(df.close<df.boll20d).astype(int)-np.array(df.close>df.boll20u).astype(int)
    s['rsi']=np.array(df['rsi']<25).astype(int)-np.array(df['rsi']>75).astype(int)
    #add dates if helpful
    s.index=df.index
    # vol weight each signal. Not time variant.
    s = pandaColDivideVector(s,df['std20norm'])
    global F
    F['s'] = s
    return s
# unweighted PNL. This is the signal * returns only, no weights. Will be applied weights.
def pnl0(df,s):
    """Unweighted PNL matrix: each signal column lagged one day (positions are
    taken on yesterday's signal) multiplied by the daily return series."""
    returns = np.array(df.ret)
    return pd.DataFrame({name: s[name].shift(1) * returns for name in s.columns})
#calculate leverage of each signal and sum (constrained by risk rules)
# Must have an x vector by now
def leverage(w):
    """Apply ATS weight vector w to the signal matrix F['s'] to get per-day,
    per-strategy leverage, then add the clipped net-leverage column.

    Side effects: stores 'lev', 'levsum' and 'netlev' in the global F dict.
    Returns the leverage DataFrame including the 'sum' column.
    """
    global F
    #basic signals
    lev = F['s']*w
    F['lev'] = lev
    #augment a bit
    levsum = leveragesum(lev)
    F['levsum'] = levsum
    F['netlev'] = levsum['sum']
    return levsum
# Calculates the sum of signal leverage. The sum is not equal to the sum of the parts, necessarily.
# We want for example to chop the max leverage off to improve the average
def leveragesum(lev):
    """Add a 'sum' column (net leverage across ATS) to lev, clipped to the
    per-asset risk threshold (a looser cap for the less volatile TY1 contract).
    Because of the clipping, the sum is not necessarily equal to the sum of
    the parts. Mutates and returns lev.
    """
    lev['sum'] = lev.sum(axis=1)
    # chop max leverage
    v = myvars()
    if F['name']=='TY1':
        THRES = v['leveragecutoffthresholdTY1']
    else:
        THRES = v['leveragecutoffthreshold']
    # clip the net leverage into [-THRES, THRES]
    ix = lev['sum']>THRES
    lev['sum'][ix] = THRES
    ix = lev['sum']<-THRES
    lev['sum'][ix] = -THRES
    return lev
# a sharpe measure for the optimizer only (using weights and the UW matrix)
def sharpeW(weights, dret):
    """Sharpe ratio for the optimizer: weights applied to the unweighted daily
    PNL matrix `dret`, annualised over 252 trading days. Prints the annualised
    return and volatility as a progress trace."""
    n_obs = len(dret)
    combined = (dret * weights).sum(axis=1)
    annualised_ret = np.exp(combined.sum()) ** (252 / n_obs) - 1
    print(annualised_ret)
    annualised_vol = np.std(combined) * np.sqrt(252)
    print(annualised_vol)
    return annualised_ret / annualised_vol
# sharpe for return series (the standard)
def sharpe(logret):
    """Annualised Sharpe ratio of a daily log-return series (252-day year)."""
    n_obs = len(logret)
    annualised_ret = np.exp(logret.sum()) ** (252 / n_obs) - 1
    annualised_vol = np.std(logret) * np.sqrt(252)
    return annualised_ret / annualised_vol
# will be used INSIDE the minimizer function, so only gets the X vector (replaced with cutoff to have higher averages)
#def tradeConstraintsFunc(x):
# v = myvars()
#Calculate a leverage on the proposed x
# global F
# lev = leverage(x)
# return [-np.max(np.array(lev['sum'] ))+v['maxleverage'], np.min(np.array(lev['sum'] ))+v['maxleverage'] ]
# OPTIMIZER LOSS FUNCTION
def lossFunc(w):
    """Objective for the per-asset optimizer: score weight vector w on the
    IN-SAMPLE slice of the unweighted PNL matrix F['UW'].

    The returned value is negated where appropriate so that scipy's minimize
    maximizes sharpe / pnl / calmar (drawdown is minimized directly).
    Side effect: stores the out-of-sample start index in F['oosStart'].
    """
    v = myvars()
    global F
    UW = F['UW']
    # define an out of sample period and store it
    n=int( len(UW) * v['insamplefactor'] )
    F['oosStart'] = n
    INSA = UW[0:n] #CRITICAL
    # calculate some interesting quantities for use
    pIS = (INSA * w).sum(axis=1) #PNL in sample
    # Choose an optimization target
    optTarget = F['optTarget']
    if optTarget=='sharpe':
        out = -sharpeW(w, INSA)
    elif optTarget=='pnl':
        out = -sum(pIS)
    elif optTarget=='dd':
        out=maxdrawdown(pIS)
    elif optTarget=='calmar':
        out=-np.exp(sum(pIS))/maxdrawdown(pIS)
    else:
        # unknown target: fall back to sharpe
        out = -sharpeW(w, INSA)
    return out
# the core backtesting function. Produces some helpful plots. Per asset. (asset info is overwritten in F)
def backtest():
    """Optimize the ATS weight vector x for the asset currently loaded in F
    (signals in F['s'], data in F['d']) and store leverage and PNL results.

    Uses SLSQP with each weight bounded to [-1, 1]; stdout is silenced while
    the optimizer runs. Results land in F['x'], F['levsum'], F['cumpnl'], F['pnl'].
    """
    global F
    # calculate the unweighted pnl
    F['UW'] = pnl0(F['d'],F['s'])
    #random init of weights. Set bounds.
    n = len(F['s'].columns)
    w0 = n * [ 1/n ]
    BNDS = ((-1,1),)*n
    #cons = ({'type': 'ineq','fun': tradeConstraintsFunc })
    print ('** Minimize: Target:'+F['optTarget'])
    #x = w0
    #res = minimize(lossFunc, w0, tol=1e-6, bounds=BNDS, constraints=cons) #minimize chooses the method between BFGS, L-BFGS-B, and SLSQP
    #res = minimize(lossFunc, w0, method='SLSQP',tol=1e-6, bounds=BNDS) #method=SLSQP -- no more constraints
    nulloutput() # stop output to stdout for the min function
    res = minimize(lossFunc, w0, method='SLSQP', tol=1e-6, bounds=BNDS, options={'disp': False, 'maxiter': 1e5 } ) #minimize with method SLSQP
    normaloutput()
    x = res.x
    # Now store some calculated quantities for portfolio creation and analysis etc.
    #x - store it safely
    F['x'] = x #weigths between ATS for a given asset
    #F['optimres'] = res
    #calculate some final output results of the optim vector x
    levsum = leverage(F['x'])
    F['levsum'] = levsum
    #PNL
    F['cumpnl'] = ((F['UW']*F['x']).sum(axis=1).cumsum()).apply(np.exp); #Cumulative is real PNL (path of 1$)
    F['pnl'] = (F['UW']*F['x']).sum(axis=1) #LOG PNL
    #some output and plots to help
    print('optimized X:')
    print(F['x'])
# plot some key results
def plotresult():
    """Plot the net leverage and cumulative PNL (path of $1) for the asset
    currently stored in F; the red dot marks where out-of-sample data starts."""
    global F
    # LEVERAGE
    levsum = F['levsum']
    plt.plot(levsum['sum'])
    plt.title('Leverage for ' + F['ticker'])
    print('Max Leverage:')
    print(np.max(levsum['sum']))
    plt.show();
    # PNL
    pnl = F['cumpnl']
    n = F['oosStart']
    plt.plot(pnl) #plotting pnl (path of $1)
    plt.scatter(pnl.index[n],pnl[n],color='r') #red point where out of sample starts
    plt.title('PNL ' + F['ticker'])
    plt.show()
# ease of use function to apply vector to each column
def pandaColDivideVector(p, v):
    """Return a new DataFrame where every column of p is divided elementwise
    by the vector v (index-aligned); p itself is left untouched."""
    return pd.DataFrame({name: p[name] / v for name in p.columns})
def yyyymmdd():
    """Return today's date as a 'YYYYMMDD' string (e.g. '20240131')."""
    # local import: `datetime` is never imported at module level, so the
    # original raised NameError whenever it was called
    from datetime import datetime
    return datetime.now().strftime('%Y%m%d')
# very simple and fast max drawdown function. Only does the basics for speed!
# r is a log return vector. Probably need some formatting later of structures.
def maxdrawdown(r):
    """Maximum drawdown of a LOG-return vector.

    r : 1-D array-like of daily log returns (NaNs are treated as 0 via
        nancumsum). Does NOT work for simple returns -- the cumulative path
        is built additively. Returns the largest peak-to-trough drop.
    """
    n = len(r)
    # cumulative log-return path (baseline peak is 0, i.e. the start)
    cr = np.nancumsum(r)
    dd = np.empty(n)
    mx = 0
    for i in range(n):
        if cr[i] > mx:
            mx = cr[i]
        # record the drawdown on EVERY step -- np.empty leaves garbage in any
        # slot that is skipped, so dd must be filled unconditionally
        dd[i] = mx - cr[i]
    return max(dd)
# OPTIMIZER Portfolio (2nd optimization)
#for a dataframe input of PNL streams, produce the optimal blend vector x (weights between assets)
def pfopt(df):
    """Second-stage optimizer: find asset weights (equality-constrained to sum
    to 1, each bounded to [-1, 1]) maximizing the in-sample sharpe of the
    blended PNL streams in df. Side effect: stores df in the global PF.
    Returns the optimal weight vector."""
    global PF
    PF = df;
    n = len(df.columns)
    w0 = n * [ 1/n ]  # equal-weight starting point
    BNDS = ((-1,1),)*n
    cons = ({'type': 'eq','fun': pfConsFunc })
    res = minimize(pfGoodFunc, w0, tol=1e-6, bounds=BNDS, constraints=cons, options={ 'disp': False, 'maxiter': 1e5 } )
    return res.x
# Sum of components less than 1
def pfConsFunc(x):
    """Equality constraint for the portfolio optimizer: returns 1 - sum(x),
    which must equal zero (weights sum to one)."""
    return np.array(1 - np.sum(x))
#return np.concatenate([c1,c2])
# minimize variance?
def pfGoodFunc(x):
    """Portfolio objective: negative in-sample sharpe of the weighted PNL
    (negative because scipy minimizes). Reads the PNL streams from the
    global PF set by pfopt."""
    global PF
    p = np.sum(PF * x,axis=1) # pnl
    v = myvars()
    n = int(len(p) * v['insamplefactor'])
    INSA = p[0:n]  # in-sample slice only
    g = - sharpe(INSA) #negative since we minimize
    return g
# STD out redirect
def nulloutput():
    """Silence stdout (redirect to os.devnull) while the optimizer runs;
    restore it afterwards with normaloutput()."""
    f = open(os.devnull, 'w')
    sys.stdout = f
# STD out set back to normal. Needs global var
def normaloutput():
    """Restore stdout to the interpreter's original stream saved at startup."""
    global SYSNORMALSTDOUT
    sys.stdout = SYSNORMALSTDOUT
#########################################################
###INPUTS FOR JUPYTER NOTEBOOK####
#aum=500000000 #Money under management
#assets = ['CO1 COMDTY','SP1 INDEX','TY1 COMDTY','HG1 COMDTY', 'NQ1 INDEX', 'GC1 COMDTY']
#Brent futures, Sp500 futures, 10-year Treasuries futures, Copper futures, Nasdaq futures, Gold futures
#lot_size=[1000,250,1000,250,20,100] #1 point move generates that amount in USD (per asset)
#start_date='20000101'
#end_date='20200807'
#Insample: 50%, out of sample 50% (it can be changed in project_lib2.py in my_vars())
########
# %% build and backtest all
def run(asset,start_date,end_date):
    """End-to-end pipeline for one asset: load data, build features and
    signals, optimize the ATS weights (sharpe target) and plot the result.
    All outputs land in the global F dict."""
    #load Bloomberg data for the asset between start_date and end_date
    df = bbload(asset,start_date,end_date)
    # create features and signals
    df = feature(df,start_date,end_date)
    s = signal(df)
    #backtest and optimize the x vector. All results in p.F
    F['optTarget'] = 'sharpe'
    backtest() # run optimization per asset and backtesting (optimize between ATS for each asset)
    plotresult() #plotting
#%% Run each asset separately and store the PNL for each. Weights are optimized between ATS for each asset
PNL = pd.DataFrame()  # daily log-PNL per asset
POS = pd.DataFrame()  # daily net leverage per asset
dPrices = pd.DataFrame()  # daily close per asset
# first stage: optimize ATS weights for each asset independently
for i in range( len(assets) ):
    asset = assets[i]
    print('***************** '+ asset)
    run(asset,start_date,end_date)
    name = F['name'] #name of asset
    PNL[name] = F['pnl'].copy() # daily return per asset (p.F is a dataframe that gets written over for each asset)
    POS[name] = F['netlev'].copy() #daily net delta per asset (before asset portfolio optimization)
    dPrices[name] = F['d']['close'].copy() #daily price per asset
# %% optimize portfolio of assets (2nd optimization)
PF = PNL
xb = pfopt(PNL) #optimize between weights of assets (and plot PNL of each)
PNLw = PNL*xb # total weighted pnl
pfcumpnl = PNLw.sum(axis=1).cumsum().apply(np.exp) #cumulative returns+1, ie the path of $1 over time
plt.plot(PNL.cumsum().apply(np.exp)) #plotting each portfolio component before weights between them
plt.title('Portfolio Components')
plt.show()
v = myvars() #loading global constraints variables
n = int(len(pfcumpnl) * v['insamplefactor']) #point where out of sample starts
plt.plot(pfcumpnl) #plotting results of $1 invested at start in global portfolio
plt.title('Portfolio Blended OPTIM')
plt.scatter(pfcumpnl.index[n],pfcumpnl[n],color='r') # Red point is where Testing data starts
plt.show()
# in-sample vs out-of-sample performance statistics
v=myvars()
path=PNLw.sum(axis=1)
n=len(path)
m=int(n*v['insamplefactor'])
print('Sharpe in sample: ', round(sharpe(path[0:m]),2))
print('Sharpe out of sample: ', round(sharpe(path[m+1:n]),2))
print('Volatility in sample: ', round(np.std(path[0:m])*np.sqrt(252),3))
print('Volatility out of sample: ', round(np.std(path[m+1:n])*np.sqrt(252),3))
tretIS=round(pfcumpnl[m]/pfcumpnl[0]-1,2)
print('Total return in sample: ', tretIS)
tretOS=round(pfcumpnl[n-1]/pfcumpnl[m]-1,2)
print('Total return out of sample: ', tretOS)
daysIS=m
daysOS=n-m
yearsIS=daysIS/252
yearsOS=daysOS/252
aretIS=round((1+tretIS)**(1/yearsIS)-1,4)
aretOS=round((1+tretOS)**(1/yearsOS)-1,4)
print('Annualized return in sample: ', aretIS)
print('Annualized return out of sample: ', aretOS)
print('years in sample: ', round(yearsIS,2))
print('years out of sample: ', round(yearsOS,2))
#Printing trades to make today. Bloomberg is loading the latest datapoint for each asset today.
#It should be run a few minutes before the close
trades=(POS-POS.shift(1))*xb*aum #trade value in USD per day
lotvalue=dPrices*lot_size #USD value of a lot for each contract (asset)
orders=pd.DataFrame(round(trades/lotvalue)) #Dataframe showing daily lot orders per contract
for asset in orders.columns:
    print("Trades for "+asset+" :", orders[asset].tail(1)[0]) #printing the trades to make now
#Showing historical trades in lots per contract for information
for asset in orders.columns:
    plt.plot(orders[asset])
    plt.title("Trades in lots for "+asset)
    plt.show()
plt.plot((POS*xb).sum(axis=1))
plt.title("Historical overall Net leverage")
plt.show() |
import numpy as np
import itertools
import pprint
import pickle
import sys
class State:
    """Dense representation of an n-site, q-state tensor T (shape n x q x q),
    with an optional precomputed basis of all single-entry indicator tensors."""
    def __init__(self,n=None,q=None,T=None):
        # either build a zero tensor from (n, q) or adopt an existing tensor T
        if T is None:
            self.n = n
            self.q = q
            self.T = np.zeros([n,q,q])
        else:
            self.n, self.q, _ = T.shape
            self.T = T
        self.basis = None # would prefer this to be static/class-level (original comment in Japanese)
        self.feature = None
    def shape(self):
        """Return the (n, q, q) shape of the underlying tensor."""
        return self.T.shape
    def show(self):
        """Print the raw tensor."""
        print(self.T)
    def save(self,path):
        """Pickle this object to `<path>.pickle`."""
        with open(path + '.pickle', 'wb') as f:
            pickle.dump(self, f)
    # wanna cash as static
    def mk_basis(self):
        """Enumerate all (q*q)**n indicator tensors: every way of lighting up
        exactly one (row, col) cell in each of the n q-by-q slices.

        WARNING: memory grows as (q*q)**n * n * q * q -- feasible only for tiny n, q.
        """
        q = self.q
        n = self.n
        bss = np.zeros([(q*q)**n,n,q,q])
        p = itertools.product(range(0,q*q), repeat=n)
        i = 0
        for b in p:
            # b[j] encodes the flat (row*q + col) position chosen for slice j
            for j in range(0,n):
                for k in range(0,q*q):
                    if b[j]==k:
                        bss[i][j][int(k/q)][k%q] = 1
            i = i+1
        self.basis = bss
    #def mk_feature():
    #    self.feature = 1#feature
    #    theta = np.random.uniform(size=2*2*2).reshape([2,2,2])
    #    s.basis[0]*(theta) # elementwise product (original comment in Japanese)
if __name__ == '__main__':
    # no CLI arg: build a fresh 5x5 state and its basis;
    # any arg: reload a previously pickled state from tmp.pickle
    if (len(sys.argv)==1):
        s = State(5,5)
        s.mk_basis()
        #s.save('./')
    else:
        with open('tmp.pickle', 'rb') as f:
            print('LOADING')
            s = pickle.load(f)
    s.show()
    print('-----------')
    print(s.basis)
|
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,confusion_matrix
import pandas as pd
def fn(p):
    """Map a banknote class label to a display string (0 -> counterfeit)."""
    return "Counterfit" if p == 0 else "Not Counter Fit"
t1=pd.read_csv("data_banknote_authentication.txt")
t=t1.as_matrix()  # NOTE(review): as_matrix() was removed in pandas 1.0 -- use .values / .to_numpy()
X=t[:,0:3]  # NOTE(review): takes only 3 of the 4 feature columns -- likely meant 0:4
y=t[:,4]  # class label column
Xtrain,Xtest,ytrain,ytest=train_test_split(X,y,test_size=0.2)  # 80/20 split
knn=KNeighborsClassifier(n_neighbors=1)  # 1-nearest-neighbour classifier
knn.fit(Xtrain,ytrain)
p=knn.predict(Xtest)
# Python 2 print statements below -- this script targets Python 2
print "Accuracy Score :",accuracy_score(ytest,p)
print "Confusion Matrix : \n",confusion_matrix(ytest,p)
print " Enter the following values: "
a=[]
a.append(input("Variance of Wavlet Transform Image: "))
a.append(input("Skewness of Wavlet Transform Image: "))
a.append(input("Curtosis of Wavlet Transform Image: "))
a.append(input("Entropy of Image : "))
# NOTE(review): `a` is a plain list with no .as_matrix attribute, so this line
# raises AttributeError -- probably meant knn.predict([a])
print fn(knn.predict(a.as_matrix))
|
from __future__ import division
import os
import pandas as pd
import numpy as np
import networkx as nx
from networkx.algorithms.centrality import betweenness as bt
import geopandas as gp
from math import radians, cos, sin, asin, sqrt
from shapely.geometry import LineString, Point
def prepare_centroids_list(G2_new_tograph):
    '''
    Input:
        G2_new_tograph : Graph Networkx object
    Output:
        centroid_nodes : List of all centroid nodes ID
    '''
    #create list of nodes of centroids: subgraph of nodes flagged IsCentroid == 1
    G = G2_new_tograph.copy()
    # NOTE(review): G.node was removed in networkx 2.4 -- newer versions use G.nodes
    SG=G.subgraph( [n[0] for n in G.node.items() if n[1]['IsCentroid'] == 1 ] )
    SG.nodes(data=True)  # no-op: result is not stored; kept as-is
    centroid_nodes = list(SG.nodes())
    return centroid_nodes
#extract the longitude and latitude from geometry of the shapefile
def getXY(pt):
    """Return the (x, y) coordinate pair of a shapely Point-like object."""
    x, y = pt.x, pt.y
    return (x, y)
#function to calculate linear distance between two points given lon and lat of each point
#http://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points
def haversine(lon1, lat1, lon2, lat2):
    """
    Great-circle distance in kilometres between two points given as
    (lon, lat) in decimal degrees, computed with the haversine formula.
    """
    # work in radians from here on
    rlon1, rlat1, rlon2, rlat2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    # haversine of the central angle
    a = sin((rlat2 - rlat1) / 2) ** 2 + cos(rlat1) * cos(rlat2) * sin((rlon2 - rlon1) / 2) ** 2
    c = 2 * asin(sqrt(a))
    # 6367 km: Earth radius used by the original author
    km = 6367 * c
    return km
#calculate distance centroid
def calc_distance_centroid(centroid_gdf):
    '''
    Build the full centroid-to-centroid distance matrix (km).

    Input:
        centroid_gdf : GeoDataFrame with a point 'geometry' column
    Output:
        DataFrame whose cell [i][j] is the haversine distance between
        centroid i and centroid j (columns 0..n-1)
    '''
    # longitude and latitude of every centroid from the shapefile geometry
    lon, lat = [list(t) for t in zip(*map(getXY, centroid_gdf['geometry']))]
    # Collect each origin's distance column first and build the DataFrame in
    # one shot. The original assigned list columns into a zero-row frame
    # (the 'initiate' placeholder), which raises a length-mismatch error on
    # modern pandas.
    columns = {}
    for i in range(len(lon)):
        columns[i] = [haversine(lon[i], lat[i], lon[j], lat[j])
                      for j in range(len(lat))]
    return pd.DataFrame(columns)
#generating production and attraction of each district
def gen_prod_attr(district_stats, prod_driver, attr_driver='Population_x'):
    """Derive trip production and attraction series for each district.

    Production is driven directly by the prod_driver column (one trip is
    assumed to carry 10 tons). Attraction distributes the total production
    over each district's share of the attr_driver column.
    """
    # --- trip production ---
    district_stats['trips_production'] = district_stats[prod_driver]
    production = district_stats['trips_production']
    # --- trip attraction ---
    # fill gaps in the attraction driver with its mean, then spread the
    # total production proportionally to each district's share
    filled = district_stats[attr_driver].fillna(district_stats[attr_driver].mean())
    district_stats[attr_driver] = filled
    share = district_stats[attr_driver] / district_stats[attr_driver].sum()
    attraction = share * production.sum()
    return production, attraction
#calculate OD matrix for a given production driver
#code obtained from https://github.com/joshchea/python-tdm/blob/master/scripts/CalcDistribution.py
def CalcDoublyConstrained(ProdA, AttrA, F, maxIter = 10):
    '''Calculates doubly constrained trip distribution for a given friction factor matrix
    ProdA = Production array
    AttrA = Attraction array
    F = Friction factor matrix
    maxIter (optional) = maximum iterations, default is 10
    Returns trip table
    '''
    Trips1 = np.zeros((len(ProdA),len(ProdA)))
    # print('Checking production, attraction balancing:')
    sumP = sum(ProdA)
    sumA = sum(AttrA)
    # Rescale attractions so total attraction matches total production
    # before balancing (exact float comparison; near-equal totals get
    # rescaled too, which is harmless).
    if sumP != sumA:
        AttrA = AttrA*(sumP/sumA)
        AttrT = AttrA.copy()
        ProdT = ProdA.copy()
    else:
        AttrT = AttrA.copy()
        ProdT = ProdA.copy()
    # Iterative proportional fitting: distribute trips, then rescale the
    # working ProdA/AttrA so row and column sums track the AttrT/ProdT
    # targets. The max(0.000001, ...) guard avoids division by zero.
    # NOTE(review): F[i] is row i for an ndarray but COLUMN i when F is a
    # pandas DataFrame (as passed by district_stats_to_OD_df) -- confirm
    # the intended orientation.
    for balIter in range(0, maxIter):
        for i in list(range(0,len(ProdA))):
            Trips1[i,:] = ProdA[i]*AttrA*F[i]/max(0.000001, sum(AttrA * F[i]))
        #Run 2D balancing --->
        ComputedAttractions = Trips1.sum(0)
        ComputedAttractions[ComputedAttractions==0]=1
        AttrA = AttrA*(AttrT/ComputedAttractions)
        ComputedProductions = Trips1.sum(1)
        ComputedProductions[ComputedProductions==0]=1
        ProdA = ProdA*(ProdT/ComputedProductions)
    # One final distribution pass with the fully balanced vectors.
    for i in list(range(0,len(ProdA))):
        c = ProdA[i]*AttrA*F[i]/max(0.000001, sum(AttrA * F[i]))
        Trips1[i,:] = c
    # Round-trip through a DataFrame purely to return a plain list of lists.
    dfc = pd.DataFrame(Trips1)
    Trips1 = dfc.values.tolist()
    return Trips1
def district_stats_to_OD_df(gdf_points, prod_driver, attr_driver='Population_x'):
    '''
    Input:
        gdf_points : geodataframe from shapefile (Points) of centroids
        prod_driver : string of gdf's column name which will be production driver
        attr_driver : string of gdf's column name driving attraction
                      (population by default)
    Output:
        OD_matrix : Dataframe of OD matrix with node id as column and row indices
    '''
    distance = calc_distance_centroid(gdf_points)
    # simple deterrence function in the meantime; zero self-distance becomes
    # inf here and is zeroed out just below
    distance = 10000/distance
    # Vectorised replacement for the original per-cell loop, which relied on
    # chained indexing (distance[i][j] = 0) -- a deprecated, unreliable write
    # pattern in modern pandas. inf > 9999999, so self-flows go to 0.
    distance[distance > 9999999] = 0
    #calculate production and attraction based on the production driver
    #attraction is based on attr_driver (population by default)
    production, attraction = gen_prod_attr(gdf_points, prod_driver, attr_driver)
    #calculate OD_Matrix
    Trips1 = CalcDoublyConstrained(production, attraction, distance)
    nodelist = list(gdf_points['Node'])
    #rename the index and column into nodelist (based on the gdf_points)
    OD_matrix = pd.DataFrame(Trips1, index=nodelist, columns=nodelist)
    # Drop negligible flows. The original double loop used
    # Series.iteritems() (removed in pandas 2.0) and chained assignment;
    # this boolean mask covers exactly the same cells.
    OD_matrix[OD_matrix < 0.1] = 0
    return OD_matrix
def all_ods_creation(gdf_points, prod_lists, attr_driver):
    # Build one OD matrix per production driver, keyed "od_<driver>".
    return {
        "od_{0}".format(prod): district_stats_to_OD_df(
            gdf_points, prod_driver=prod, attr_driver=attr_driver)
        for prod in prod_lists
    }
def all_ods_creation_ema(gdf_points, prod_lists,attr_driver):
    # EMA variant: same as all_ods_creation, but each value is an
    # (OD matrix, production name) tuple so the driver can be matched later.
    return {
        "od_{0}".format(prod): (
            district_stats_to_OD_df(gdf_points, prod_driver=prod, attr_driver=attr_driver),
            prod,
        )
        for prod in prod_lists
    }
def merge_two_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.

    Entries from y overwrite entries from x; neither input is mutated.
    """
    merged = dict(x)
    for key in y:
        merged[key] = y[key]
    return merged
def factors_dict_creation(prod_lists):
    """Map every production name (e.g. 'Textile_exp_ton') to a scaling factor.

    All factors start at 1; they are meant to be overridden in later EMA runs.
    """
    return {prod: 1 for prod in prod_lists}
def od_aggregation(OD_all_dict, **factors_dict):
    '''
    Scale each product's OD matrix by its factor and sum them into one
    final OD dataframe.

    Input:
        OD_all_dict : dict mapping "od_<product>" keys to (OD, product) tuples
        factors_dict : keyword args mapping product name to scaling factor
    Output:
        OD_final_df : element-wise sum of all scaled OD matrices
    '''
    OD_final_dict = {}
    # Match each OD entry's product name against the factors and scale it.
    # The original iterated with .iteritems() (Python-2-only, removed in
    # Python 3) and compared every factor key; a membership test is
    # equivalent.
    for val1 in OD_all_dict.values():
        od, product = val1
        if product in factors_dict:
            OD_final_dict["od_{0}".format(product)] = od * factors_dict[product]
    # Sum all scaled ODs. dict.keys()[0] from the original is not
    # subscriptable on Python 3, so iterate the values directly.
    scaled = list(OD_final_dict.values())
    OD_final_df = scaled[0]
    for extra in scaled[1:]:
        OD_final_df = OD_final_df + extra
    return OD_final_df
def od_preparation(prod_lists, OD_all_dict, **factors_dict):
    # Thin wrapper kept for interface compatibility: prod_lists is accepted
    # but aggregation only needs the OD dict and the factors.
    return od_aggregation(OD_all_dict, **factors_dict)
|
from django.shortcuts import render
from django.http import HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
import os
#Create a chatbot
# Built and trained once at module import time, then shared by every request.
chatbot=ChatBot('jarvis')
trainer = ListTrainer(chatbot)
from django.conf import settings
# NOTE(review): the file handle is never closed, and the raw YAML lines are
# fed straight to ListTrainer -- confirm conversations.yml is really a flat
# list of alternating statements rather than structured YAML.
file_ = open(os.path.join(settings.BASE_DIR, 'conversations.yml')).readlines()
#training on english dataset
#for files in os.listdir('./english/'):
#data=open('conversations.yml','r').readlines()
trainer.train(file_)
@csrf_exempt
def get_response(request):
    """Chat endpoint: POST a JSON body {'message': ...} and receive the
    bot's reply as JSON; any other method yields an error payload."""
    response = {'status': None}
    if request.method == 'POST':
        payload = json.loads(request.body)
        reply = chatbot.get_response(payload['message']).text
        response['message'] = {'text': reply, 'user': False, 'chat_bot': True}
        response['status'] = 'ok'
    else:
        response['error'] = 'no post data found'
    return HttpResponse(
        json.dumps(response),
        content_type="application/json"
    )
def home(request):
    """Render the landing page."""
    template_name = 'home.html'
    return render(request, template_name)
def report(request):
    """Render the report page."""
    template_name = 'report.html'
    return render(request, template_name)
# -*- coding: utf-8 -*-
# @Time : 2020/5/13 15:04
# @Author : lxd
# @File : run.py
from torchvision import transforms
from utils.util import image_train_test_split
from utils.ImageDataset import ImageDataset
from torch.utils.data import DataLoader
from utils.train import train
from model.CNN_model import CNN_model
def main():
    """Train the captcha CNN on the Discuz image set.

    Builds train/test DataLoaders over '../data/Discuz' and hands them to
    the project's training loop.
    """
    # Normalise each RGB channel from [0, 1] to [-1, 1]; images are used at
    # native size (the Resize step is intentionally left disabled).
    transform = transforms.Compose(
        [
            # transforms.Resize(100),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ]
    )
    # p=5/6 -- presumably the fraction of images assigned to the training
    # split; TODO confirm against image_train_test_split's signature.
    train_img, test_img = image_train_test_split(root='../data/Discuz', p=5/6)
    train_imageset = ImageDataset(root='../data/Discuz/', imgs=train_img, transform=transform)
    # pin_memory speeds host-to-GPU transfers when use_cuda is on
    train_img_loader = DataLoader(train_imageset, batch_size=128, shuffle=True, pin_memory=True)
    test_imageset = ImageDataset(root='../data/Discuz/', imgs=test_img, transform=transform)
    test_img_loader = DataLoader(test_imageset, batch_size=128, shuffle=False, pin_memory=True)
    model = CNN_model()
    train(model=model, train_loader=train_img_loader, test_loader=test_img_loader, step=128,
          epochs=1024, lr=0.001, use_cuda=True)
if __name__ == '__main__':
    main()
# Euler Problem #1: Multiples of 3 and 5
# http://projecteuler.net/problem=1
# Q: Find the sum of all the multiples of 3 or 5 below 1000.
# A: 233168
# Closed form solution:
# Sum the arithmetic series of multiples of 3 and 5, then subtract the arithmetic series of 15 to avoid double counting
# Based off formula s = n(a1 + an)/2
# 333 multiples of 3 less than 1000
# 199 multiples of 5 less than 1000
# 66 multiples of 15 less than 1000
# Integer division (//) keeps the results exact ints (every product below is
# even); the original used Python-2 print statements and `/`, which would
# yield floats and fail to parse on Python 3.
m3 = 333 * (3 + 999) // 2
m5 = 199 * (5 + 995) // 2
m15 = 66 * (15 + 990) // 2
print(m3 + m5 - m15)
# Computed summing:
# add every multiple of 3 and of 5, then remove the multiples of 15 that
# were counted twice (sum() replaces the original manual accumulation loops)
c3 = range(3, 1000, 3)
c5 = range(5, 1000, 5)
c15 = range(15, 1000, 15)
sum_total = sum(c3) + sum(c5) - sum(c15)
print(sum_total)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-05 17:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles two divergent 0007 leaves of the 'boards'
    # migration graph; no schema operations are required.
    dependencies = [
        ('boards', '0007_auto_20170605_1713'),
        ('boards', '0007_auto_20170605_1539'),
    ]
    operations = [
    ]
|
from flask import Flask,render_template,request,redirect,url_for
import sys
sys.path.append("c:/program files/python37/lib/site-packages")
import pygal
from math import cos
app = Flask(__name__)
import random , os ,math,re
# Character vocabulary used both for random document generation and for
# term-frequency counting; the digits '1'..'5' double as links between docs.
list_of_chars = ['A', 'B', 'C', 'D', 'E', '1', '2', '3', '4', '5']
#----------------------------------------------------------
#score function
def cal_wieght(itf_dic,tf_dic):
    """Combine term frequencies with inverse term frequencies.

    For every document in tf_dic, multiply each term's tf by the matching
    itf value (terms absent from itf_dic are dropped) and return a dict of
    {document: {term: weight}}.
    """
    return {
        doc: {
            term: float(freq) * float(itf_dic[term])
            for term, freq in freqs.items()
            if term in itf_dic
        }
        for doc, freqs in tf_dic.items()
    }
#________________________________________________________________________________
# this function if the files created randomly
@app.route('/search2', methods =['GET', 'POST'] )
def search():
    # Search endpoint. On POST it (1) optionally regenerates the five
    # one-line "documents" with random characters, (2) scores each document
    # against the query with tf*itf weights, and (3) runs 20 HITS iterations
    # over the doc-mention link graph, then renders the ranked lists plus an
    # authority/hub bar chart. On GET it renders empty lists.
    if request.method== 'POST' :
        L=[]
        qry_str = request.form['query']
        print(qry_str)
        # Persist the query into Q.txt so it can be scored like a document.
        # NOTE(review): 'r+' writes from position 0 without truncating, so a
        # shorter query leaves stale trailing characters -- confirm intended.
        open('Q.txt','r+').write(str(qry_str)) #store query in doc
        print(str(open('Q.txt','r+').readlines()))
        print(request.form['way'])
        list_of_doc = ['1.txt', '2.txt', '3.txt', '4.txt', '5.txt']
        if request.form['way'] == 'Random': #random or not
            # rewrite each document with 3-10 random vocabulary characters
            for k in list_of_doc:
                random_size = random.randint(3, 10)
                temp = []
                for i in range(random_size):
                    st = random.choice(''.join(list_of_chars))
                    temp.append(st)
                open(k, "r+").write(''.join(temp))
        # calculation tf _____________________________________________________________
        # Term frequency per document (letters only; digits are stripped
        # before normalising by the most frequent remaining character).
        list_of_doc = ['1.txt', '2.txt', '3.txt', '4.txt', '5.txt','Q.txt']
        tf_dic= {}
        for a in list_of_doc:
            tmp_dic = {}
            j = {}
            tmp_str =str(open(a).readlines())
            print(tmp_str)
            for a2 in list_of_chars:
                p=['1','2','3','4','5']
                if a2 not in p: # to count the number of char in the doc
                    s = tmp_str.count(a2)
                    j.update({a2: s})
                    for i in p:
                        tmp_str=tmp_str.replace(i,"")
                    tmp_dic.update({a2: s/(tmp_str.count(max(tmp_str,key=tmp_str.count)))})
            print(j)
            print(tmp_dic)
            tf_dic.update({a.replace(".txt", ""): tmp_dic})
        print(tf_dic)
        # calculate itf___________________________________________________________________________________
        # Inverse term frequency: log2(N / document-count) per letter.
        itf_dic= {}
        for a in list_of_chars:
            if a not in p:
                count= 0
                for a2 in list_of_doc:
                    if a in str(open(a2).readlines()):
                        count=count+1
                print(count)
                if count!=0:
                    t=math.log2(len(list_of_doc)/count)
                else:
                    t=0
                itf_dic.update({a:t})
        print("itf_dic",itf_dic)
        t=cal_wieght(itf_dic,tf_dic) # tf*idf
        # separate the query's weight vector from the documents'
        q=t['Q']
        del t['Q']
        print("tf*idf",t)
        print(q)
        # dot-product similarity between each document and the query
        sim_dic={}
        for key1 , value1 in t.items(): # claculate similarity
            tmp= {}
            score=0
            for key2, value2 in value1.items():
                if key2 in q:
                    score= score+(float(value1[key2]) * float(q[key2]))
            sim_dic.update({key1:score})
        print(sim_dic)
        L=sorted (sim_dic.items(), key=lambda i:(i[1], i[0]), reverse=True)
        '''--------------------------- link analysis ---------------------------- '''
        # Build the adjacency matrix: doc i links to doc j when digit j+1
        # appears in doc i's text (self-links ignored), then run 20 HITS
        # power iterations with L2 normalisation.
        list_of_doc = ['1.txt', '2.txt', '3.txt', '4.txt', '5.txt']
        import numpy as np
        adj_matrix=np.zeros((5,5))
        for a in list_of_doc:
            tmp=open(a,'r').readlines()
            for b in range(5):
                if (str(b+1) in str(tmp)) and ((b) != list_of_doc.index(a)) : # ignore looooops
                    adj_matrix[list_of_doc.index(a)][b]=1
        adj_matrix_T=np.transpose(adj_matrix)
        a=np.array([[1,1,1,1,1]]).T
        h=a
        print('initial a,h =', a,h)
        print("adj matrix",adj_matrix)
        print("adj matrix transpose",adj_matrix_T)
        for i in range(20):
            a=np.dot(adj_matrix_T,h)
            a=a/(np.sqrt(np.sum(np.power(a,2))))
            h=np.dot(adj_matrix,a)
            h=h/(np.sqrt(np.sum(np.power(h,2))))
        # a and h are (5,1) column vectors; tolist() yields one-element rows
        a=np.array(a).tolist()
        h=np.array(h).tolist()
        print('Authority =',a)
        print('Hubs=',h)
        result={}
        result1={}
        for i in range(5):
            result.update({list_of_doc[i]:a[i]})
        L1=sorted(result.items(), key=lambda i:(i[1], i[0]), reverse=True)
        for i in range(5):
            result1.update({list_of_doc[i]:h[i]})
        L2=sorted(result1.items(), key=lambda i:(i[1], i[0]), reverse=True)
        print(L1)
        #bar
        # render the authority/hub scores as an embedded pygal bar chart
        line_chart = pygal.Bar()
        # line_chart = pygal.HorizontalBar()
        line_chart.title = 'Authority and Hubs'
        line_chart.x_labels = map(str, range(1,6))
        a=np.array(a)
        h=np.array(h)
        print(result1)
        line_chart.add('Authority',[x[0] for x in result.values()])#[a[[0]],a[[1]],a[[2]],a[[3]],a[[4]]])
        line_chart.add('HUBS',[x[0] for x in result1.values()])#[h[0],h[1],h[2],h[3],h[4] ])
        graph_data = line_chart.render_data_uri()
        return render_template('vector_s.html',list=L, list1=L1,list2=L2, graph_data= graph_data)
    return render_template('vector_s.html',list=[] ,list1=[],list2=[] )
if __name__ == '__main__':
    # Run the Flask development server (debug reloader; not for production).
    app.run(debug = True)
import cvas
import sys
# Connect to the local CVAS service.
# NOTE(review): hard-coded API key and absolute Windows path -- acceptable
# for a local demo, but move both to configuration before sharing.
client = cvas.client("8bttfegqwfX5Do6rgHIF4t/5Eco7uYm8MoSrpn6p6S8=", "http://localhost:5000")
with open("C:\\Users\\adamj\\OneDrive\\Study\\DP\\AlgorithmAssets\\car1.jpg", 'rb') as readFile:
    file = client.upload_data(readFile.read(), "image/jpeg", ".jpg")
if file is None:
    print("Error with uploading file from data")
    sys.exit(1)
print(file.object_id)
# Run licence-plate recognition on the uploaded image: region "eu", one
# best candidate requested.
algorithm = client.algorithm("license-plate-recognition")
result = algorithm.run([{ "c" : "eu", "n": 1}, file])
# Poll until the remote job finishes.
# NOTE(review): tight loop with no sleep -- confirm result.get() blocks
# server-side, otherwise this hammers the service.
while result.status == "notFinished":
    result.get()
print("Result: " + result.status)
print("StdOut: " + result.std_out)
print("StdErr:" + result.std_err)
print("Duration: " + str(result.duration) + " ms")
import boto3
from moto import mock_s3
from Code.ReadFile import lambda_GetFileNames
# Fixture names shared by the test and the S3 setup helper.
sbucketName = "AIGBUCKET"
sfileName = "SampleFile.txt"
sBody = "AIG Sample File"
def test_lambda_get_file_names():
    """lambda_GetFileNames lists the single object in the mocked bucket.

    BUG FIX: the moto mock must still be active when lambda_GetFileNames
    runs. The original called set_up_s3(), whose `with mock_s3()` context
    had already exited on return, so the bucket no longer existed (and the
    object body was never written at all).
    """
    with mock_s3():
        s3 = boto3.resource('s3', region_name='us-east-1')
        s3.create_bucket(Bucket=sbucketName)
        # put() actually writes the object body; Object() alone only
        # constructs a reference
        s3.Object(bucket_name=sbucketName, key=sfileName).put(Body=sBody)
        event = {
            "BucketName": sbucketName
        }
        result = lambda_GetFileNames(event, None)
        assert result == {"files": [{"filename": "SampleFile.txt"}]}
def set_up_s3():
    """Create the mock bucket and write the sample object into it.

    NOTE(review): the moto mock is scoped to this `with` block, so the
    fixture vanishes as soon as this function returns; callers must run
    inside their own active mock_s3 context (or decorator) for the data to
    persist.
    """
    with mock_s3():
        # Create the bucket & write the object
        s3 = boto3.resource('s3', region_name='us-east-1')
        s3.create_bucket(Bucket=sbucketName)
        # BUG FIX: put() actually uploads the body; the original called
        # s3.Object(...) without put(), which only builds a reference and
        # never creates the object.
        s3.Object(bucket_name=sbucketName, key=sfileName).put(Body=sBody)
|
# write a python program to add two numbers
num1, num2 = 1.5, 6.3
sum = num1 + num2
print(f'Sum: {sum}')
# write a python program to multiply two numbers
num1, num2 = 4, 3
prod = num1 * num2
print(f'Product: {prod}')
# write a python function to add two user provided numbers and return the sum
def add_two_numbers(num1, num2):
    """Return the sum of num1 and num2."""
    return num1 + num2
# write a program to find and print the largest among three numbers
num1 = 10
num2 = 12
num3 = 14
# max() picks the same value as the original if/elif chain
largest = max(num1, num2, num3)
print(f'largest:{largest}')
# write a python program to multiply three numbers
num1, num2, num3 = 4, 3, 8
prod = num1 * num2 * num3
print(f'Product: {prod}')
# write a python program to multiply three numbers
num1, num2, num3 = 4, 3, 8
prod = num1 * num2 * num3
print(f'Product: {prod}')
#write a python function to print a string
def print_string(text):
    """Echo the given text to stdout."""
    print(text)
#write a python program to calculate square root of a number
num = 9
sqrt_num = pow(num, 0.5)
print(f'square root: {sqrt_num}')
#write a python function to add two lists
list_1 = [2,34,5]
list_2 = [54,67,342]
# element-wise sum of the two lists
result_list = [a + b for a, b in zip(list_1, list_2)]
# write a python program to find the factorial of a number provided by the user.
num = 13
factorial = 1
if num < 0:
    print("Sorry, factorial does not exist for negative numbers")
elif num == 0:
    print("The factorial of 0 is 1")
else:
    # multiplying from 2 upward; starting at 1 would only multiply by 1
    for factor in range(2, num + 1):
        factorial *= factor
    print("The factorial of",num,"is",factorial)
# write a python program to find the ASCII value of the given character
c = 'p'
codepoint = ord(c)
print("The ASCII value of '" + c + "' is", codepoint)
# write a python function to return size of a list
def size_of_list(l):
    """Return the number of elements in the list."""
    return len(l)
# write a python program to append to two lists
list_1 = [4,'dasd',34,65,34,'fsd']
list_2 = [54,'fdssd',3,665,634,'ffsdfsdvsd']
# unpack both lists into one new list
appended_list = [*list_1, *list_2]
# write a python function to remove punctuations
def remove_punctuation(text):
    """Return *text* with ASCII punctuation characters removed.

    BUG FIX: the original ignored its argument and always filtered a
    hard-coded sample sentence.
    """
    punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
    no_punct = ""
    for char in text:
        if char not in punctuations:
            no_punct = no_punct + char
    return(no_punct)
# write a python function to tokenize a string
def tokenize(text):
    """Split the text on single spaces and return the token list."""
    tokens = text.split(' ')
    return tokens
# write a python function to get factorial of a number using recursion
def recur_factorial(n):
    """Return n! computed recursively.

    BUG FIX: the base case is n <= 1 so that 0 (and 1) return 1; the
    original only stopped at n == 1 and recursed forever for n = 0.
    """
    if n <= 1:
        return 1
    return n * recur_factorial(n - 1)
# write a python function to sort a list
def sort_list(l):
    """Sort *l* in place (ascending) and hand the same list back."""
    l.sort()
    return l
#write a python program to print text in lower case
text = 'This SENTENCE had a MIX of lower and CAPS alphabets '
lowered = text.lower()
print(lowered)
# write a function to replace a part of a string with another string
def replace_pattern(text,pattern,replacement):
    """Substitute every regex match of *pattern* in *text* with *replacement*."""
    # BUG FIX: `re` was never imported in this script; import it locally so
    # the function is self-contained.
    import re
    return re.sub(pattern, replacement, text)
# write a python program to extract numbers from a list
l1 = [2,34,564,'asdasd','zebra','tsai',4,45,543,0,-1]
# keep only exact int/float items (type() check, so bools would be excluded)
num_list = [item for item in l1 if type(item) in (int, float)]
print(num_list)
# write a python program to multiply two numbers using lambda
x = lambda a, b : a * b
print(x(5, 6))
# write a python program to handle exception if values do not match
try:
    print(x)
except Exception as e:
    # any failure (e.g. x undefined) is reported instead of raised
    print(e)
# write a python program to print numbers between 2 numbers
x1 = 3
x2 = 12
for value in range(x1, x2):
    print(value)
# write a python program to import pandas
import pandas as pd
# write a python program to import numpy
import numpy as np
# write a python program to copy all elements from one array to another
arr1 = [1, 2, 3, 4, 5]
arr2 = list(arr1)
# write a python program to print duplicate elements in an array
arr = [1, 2, 3, 4, 2, 7, 8, 8, 3]
# compare every element against each later element, printing the later one
for i, left in enumerate(arr):
    for right in arr[i + 1:]:
        if left == right:
            print(right)
# write a python program to sort words in alphabetic order
my_str = "this is a sample text"
words = sorted(my_str.split())
for word in words:
    print(word)
# write a python function to check if its a leap year
def leap_year_check(year):
    """Print whether *year* is a Gregorian leap year."""
    # divisible by 4, except century years, unless divisible by 400
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    if is_leap:
        print("{0} is a leap year".format(year))
    else:
        print("{0} is not a leap year".format(year))
# write a python program to calculate number of days between dates
from datetime import date
f_date, l_date = date(2014, 7, 2), date(2014, 7, 11)
delta = l_date - f_date
print(delta.days)
# write a python program to write a text to a txt file
text='sample text goes here'
with open("test.txt",'w',encoding = 'utf-8') as f:
    f.write(text)
# write a python program to read a txt file into a list
# a context manager closes the handle even if readlines() raises
with open("test.txt",'r',encoding = 'utf-8') as f:
    lines = f.readlines()
|
from bidict import bidict
from django.conf import settings
class MessageHeader:
    """Envelope metadata carried by every message: the type tag plus the
    protocol version it was produced under."""
    __slots__ = ['msg_type', 'version']
    msg_type: str
    version: int

    def __init__(self, msg_type, version=None):
        self.msg_type = msg_type
        # BUG FIX: the original only assigned self.version when the caller
        # omitted it, so an explicit `version` argument was silently dropped
        # and the slot stayed unset (AttributeError on access).
        if version is None:
            version = settings.BOBOLITH_PROTOCOL_VERSION
        self.version = version
# Global two-way registry mapping message-type strings <-> message classes
# (populated by message_mixin subclasses via __init_subclass__).
MESSAGE_TYPES = bidict()
def message_mixin(msg_type: str):
    """Build a mixin class that stamps messages with *msg_type*.

    Subclasses are auto-registered in MESSAGE_TYPES so a type string can be
    resolved back to its class.
    """
    class MessageMixin:
        __slots__ = ['header']
        header: MessageHeader

        def __init__(self, header=None, **kwargs):
            # BUG FIX: the original only assigned self.header when the
            # argument was None, silently dropping an explicitly supplied
            # header and leaving the slot unset.
            if header is None:
                header = MessageHeader(msg_type=msg_type)
            self.header = header
            # any extra keyword becomes an attribute (slots permitting)
            for key, value in kwargs.items():
                setattr(self, key, value)

        def __init_subclass__(cls, **kwargs):
            # cooperate with other base classes in the MRO
            super().__init_subclass__(**kwargs)
            # register only the first subclass per msg_type; bidict keeps
            # the mapping one-to-one in both directions
            if cls not in MESSAGE_TYPES.inverse:
                MESSAGE_TYPES[msg_type] = cls

        def to_json(self):
            # serialise only the slots that are actually set
            return {slot: getattr(self, slot)
                    for slot in self.__slots__
                    if hasattr(self, slot)}
    return MessageMixin
class PingMessage(message_mixin('ping')):
    # Request half of the liveness handshake; carries the ping payload text.
    __slots__ = ['ping']
    ping: str
class PongMessage(message_mixin('pong')):
    # Reply half of the liveness handshake; carries the pong payload text.
    __slots__ = ['pong']
    pong: str
|
import tkinter as Tk
import tkinter.font as tkFont
from tkinter import ttk
from tkinter import OptionMenu
import os.path
import numpy as np
from lxml import etree
import os
from picoh import picoh
from copy import deepcopy
import platform
import threading
import csv
import os
import random
import sys
from threading import Timer
# Class to hold Eyeshape information. Same fields as Picoh.obe xml file.
class EyeShape(object):
    """Container mirroring one eye-shape entry from the Picoh.obe XML."""

    def __init__(self, name_value, hexString_value, autoMirror_value, pupilRangeX_value, pupilRangeY_value):
        # copy every constructor argument onto the matching attribute
        fields = ('name', 'hexString', 'autoMirror', 'pupilRangeX', 'pupilRangeY')
        values = (name_value, hexString_value, autoMirror_value,
                  pupilRangeX_value, pupilRangeY_value)
        for attr, value in zip(fields, values):
            setattr(self, attr, value)
class PicohEyeDesigner(Tk.Frame):
# Class variables
operatingSystem = platform.system()
# %%%%
picohConnected = picoh.connected
# Setup variables.
clickedDown = False
pupilActive = True
drawing = False
startedMoving = False
currentfilename = ""
# Binary grids, one for each button.
gridArray = np.zeros((9, 8))
gridArrayOne = np.zeros((9, 8))
gridArrayTwo = np.zeros((9, 8))
gridArrayThree = np.zeros((9, 8))
gridArrayFour = np.zeros((9, 8))
gridArrayFive = np.zeros((9, 8))
buttonArray = []
buttonArrayOne = []
buttonArrayTwo = []
buttonArrayThree = []
buttonArrayFour = []
buttonArrayFive = []
# List of EyeShape objects.
shapeList = []
# Coordinates for top left of window
rootx = 20
rooty = 40
# Variables to hold colour and size preferences.
bgCol = 'white'
textCol = 'black'
buttonCol = 'white'
if operatingSystem == 'Windows':
buttonCol = 'grey'
# pupilButtonHighlightColour = '#408bf9'
pupilButtonHighlightColour = 'SkyBlue1'
buttonWidth = 10
buttonHeight = 3
if operatingSystem == "Linux":
tickWidth = 11
else:
tickWidth = 15
tree = None
def __init__(self, parent,frameIn):
#Tk.Frame.__init__(self, parent)
self.parent = parent
self.frame = frameIn
# Tk.Frame.__init__(self.frame, parent)
#self.initialize()
# Configure Window
#self.parent.title("Picoh Eye Shape Designer")
self.parent.grid_rowconfigure(1, weight=0)
self.parent.grid_columnconfigure(1, weight=0)
if self.operatingSystem == "Darwin":
self.customFont = tkFont.Font(family="Letter Gothic Std", size=11)
if self.operatingSystem == "Windows" or self.operatingSystem == "Linux":
self.customFont = tkFont.Font(family="Helvetica", size=8)
self.frame.configure(bg=self.bgCol)
self.screen_width = root.winfo_screenwidth()
self.screen_height = root.winfo_screenheight()
# Variables to track tick boxes:
self.pupilVar = Tk.BooleanVar()
self.pupilVar.set(True)
self.pupilTrack = Tk.BooleanVar()
self.pupilTrack.set(False)
self.mirrorVar = Tk.IntVar()
self.mirrorVar.set(0)
self.speak = Tk.IntVar()
self.speak.set(0)
self.rangeVar = Tk.IntVar()
self.rangeVar.set(0)
# Create popups for rename and new shape.
self.entryPopTwo = Tk.Entry(self.frame, width=20, text="Test", font=self.customFont)
self.entryPopTwo.bind('<Return>', self.rename)
self.entryPop = Tk.Entry(self.frame, width=20, text="Test", font=self.customFont)
self.entryPop.bind('<Return>', self.newShape)
# Add pupil overlay and pupil track checkboxes
checkbox = Tk.Checkbutton(self.frame, text="Overlay Pupil", variable=self.pupilVar, command=self.checkBoxAction)
checkbox.grid(row=1, rowspan=1, column=18, columnspan=7, sticky="w")
checkbox.configure(bg=self.bgCol, font=self.customFont)
pupilTrackBox = Tk.Checkbutton(self.frame, text="Mouse-Pupil", variable=self.pupilTrack,
command=self.pupilTrackAction)
pupilTrackBox.grid(row=8, rowspan=1, column=27, columnspan=6, sticky="w")
pupilTrackBox.configure(bg=self.bgCol, font=self.customFont, width=self.tickWidth)
# Labels
l1 = Tk.Label(self.frame, text="Eyeshape")
l1.grid(row=0, column=0, columnspan=4, sticky="W", padx=(10, 0))
l1.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
l2 = Tk.Label(self.frame, text="Pupil")
l2.grid(row=0, column=9, columnspan=3, sticky="W")
l2.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
l3 = Tk.Label(self.frame, text="Blink 1")
l3.grid(row=10, column=0, columnspan=4, sticky="W", padx=(10, 0))
l3.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
l4 = Tk.Label(self.frame, text="Blink 2")
l4.grid(row=10, column=9, columnspan=3, sticky="W")
l4.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
l5 = Tk.Label(self.frame, text="Blink 3")
l5.grid(row=10, column=18, columnspan=3, sticky="W")
l5.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
l6 = Tk.Label(self.frame, text="Blink 4")
l6.grid(row=10, column=27, columnspan=3, sticky="W")
l6.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
self.textLab = Tk.Label(self.frame, text='Are You Sure?', font=self.customFont)
self.filenamelabel = Tk.Label(self.frame, text="")
# self.filenamelabel.grid(row=13,column = 0,columnspan = 10,sticky = "W", padx = (10,0))
# Create 2D arrays with 0's to hold button states.
for x in range(0, 6):
for j in range(9):
column = []
for i in range(8):
column.append(0)
self.getButtonArray(x).append(column)
# New Button
self.newButton = Tk.Button(self.frame, text="New", image="", command=self.newButton, width=self.buttonWidth)
self.newButton.grid(row=4, column=27, columnspan=4, sticky="w")
self.newButton.configure(highlightbackground=self.bgCol, font=self.customFont)
# Rename Button
self.renameButton = Tk.Button(self.frame, text="Rename", command=self.renameButton)
self.renameButton.grid(row=4, column=31, columnspan=4, sticky="e")
self.renameButton.configure(highlightbackground=self.bgCol, font=self.customFont, width=self.buttonWidth)
# Duplicate Button
self.dupButton = Tk.Button(self.frame, text="Duplicate", command=self.duplicate, width=self.buttonWidth)
self.dupButton.grid(row=5, column=31, columnspan=4, sticky="e")
self.dupButton.configure(highlightbackground=self.bgCol, font=self.customFont)
# Delete button
self.delButton = Tk.Button(self.frame, text="Delete", command=self.deleteShapeButton, width=self.buttonWidth)
self.delButton.grid(row=5, column=27, columnspan=4, sticky="w")
self.delButton.configure(highlightbackground=self.bgCol, font=self.customFont)
# Test blink button
self.blinkButton = Tk.Button(self.frame, text="Test Blink", command=self.testBlink, width=self.buttonWidth)
self.blinkButton.grid(row=7, column=31, columnspan=4, sticky="e")
self.blinkButton.configure(highlightbackground=self.bgCol, font=self.customFont, width=9)
# Speak tick box, have picoh read out file names and changes.
self.speakTickBox = Tk.Checkbutton(self.frame, text="Speak", variable=self.speak)
# self.speakTickBox.grid(row=8, column=31, columnspan = 4, sticky="e")
self.speakTickBox.config(bg=self.bgCol, highlightcolor=self.textCol, font=self.customFont, width=self.tickWidth)
# Reset buttons for each grid
self.resetButton = Tk.Button(self.frame, text='Clear', command=lambda: self.reset(0))
self.resetButton.grid(row=0, column=5, columnspan=3, sticky="E")
self.resetButton.configure(highlightbackground=self.bgCol, fg=self.textCol, font=self.customFont)
self.resetButtonOne = Tk.Button(self.frame, text="Clear", command=lambda: self.reset(1))
self.resetButtonOne.grid(row=0, column=14, columnspan=3, sticky="E")
self.resetButtonOne.configure(highlightbackground=self.bgCol, font=self.customFont)
self.resetButtonTwo = Tk.Button(self.frame, text="Clear", command=lambda: self.reset(2))
self.resetButtonTwo.grid(row=10, column=5, columnspan=3, sticky="E")
self.resetButtonTwo.configure(highlightbackground=self.bgCol, font=self.customFont)
self.resetButtonThree = Tk.Button(self.frame, text="Clear", command=lambda: self.reset(3))
self.resetButtonThree.grid(row=10, column=14, columnspan=3, sticky="E")
self.resetButtonThree.configure(highlightbackground=self.bgCol, font=self.customFont)
self.resetButtonFour = Tk.Button(self.frame, text="Clear", command=lambda: self.reset(4))
self.resetButtonFour.grid(row=10, column=23, columnspan=3, sticky="E")
self.resetButtonFour.configure(highlightbackground=self.bgCol, font=self.customFont)
self.resetButtonFive = Tk.Button(self.frame, text="Clear", command=lambda: self.reset(5))
self.resetButtonFive.grid(row=10, column=32, columnspan=3, sticky="E")
self.resetButtonFive.configure(highlightbackground=self.bgCol, font=self.customFont)
# copy buttons
copyDownButton = Tk.Button(self.frame, width=0, height=0, borderwidth=0,
highlightthickness=-2, image=copyDown, padx=-2, pady=-2)
copyDownButton.configure(highlightbackground=self.bgCol)
copyRightOneButton = Tk.Button(self.frame, width=0, height=0, borderwidth=0,
highlightthickness=-2, image=copyRight, padx=-2, pady=-2)
copyRightOneButton.configure(highlightbackground=self.bgCol)
copyRightTwoButton = Tk.Button(self.frame, width=0, height=0, borderwidth=0,
highlightthickness=-2, image=copyRight, padx=-2, pady=-2)
copyRightTwoButton.configure(highlightbackground=self.bgCol)
copyRightThreeButton = Tk.Button(self.frame, width=0, height=0, borderwidth=0,
highlightthickness=-2, image=copyRight, padx=-2, pady=-2)
copyRightThreeButton.configure(highlightbackground=self.bgCol)
# Buttons used during renaming or the creation of a new shape.
self.but = Tk.Button(self.frame, text="Yes",
command=self.deleteShape, font=self.customFont, width=self.buttonWidth)
self.butCancel = Tk.Button(self.frame, text="No",
command=self.cancel, font=self.customFont, width=self.buttonWidth)
self.okayOne = Tk.Button(self.frame, text="Okay", highlightbackground=self.bgCol,
command=self.newShape, font=self.customFont, width=self.buttonWidth)
self.cancelOne = Tk.Button(self.frame, text="Cancel", highlightbackground=self.bgCol,
command=self.cancel, font=self.customFont, width=self.buttonWidth)
self.okayTwo = Tk.Button(self.frame, text="Okay", highlightbackground=self.bgCol,
command=self.rename, font=self.customFont, width=self.buttonWidth)
self.cancelTwo = Tk.Button(self.frame, text="Cancel",
command=self.cancel, highlightbackground=self.bgCol, font=self.customFont,
width=self.buttonWidth)
# Add copy buttons to grid.
copyDownButton.grid(row=10, column=3)
copyRightOneButton.grid(row=15, column=8)
copyRightTwoButton.grid(row=15, column=17)
copyRightThreeButton.grid(row=15, column=26)
# Bind commands to copy buttons.
copyDownButton.bind("<Button>", lambda event, grid=0: self.copyGrid(event, grid, grid + 2))
copyDownButton.bind("<ButtonRelease-1>", self.OnMouseUp)
copyRightOneButton.bind("<Button>", lambda event, grid=2: self.copyGrid(event, grid, grid + 1))
copyRightOneButton.bind("<ButtonRelease-1>", self.OnMouseUp)
copyRightTwoButton.bind("<Button>", lambda event, grid=3: self.copyGrid(event, grid, grid + 1))
copyRightTwoButton.bind("<ButtonRelease-1>", self.OnMouseUp)
copyRightThreeButton.bind("<Button>", lambda event, grid=4: self.copyGrid(event, grid, grid + 1))
copyRightThreeButton.bind("<ButtonRelease-1>", self.OnMouseUp)
# Picoh button, toggles sending data to Picoh. If not Picoh detected default to off.
if self.picohConnected:
chosenLogo = logoOn
picoh.reset()
picoh.close()
else:
chosenLogo = logo
# Create Picoh logo button.
self.picohButton = Tk.Button(self.frame, command=self.picohToggle, image=chosenLogo)
self.picohButton.grid(row=0, column=27, columnspan=20, rowspan=3, sticky="s")
if self.operatingSystem == "Windows":
self.picohButton.grid(rowspan=3, sticky="n", row=0)
self.picohButton.configure(highlightbackground=self.bgCol)
# picohPanel = Tk.Label(self.frame, image=picohGraphic)
# picohPanel.grid(row=9, column=8, columnspan=16, rowspan=16, sticky="sw")
# Generate button grids: (xStart,yStart,grid)
self.generateButtons(0, 1, 0)
self.generateButtons(9, 1, 1)
self.generateButtons(0, 11, 2)
self.generateButtons(9, 11, 3)
self.generateButtons(18, 11, 4)
self.generateButtons(27, 11, 5)
# Create a Tkinter variable
self.tkvar = Tk.StringVar(self.frame)
# Read in data from Picoh.obe xml file.
self.xmlReadin()
# Load the Shapelist with
self.refreshShapeList()
# Trace tkvar to enable shape chosen in drop down to be loaded
self.tkvar.trace_id = self.tkvar.trace_variable("w", self.loadShape)
self.saved = True
# x and y range entry boxes
self.xRangeVar = Tk.StringVar()
self.xRangeVar.set('5')
self.yRangeVar = Tk.StringVar()
self.yRangeVar.set('5')
self.xRangeEntry = Tk.Entry(self.frame, width=2, textvariable=self.xRangeVar)
# self.xRangeEntry.grid(row=7, column=23, columnspan=5, sticky='w')
self.xRangeEntry.config(bg='white', font=self.customFont)
self.yRangeEntry = Tk.Entry(self.frame, width=2, textvariable=self.yRangeVar)
# self.yRangeEntry.grid(row=8, column=23, columnspan=5, sticky='w')
self.yRangeEntry.config(bg='white', font=self.customFont)
self.xRangeLabel = Tk.Label(self.frame, text="Pupil Range X", height=1, font=self.customFont)
# self.xRangeLabel.grid(row=7, column=18, columnspan=5, sticky='w')
self.xRangeLabel.config(bg=self.bgCol, fg=self.textCol)
self.yRangeLabel = Tk.Label(self.frame, text="Pupil Range Y")
# self.yRangeLabel.grid(row=8, column=18, columnspan=5, sticky='w')
self.yRangeLabel.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
self.xRangeVar.trace_variable("w", self.updateRange)
self.yRangeVar.trace_variable("w", self.updateRange)
# Create check boxes
self.mirrorCheckbox = Tk.Checkbutton(self.frame, text="Auto Mirror", variable=self.mirrorVar,
command=self.mirrorChange)
self.mirrorCheckbox.grid(row=7, rowspan=1, column=27, columnspan=6, sticky="w")
self.mirrorCheckbox.config(bg=self.bgCol, highlightcolor=self.textCol, font=self.customFont,
width=self.tickWidth)
self.rangeCheckbox = Tk.Checkbutton(self.frame, text="Show Pupil Range", variable=self.rangeVar,
command=self.displayRange)
# self.rangeCheckbox.grid(row=5, rowspan=1, column=18, columnspan=7, sticky="w")
self.rangeCheckbox.config(bg=self.bgCol, fg=self.textCol, font=self.customFont)
# Pack frame.
#self.frame.pack(fill=Tk.X, padx=0, pady=0)
root.bind('<Motion>', self.motion)
# Load first shape in the list.
self.shapeIndex = 0
self.loadShape(True, shapeName=self.shapeList[self.shapeIndex].name, loading=True)
# self.updatePicoh()
self.checkBoxAction()
checkbox.invoke()
if self.operatingSystem == "Windows" or self.operatingSystem == "Linux":
if self.operatingSystem == "Linux":
winRowheight = 11
if self.operatingSystem == "Windows":
winRowheight = 13
self.newButton.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.renameButton.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.dupButton.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.delButton.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.blinkButton.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.okayOne.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.okayTwo.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.cancelOne.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.cancelTwo.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.but.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.butCancel.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.resetButton.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 5)
self.resetButtonOne.configure(compound="c", image=pixelImage, height=winRowheight,
width=self.buttonWidth * 5)
self.resetButtonTwo.configure(compound="c", image=pixelImage, height=winRowheight,
width=self.buttonWidth * 5)
self.resetButtonThree.configure(compound="c", image=pixelImage, height=winRowheight,
width=self.buttonWidth * 5)
self.resetButtonFour.configure(compound="c", image=pixelImage, height=winRowheight,
width=self.buttonWidth * 5)
self.resetButtonFive.configure(compound="c", image=pixelImage, height=winRowheight,
width=self.buttonWidth * 5)
if self.operatingSystem != "Linux":
self.mirrorCheckbox.configure(compound="c", image=pixelImage, height=winRowheight,
width=self.buttonWidth * 7)
pupilTrackBox.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 7)
checkbox.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 7)
self.rangeCheckbox.configure(compound="c", image=pixelImage, height=winRowheight,
width=self.buttonWidth * 7)
if self.operatingSystem == "Linux":
self.mirrorCheckbox.config(width=self.tickWidth)
pupilTrackBox.config(width=self.tickWidth)
checkbox.config(width=self.tickWidth)
self.yRangeLabel.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 7)
self.xRangeLabel.configure(compound="c", image=pixelImage, height=winRowheight, width=self.buttonWidth * 7)
self.popupMenu.configure(compound="c", image=pixelImage, height=8, width=self.buttonWidth * 14)
self.textLab.configure(compound="c", image=pixelImage, height=8, width=self.buttonWidth * 7)
self.updatePicoh()
# Function to generate buttons
def generateButtons(self, buttonStartX, buttonStartY, grid):
    """Create the Tk buttons for one grid and wire up their mouse events.

    buttonStartX / buttonStartY: grid() offsets of the top-left button.
    grid: grid number (0-5) selecting which button array to fill.
    """
    for i in range(0, self.getGridArray(grid).shape[0]):
        for j in range(0, self.getGridArray(grid).shape[1]):
            b = Tk.Button(self.frame, highlightbackground=self.buttonCol, height=0, borderwidth=0,
                          highlightthickness=2, padx=0, pady=0)
            if self.operatingSystem == "Windows":
                b.config(bg=self.buttonCol)
            # Extra left padding on the first column of grids 0 and 2.
            if j == 0 and grid == 0 or j == 0 and grid == 2:
                b.grid(row=i + buttonStartY, column=j + buttonStartX, padx=(10, 0))
            else:
                b.grid(row=i + buttonStartY, column=j + buttonStartX)
            b.config(image=offImage)
            # Bind events. Default arguments (grid=grid, x=i, y=j) pin the
            # current loop values — a plain closure would late-bind to the
            # final values of i/j/grid.
            b.bind("<B1-Motion>", lambda event, grid=grid: self.OnMouseMove(event, grid))
            b.bind("<Leave>", self.OnMouseLeave)
            b.bind("<Button>", lambda event, grid=grid, x=i, y=j: self.OnMouseDown(event, x, y, grid))
            b.bind("<ButtonRelease-1>", self.OnMouseUp)
            # Add button to button array
            self.getButtonArray(grid)[i][j] = b
# Copies grid to destination.
def copyGrid(self, event, grid, destination):
    """Copy the on/off state of every cell in `grid` into `destination`.

    The original had two token-identical branches for `grid == 0` and the
    general case; a single loop covers both. Marks the shape as unsaved.
    """
    for i in range(0, 9):
        for j in range(0, 8):
            if self.getGridArray(grid)[i][j]:
                self.turnButtonOn(i, j, destination, loading=False)
            else:
                self.turnButtonOff(i, j, destination, loading=False)
    self.saved = False
def removeFromXML(self, nameToDelete):
    """Delete the first XML item whose <Name> equals nameToDelete and save."""
    treeRoot = self.tree.getroot()
    for group in treeRoot:
        for entry in list(group):
            if entry[0].text != nameToDelete:
                continue
            group.remove(entry)
            self.writeToFile()
            return
def renameInXML(self, nameToChange, newName):
    """Rename the first XML item whose <Name> equals nameToChange and save."""
    treeRoot = self.tree.getroot()
    for group in treeRoot:
        for entry in group:
            if entry[0].text != nameToChange:
                continue
            entry[0].text = newName
            self.writeToFile()
            return
def updateXML(self, eyeShape):
    """Write eyeShape's ranges, hex pattern and mirror flag into the XML item
    whose <Name> matches, then save the file.

    Only the first matching item is updated; if nothing matches, nothing is
    written. (The original ended with a second, unreachable `return` — removed.)
    """
    root = self.tree.getroot()
    for channel in root:
        for item in channel:
            if item[0].text == eyeShape.name:
                # Item layout: [0]=Name, [2]=PupilRangeX, [3]=PupilRangeY,
                # [5]=Hex, [6]=AutoMirror (matches xmlReadin's tag order).
                item[2].text = str(eyeShape.pupilRangeX)
                item[3].text = str(eyeShape.pupilRangeY)
                item[5].text = eyeShape.hexString
                item[6].text = 'true' if eyeShape.autoMirror else 'false'
                self.writeToFile()
                return
def addToXML(self, eyeShape):
    """Append a new XML item for eyeShape, cloned from the currently selected
    shape's entry so the element layout stays consistent, then save."""
    selectedName = self.tkvar.get()
    treeRoot = self.tree.getroot()
    for group in treeRoot:
        for entry in group:
            if entry[0].text != selectedName:
                continue
            clone = deepcopy(entry)
            clone[0].text = eyeShape.name
            clone[2].text = str(eyeShape.pupilRangeX)
            clone[3].text = str(eyeShape.pupilRangeY)
            clone[5].text = eyeShape.hexString
            clone[6].text = 'true' if eyeShape.autoMirror else 'false'
            group.append(clone)
            self.writeToFile()
            # print(eyeShape.name)
            return
def writeToFile(self):
    """Serialise the in-memory XML tree back to the eye-shape file."""
    my_tree = self.tree
    # directory = picoh.dir
    # file = os.path.join(directory, 'Ohbot.obe')
    file = picoh.eyeShapeFile
    # `with` closes the file on exit; the original's explicit close() was redundant.
    with open(file, 'wb') as f:
        f.write(etree.tostring(my_tree))
# Shows highlighted blue squares on pupil grid to indicate range.
def displayRange(self):
    """Highlight pupil-grid squares to show the current shape's pupil range.

    Clamps the stored range to the 9x8 grid, resets every square's border to
    grey, then (when the "Show Pupil Range" box is ticked) highlights the
    centred range window. Finally mirrors the clamped values back into the
    X/Y entry boxes (not on Linux, where the entries are not synced).
    """
    xRange = self.shapeList[self.shapeIndex].pupilRangeX
    yRange = self.shapeList[self.shapeIndex].pupilRangeY
    # Clamp to the grid dimensions and to non-negative values.
    xRange = max(0, min(xRange, 8))
    yRange = max(0, min(yRange, 9))
    # Centre the highlighted window on the middle of the grid.
    yStart = max(0, 3 - int(yRange / 2))
    xStart = max(0, 3 - int(xRange / 2))
    # Reset every pupil-grid square to the neutral border colour.
    # (The original branched on gridArrayOne[i][j] here, but both branches
    # were identical — collapsed into one.)
    for i in range(0, 9):
        for j in range(0, 8):
            self.buttonArrayOne[i][j].config(highlightbackground='grey')
            if self.operatingSystem == "Windows":
                self.buttonArrayOne[i][j].config(bg=self.buttonCol)
    # Highlight the in-range window when the checkbox is ticked.
    for i in range(xStart, xStart + xRange):
        for j in range(yStart, yStart + yRange):
            if self.rangeVar.get():
                self.getButtonArray(1)[j][i].config(highlightbackground=self.pupilButtonHighlightColour)
                if self.operatingSystem == "Windows":
                    self.getButtonArray(1)[j][i].config(bg=self.pupilButtonHighlightColour)
    if self.operatingSystem != "Linux":
        self.xRangeVar.set(str(xRange))
        self.yRangeVar.set(str(yRange))
def pupilTrackAction(self):
    """When pupil tracking is switched off, recentre both eye servos."""
    if not self.pupilTrack.get():
        picoh.move(picoh.EYETURN, 5)
        picoh.move(picoh.EYETILT, 5)
# Sets pupil range, called when value is changed in entry box.
def updateRange(self, *args):
    """Apply the X/Y pupil-range entry values to the current shape.

    Installed as a Tk variable trace and also called directly after edits.
    Incomplete input is ignored: the original only guarded against empty
    strings, so typing any non-digit crashed int(); catching ValueError
    covers both cases.
    """
    try:
        rangeX = int(self.xRangeVar.get())
        rangeY = int(self.yRangeVar.get())
    except ValueError:
        # Entry is empty or mid-edit; wait for valid input.
        return
    self.shapeList[self.shapeIndex].pupilRangeX = rangeX
    self.shapeList[self.shapeIndex].pupilRangeY = rangeY
    self.displayRange()
    self.updateXML(self.shapeList[self.shapeIndex])
def refreshShapeList(self):
    """Rebuild the sorted list of shape names and recreate the drop-down menu."""
    self.choices = []
    for entry in self.shapeList:
        self.choices.append(entry.name)
    # Get first item in list of choices and set as default
    self.choices.sort()
    # first = next(iter(self.choices), None)
    # self.tkvar.set(self.shapeList[0].name)
    # update popup menu with names from shapelist.
    self.popupMenu = Tk.OptionMenu(self.frame, self.tkvar, *self.choices)
    self.popupMenu.grid(row=3, column=27, columnspan=14, sticky="w")
    self.popupMenu.configure(width=20, font=self.customFont)
    if self.operatingSystem == "Windows" or self.operatingSystem == "Linux":
        # Pixel-based sizing (via a transparent image) keeps the layout
        # consistent on Windows/Linux, where character sizing differs.
        self.popupMenu.configure(compound="c", image=pixelImage, height=8, width=self.buttonWidth * 14, justify=Tk.LEFT)
        self.popupMenu.grid(columnspan=15)
def duplicate(self):
    """Duplicate the selected shape as '<name> (Copy)', persist and load it."""
    currentShape = self.shapeList[self.shapeIndex]
    newEyeShape = EyeShape("New", "", False, 5, 5)
    newEyeShape.autoMirror = currentShape.autoMirror
    newEyeShape.hexString = currentShape.hexString
    newEyeShape.name = currentShape.name + " (Copy)"
    newEyeShape.pupilRangeX = currentShape.pupilRangeX
    newEyeShape.pupilRangeY = currentShape.pupilRangeY
    self.shapeList.append(newEyeShape)
    self.addToXML(newEyeShape)
    if self.speak.get() == 1:
        picoh.say(currentShape.name + " Duplicated")
    self.loadShape(shapeName=currentShape.name + " (Copy)", internal=True)
    self.updatePicoh()
    # Rebuild the drop-down so the copy appears, then restore the normal
    # button layout.
    self.popupMenu.destroy()
    self.refreshShapeList()
    self.cancelNewShape()
# Function to read XML files
def xmlReadin(self):
    """Parse the eye-shape XML file into self.shapeList.

    Relies on element order within each item: a <Name> tag starts a new
    shape, and <AutoMirror> (the last field of an item) advances `index`
    to the next shape.
    """
    file = picoh.eyeShapeFile
    self.tree = etree.parse(file)
    index = 0
    for element in self.tree.iter():
        if element.tag == "Name":
            self.shapeList.append(EyeShape(str(element.text), "", False, 5, 5))
        if element.tag == "PupilRangeX":
            self.shapeList[index].pupilRangeX = int(element.text)
        if element.tag == "PupilRangeY":
            self.shapeList[index].pupilRangeY = int(element.text)
        if element.tag == "Hex":
            self.shapeList[index].hexString = element.text
        if element.tag == "AutoMirror":
            if element.text == "true":
                self.shapeList[index].autoMirror = True
            else:
                self.shapeList[index].autoMirror = False
            # AutoMirror is the last tag per item, so move on to the next shape.
            index = index + 1
def openHex(self, hexString, loading=False):
    """Decode a saved hex string into the six editor grids.

    Hex-character layout: eye shape [0:18], blink frames 1-4 [18:90],
    pupil [90:108]. The pupil grid is loaded before the eye-shape grid,
    matching the original call order (the pupil overlay reads eye state).
    """
    segments = (
        (90, 108, 1),  # pupil
        (0, 18, 0),    # eye shape
        (18, 36, 2),   # blink frame 1
        (36, 54, 3),   # blink frame 2
        (54, 72, 4),   # blink frame 3
        (72, 90, 5),   # blink frame 4
    )
    for start, end, grid in segments:
        binary = ''.join(self.hexToBin(ch) for ch in hexString[start:end])
        self.loadMatrix(binary, grid, loading)
# Function to flip the state of a button at given coordinate.
def flipButton(self, i, j, grid):
    """Toggle cell (i, j) of `grid`, keeping the pupil overlay and the
    pupil-range display in sync, then push the result to Picoh.

    Note the pupil overlay is updated from the *pre-toggle* state: the
    overlay call happens before the grid value is flipped below.
    """
    if grid == 1:
        if self.gridArrayOne[i][j] == 0:
            self.turnPupilOff(i, j)
        else:
            self.turnPupilOn(i, j)
    if self.getGridArray(grid)[i][j] == 0:
        self.turnButtonOn(i, j, grid, loading=False)
    else:
        self.turnButtonOff(i, j, grid, loading=False)
    if grid == 1:
        self.updateRange()
    self.updatePicoh()
# Turn pupil on at coordinate i, j
# Turn pupil on at coordinate i,j
def turnPupilOn(self, i, j):
    """Clear the pupil overlay at (i, j): the button shows the underlying
    eye-shape grid state with a neutral grey border."""
    if self.gridArray[i][j]:
        self.buttonArray[i][j].config(image=onImage)
        self.buttonArray[i][j].config(highlightbackground='grey')
    else:
        self.buttonArray[i][j].config(image=offImage)
        self.buttonArray[i][j].config(highlightbackground='grey')
        # NOTE(review): assumed to apply only to the off branch, matching the
        # bg-reset pattern used elsewhere — confirm original indentation.
        if self.operatingSystem == "Windows":
            self.buttonArray[i][j].config(bg=self.buttonCol)
# Turn pupil off at coordinate i,j
def turnPupilOff(self, i, j):
    """Mark (i, j) as a pupil cell: highlight its border while pupil mode is
    active and always clear the button image."""
    if self.pupilVar:
        self.buttonArray[i][j].config(highlightbackground=self.pupilButtonHighlightColour)
        if self.operatingSystem == "Windows":
            pass
            # self.buttonArray[i][j].config(bg=self.pupilButtonHighlightColour)
    self.buttonArray[i][j].config(image=offImage)
# Turn button on at coordinate i,j
def turnButtonOn(self, i, j, grid, loading):
    """Set cell (i, j) of `grid` on and update its button; marks the shape unsaved.

    While in pupil mode, an eye-shape cell (grid 0) cannot switch on under an
    active pupil cell — except while loading a shape from file.
    """
    if self.gridArrayOne[i, j] and self.pupilVar and grid == 0 and not loading:
        return
    self.getGridArray(grid)[i][j] = 1
    self.getButtonArray(grid)[i][j].config(highlightbackground='grey', image=onImage)
    if self.operatingSystem == "Windows":
        # BUG FIX: the original assigned `bg = self.buttonCol` to a throwaway
        # local (a no-op); actually apply the background colour, matching the
        # Windows styling used in generateButtons.
        self.getButtonArray(grid)[i][j].config(bg=self.buttonCol)
    self.saved = False
# Turn button off at coordinate i,j
def turnButtonOff(self, i, j, grid, loading):
    """Set cell (i, j) of `grid` off and update its button; marks the shape unsaved.

    While in pupil mode, an eye-shape cell (grid 0) cannot switch off under an
    active pupil cell — except while loading a shape from file.
    """
    if self.gridArrayOne[i, j] and self.pupilVar and grid == 0 and not loading:
        return
    self.getGridArray(grid)[i][j] = 0
    self.getButtonArray(grid)[i][j].config(highlightbackground='grey', image=offImage)
    if self.operatingSystem == "Windows":
        # BUG FIX: the original assigned `bg = self.buttonCol` to a throwaway
        # local (a no-op); actually apply the background colour, matching the
        # Windows styling used in generateButtons.
        self.getButtonArray(grid)[i][j].config(bg=self.buttonCol)
    self.saved = False
"""
Returns a hex string representing the current state of all grids
9 pairs of hex bits for each grid. Order: Eye,Pupil,Blink1,Blink2,Blink3,Blink4
"""
def hexFromGrids(self):
    """Return the hex string encoding all six grids.

    Grids are serialised in the order Eye, Blink1-4, Pupil; every cell
    contributes one bit (row-major), and the combined bit string is then
    hex-encoded, zero-padded to ceil(bits / 4) digits.
    """
    gridOrder = [0, 2, 3, 4, 5, 1]
    bits = []
    for grid in gridOrder:
        cells = self.getGridArray(grid)
        for row in range(cells.shape[0]):
            for col in range(cells.shape[1]):
                bits.append(str(int(cells[row][col])))
    binaryString = ''.join(bits)
    hexDigits = (len(binaryString) + 3) // 4
    return '%.*x' % (hexDigits, int('0b' + binaryString, 0))
# Get hex string from grids and set Picoh's eyes to it.
def updatePicoh(self):
    """Push the current grid state to Picoh's eyes, if connected."""
    if not self.picohConnected:
        return
    hexToSend = self.hexFromGrids()
    picoh._setEyes(hexToSend, hexToSend, self.shapeList[self.shapeIndex].autoMirror)
# Toggle sending data to Picoh.
def picohToggle(self):
    """Toggle live updates to Picoh and swap the button logo to match."""
    self.picohConnected = not self.picohConnected
    self.picohButton.config(image=logoOn if self.picohConnected else logo)
    self.updatePicoh()
# Function refresh all grids
def newButton(self):
    """Switch the side panel into 'new shape' mode: hide the normal buttons
    and show the name entry with OK/Cancel.

    NOTE(review): this method shares its name with the `self.newButton`
    widget attribute used below; the instance attribute shadows the bound
    method, so this is presumably wired up via a reference taken before the
    widget was assigned — confirm before renaming either.
    """
    self.newButton.grid_remove()
    self.renameButton.grid_remove()
    self.dupButton.grid_remove()
    self.delButton.grid_remove()
    self.popupMenu.destroy()
    self.okayOne.grid(row=4, column=27, columnspan=4, sticky="w")
    self.cancelOne.grid(row=4, column=31, columnspan=4, sticky="e")
    self.entryPop.grid(row=3, column=27, columnspan=15, rowspan=2, sticky="nw")
def cancel(self):
    """Abandon the current dialog: rebuild the drop-down, then restore the
    normal button layout (cancelNewShape re-grids the freshly built menu)."""
    self.refreshShapeList()
    self.cancelNewShape()
def cancelNewShape(self):
    """Restore the normal side-panel layout: hide every dialog widget, re-grid
    the drop-down and the four shape buttons, and clear both entry boxes."""
    self.entryPop.grid_remove()
    self.entryPopTwo.grid_remove()
    self.textLab.grid_remove()
    self.but.grid_remove()
    self.butCancel.grid_remove()
    self.okayOne.grid_remove()
    self.cancelOne.grid_remove()
    self.okayTwo.grid_remove()
    self.cancelTwo.grid_remove()
    self.popupMenu.grid(row=3, column=27, columnspan=14, sticky="w")
    self.newButton.grid(row=4, column=27, columnspan=4, sticky="w")
    self.renameButton.grid(row=4, column=31, columnspan=4, sticky="e")
    self.delButton.grid(row=5, column=27, columnspan=4, sticky="w")
    self.dupButton.grid(row=5, column=31, columnspan=4, sticky="e")
    self.entryPop.delete(0, Tk.END)
    self.entryPopTwo.delete(0, Tk.END)
def newShape(self, *args):
    """Create a blank eye shape named from the entry box, persist and load it.

    Does nothing (beyond restoring the panel) if the name entry is empty.
    """
    newName = self.entryPop.get()
    if newName == "":
        self.cancelNewShape()
        print("Please enter a name")
        return
    newEyeShape = EyeShape("New", "", False, 5, 5)
    # BUG FIX: the original stored the *string* "False", which is truthy and
    # made every new shape auto-mirror (addToXML writes 'true' for any truthy
    # value); store the boolean instead, matching xmlReadin and duplicate().
    newEyeShape.autoMirror = False
    # 108 hex zeros: all six 9x8 grids blank (see openHex's layout).
    newEyeShape.hexString = "0" * 108
    newEyeShape.name = newName
    newEyeShape.pupilRangeX = 5
    newEyeShape.pupilRangeY = 5
    self.addToXML(newEyeShape)
    self.shapeList.append(newEyeShape)
    self.refreshShapeList()
    self.loadShape(shapeName=newName, internal=True)
    self.cancelNewShape()
    self.updatePicoh()
def renameButton(self):
    """Switch the side panel into 'rename' mode: hide the normal buttons and
    show the entry (pre-filled with the current name) plus OK/Cancel.

    NOTE(review): like newButton, this method's name is shadowed by the
    `self.renameButton` widget attribute — confirm how it is invoked before
    renaming either.
    """
    self.newButton.grid_remove()
    self.renameButton.grid_remove()
    self.dupButton.grid_remove()
    self.delButton.grid_remove()
    self.popupMenu.destroy()
    self.entryPopTwo.grid(row=3, column=27, columnspan=10, rowspan=2, sticky="nw")
    self.okayTwo.grid(row=4, column=27, columnspan=4, sticky="w")
    self.cancelTwo.grid(row=4, column=31, columnspan=4, sticky="e")
    self.entryPopTwo.delete(0, Tk.END)
    self.entryPopTwo.insert(0, self.tkvar.get())
def rename(self, *args):
    """Apply the rename dialog: update the shape, the drop-down and the XML.

    Does nothing (beyond restoring the panel) if the entry is empty.
    """
    if self.entryPopTwo.get() == "":
        print("Please enter a name")
        self.cancelNewShape()
        return
    oldName = self.shapeList[self.shapeIndex].name
    newName = self.entryPopTwo.get()
    self.shapeList[self.shapeIndex].name = self.entryPopTwo.get()
    self.refreshShapeList()
    self.tkvar.set(self.entryPopTwo.get())
    self.cancelNewShape()
    self.renameInXML(oldName, newName)
    if self.speak.get() == 1:
        picoh.say(oldName + " renamed to " + newName)
def deleteShapeButton(self):
    """Switch the side panel into 'delete' mode: hide the normal buttons and
    show the confirmation label with Delete/Cancel buttons."""
    self.newButton.grid_remove()
    self.renameButton.grid_remove()
    self.dupButton.grid_remove()
    self.delButton.grid_remove()
    self.popupMenu.destroy()
    self.textLab.grid(row=3, column=27, columnspan=7, sticky="ws")
    self.textLab.config(bg=self.bgCol, font=self.customFont)
    self.but.grid(row=4, column=27, columnspan=4, sticky="w")
    self.but.configure(highlightbackground=self.bgCol, font=self.customFont)
    self.butCancel.grid(row=4, column=31, columnspan=4, sticky="e")
    self.butCancel.configure(highlightbackground=self.bgCol, font=self.customFont)
def deleteShape(self):
    """Delete the currently selected shape from the list and the XML file,
    then load the first remaining shape.

    NOTE(review): deleting the last remaining shape would raise IndexError at
    self.shapeList[0] — presumably the UI always keeps at least one shape;
    confirm before relying on this path.
    """
    for idx, shape in enumerate(self.shapeList):
        if shape.name == self.shapeList[self.shapeIndex].name:
            self.removeFromXML(shape.name)
            del self.shapeList[idx]
            break
    self.refreshShapeList()
    self.loadShape(True, self.shapeList[0].name)
    self.cancelNewShape()
    # self.tkvar.set(self.shapeList[0].name)
    self.updatePicoh()
    if self.speak.get() == 1:
        # `shape` is the loop variable from the search above (the deleted shape).
        picoh.say(shape.name + " deleted")
# Load shape and set grids to it.
def loadShape(self, internal, shapeName, loading=False, *args):
    """Load a shape — by explicit name, or by the drop-down selection — into
    the editor grids, range display and mirror checkbox.

    Also installed as the tkvar trace callback (the trace's own arguments
    arrive via *args).
    NOTE(review): if no shape matches, `chosenShape` stays unbound and the
    code below raises NameError — confirm callers always pass valid names.
    """
    if shapeName:
        for index, shape in enumerate(self.shapeList):
            if shape.name == shapeName:
                chosenShape = shape
                self.shapeIndex = index
    else:
        for index, shape in enumerate(self.shapeList):
            if shape.name == self.tkvar.get():
                chosenShape = shape
                self.shapeIndex = index
    # if loading == True:
    if self.speak.get() == 1:
        picoh.say(chosenShape.name + " loaded")
    self.openHex(chosenShape.hexString, loading)
    self.filenamelabel.config(text=chosenShape.name, font=self.customFont)
    # self.parent.title("Picoh Eye Shape Designer - " + chosenShape.name)
    self.currentfilename = chosenShape.name
    self.displayRange()
    if self.operatingSystem != "Linux":
        self.xRangeVar.set(str(chosenShape.pupilRangeX))
        self.yRangeVar.set(str(chosenShape.pupilRangeY))
    if chosenShape.autoMirror:
        self.mirrorVar.set(1)
    else:
        self.mirrorVar.set(0)
    # Recentre the eyes and refresh Picoh with the newly loaded shape.
    picoh.move(picoh.EYETILT, 5)
    picoh.move(picoh.EYETURN, 5)
    self.updatePicoh()
    self.pupilTrackAction()
    # Update the drop-down text without re-triggering this handler:
    # remove the trace, set the value, then re-attach the trace.
    self.tkvar.trace_vdelete("w", self.tkvar.trace_id)
    self.tkvar.set(chosenShape.name)
    self.tkvar.trace_id = self.tkvar.trace_variable("w", self.loadShape)
    # checkBoxAction toggles pupil mode each call; calling it twice refreshes
    # the pupil overlay for the new shape while leaving the mode unchanged.
    self.checkBoxAction()
    self.checkBoxAction()
# Check box action for automirror check box.
def mirrorChange(self):
    """Checkbox callback: store the auto-mirror flag on the current shape,
    refresh Picoh's eyes, recentre them and persist the change."""
    self.shapeList[self.shapeIndex].autoMirror = self.mirrorVar.get() == 1
    self.updatePicoh()
    picoh.move(picoh.EYETURN, 5)
    picoh.move(picoh.EYETILT, 5)
    self.updateXML(self.shapeList[self.shapeIndex])
# For a given hex bit return the binary string.
@staticmethod
def hexToBin(hexBit):
if hexBit == '0':
return "0000"
elif hexBit == '1':
return "0001"
elif hexBit == '2':
return "0010"
elif hexBit == '3':
return "0011"
elif hexBit == '4':
return "0100"
elif hexBit == '5':
return "0101"
elif hexBit == '6':
return "0110"
elif hexBit == '7':
return "0111"
elif hexBit == '8':
return "1000"
elif hexBit == '9':
return "1001"
elif hexBit == 'a' or hexBit == 'A':
return "1010"
elif hexBit == 'b' or hexBit == 'B':
return "1011"
elif hexBit == 'c' or hexBit == 'C':
return "1100"
elif hexBit == 'd' or hexBit == 'D':
return "1101"
elif hexBit == 'e' or hexBit == 'E':
return "1110"
elif hexBit == 'f' or hexBit == 'F':
return "1111"
else:
print("not a hex char:")
print(hexBit)
return '0000'
# Reset a given grid, clearing all buttons.
def reset(self, grid):
    """Clear every cell in `grid`; for the pupil grid also restore the overlay
    and range display, then persist the shape and refresh Picoh."""
    for j in range(8):
        for i in range(9):
            self.turnButtonOff(i, j, grid, loading=False)
            if grid == 1:
                self.turnPupilOn(i, j)
            if grid == 0:
                # Force the eye-shape state off even when turnButtonOff bailed
                # out (it refuses to change cells under an active pupil).
                self.gridArray[i][j] = 0
    if grid == 1:
        self.updateRange()
    self.updatePicoh()
    self.updateXML(self.shapeList[self.shapeIndex])
# Load a given grid with a binary string.
def loadMatrix(self, string, grid, loading=False):
    """Load one grid from a binary string ('1' = cell on), row-major, 8 columns.

    For the pupil grid (1) the pupil overlay is kept in sync after each cell.
    """
    for position, char in enumerate(string):
        row, col = divmod(position, 8)
        if char == '1':
            self.turnButtonOn(row, col, grid, loading)
            if grid == 1:
                self.turnPupilOff(row, col)
        else:
            self.turnButtonOff(row, col, grid, loading)
            if grid == 1:
                self.turnPupilOn(row, col)
# Return the gridArray Object for a given grid number
def getGridArray(self, grid):
    """Map a grid number (0-5) to its backing state array; None if unknown."""
    lookup = {
        0: "gridArray",
        1: "gridArrayOne",
        2: "gridArrayTwo",
        3: "gridArrayThree",
        4: "gridArrayFour",
        5: "gridArrayFive",
    }
    attrName = lookup.get(grid)
    if attrName is not None:
        return getattr(self, attrName)
# Return the buttonArray Object for a given grid number
def getButtonArray(self, grid):
    """Map a grid number (0-5) to its Tk button array; None if unknown."""
    lookup = {
        0: "buttonArray",
        1: "buttonArrayOne",
        2: "buttonArrayTwo",
        3: "buttonArrayThree",
        4: "buttonArrayFour",
        5: "buttonArrayFive",
    }
    attrName = lookup.get(grid)
    if attrName is not None:
        return getattr(self, attrName)
# Action for mouse down
def OnMouseDown(self, event, x, y, grid):
    """Mouse-press handler for a grid button: start a draw/erase stroke."""
    # BUG FIX: the original assigned a throwaway *local* `startedMoving`;
    # OnMouseMove reads self.startedMoving (set True by OnMouseLeave), so the
    # flag must be reset on the instance at the start of each stroke.
    self.startedMoving = False
    # The stroke erases if the pressed cell is currently on, draws otherwise.
    if self.getGridArray(grid)[x, y]:
        self.drawing = False
    else:
        self.drawing = True
    # Flip the button that triggered the event.
    self.flipButton(x, y, grid)
    # Eye-shape and pupil grids are shown live on Picoh; refresh immediately.
    if grid == 0 or grid == 1:
        self.updatePicoh()
# Action for mouse move
def OnMouseMove(self, event, grid):
    """Drag handler: map the pointer position to a grid cell and continue the
    current draw/erase stroke over it.

    The pixel-to-cell constants below are per-OS fudge factors for widget
    borders and title-bar heights — assumed tuned empirically per platform;
    confirm on each OS before changing.
    """
    # If mouse has not left the originally pressed button yet, do nothing.
    if not self.startedMoving:
        return
    # Offset accounts for the window having been moved since startup.
    offsetx = root.winfo_x() - self.rootx
    offsety = root.winfo_y() - self.rooty
    # Map the pixel coordinate of the event to the corresponding grid coordinate.
    coordinateX = ((event.x_root - 56 - offsetx) / 24)
    coordinateY = ((event.y_root - 100 - offsety) / 24) - 1
    if self.operatingSystem == "Windows":
        coordinateX = ((event.x_root - 28 - offsetx) / 25)
        coordinateY = ((event.y_root - 85 - offsety) / 25) - 1
        # Blink grids 4/5 sit one column further right on Windows.
        if grid > 3:
            coordinateX = coordinateX + 1
    if self.operatingSystem == "Linux":
        coordinateX = ((event.x_root - 32 - offsetx) / 26)
        coordinateY = ((event.y_root - 70 - offsety) / 26) - 1
    # print(str(coordinateX)+"\n"+str(coordinateY))
    # Wrap coordinates that fall on a neighbouring grid back into range.
    if coordinateY > 9:
        coordinateY = coordinateY + 0.5
        coordinateY = coordinateY % 10
    if coordinateX > 8:
        coordinateX = coordinateX % 9
    # Constrain coordinates to between 0 - 8.
    if coordinateX > 8:
        return
    if coordinateX > 7:
        coordinateX = 7
    if coordinateX < 0:
        coordinateX = 0
    if coordinateY > 8:
        coordinateY = 8
    if coordinateY < 0:
        coordinateY = 0
    # Continue the stroke: draw or erase depending on how it started
    # (self.drawing was decided in OnMouseDown), then refresh Picoh.
    if self.drawing:
        self.turnButtonOn(int(coordinateY), int(coordinateX), grid, loading=False)
        if grid == 1:
            self.turnPupilOff(int(coordinateY), int(coordinateX))
    else:
        self.turnButtonOff(int(coordinateY), int(coordinateX), grid, loading=False)
        if grid == 1:
            self.turnPupilOn(int(coordinateY), int(coordinateX))
    self.updatePicoh()
def motion(self, event):
    """Window-wide mouse-move handler: when pupil tracking is on, point the
    eyes toward the pointer's position within the window."""
    if not self.pupilTrack.get():
        return
    x, y = event.x, event.y
    # Recompute relative to the root window: the event's own x/y are
    # widget-local, which is not what the scaling below needs.
    x = root.winfo_pointerx() - root.winfo_rootx()
    y = root.winfo_pointery() - root.winfo_rooty()
    parentName = event.widget.winfo_parent()
    parent = event.widget._nametowidget(parentName)  # event.widget is your widget
    # Scale the pointer position to the 0-10 servo range.
    scaledX = x / parent.winfo_width()
    scaledX = scaledX * 10
    scaledY = y / parent.winfo_height()
    scaledY = scaledY * 10
    # NOTE(review): this condition is always true (`< 10 or > 0` covers every
    # number); a bounds check such as `0 < scaledX < 10 and 0 < scaledY < 10`
    # was probably intended — confirm before changing, as fixing it would
    # change behaviour at the window edges.
    if scaledX < 10 or scaledX > 0 or scaledY < 10 or scaledY > 0:
        picoh.move(picoh.EYETURN, scaledX)
        picoh.move(picoh.EYETILT, 10 - scaledY)
def saveShape(self):
    """Snapshot the grids into the current shape's hex string and persist it."""
    self.shapeList[self.shapeIndex].hexString = self.hexFromGrids()
    self.saved = True
    # self.updateRange()
    self.updateXML(self.shapeList[self.shapeIndex])
    # print("saved!")
    # print(self.shapeList[self.shapeIndex].hexString)
# Mouse up action for buttons.
def OnMouseUp(self, event):
    """Mouse-release handler: refresh the range display and schedule a save."""
    self.updateRange()
    self.clickedDown = False
    # self.uRange()
    if not self.saved:
        # Debounce: save shortly after the stroke ends rather than per cell.
        t = Timer(0.3, self.saveShape)
        t.start()  # saveShape runs 0.3 s after release
def testBlink(self):
    """Run a quick blink animation: close the lids in steps, then reopen."""
    for lidPosition in list(range(10, 0, -2)) + list(range(0, 10, 2)):
        picoh.move(picoh.LIDBLINK, lidPosition)
        picoh.wait(0.04)
# Mouse leave action for buttons.
def OnMouseLeave(self, event):
    """Once the pointer leaves the pressed button, drags count as strokes
    (OnMouseMove ignores motion until this flag is set)."""
    self.startedMoving = True
# Pupil toggle check box callback.
def checkBoxAction(self):
    """Toggle pupil-editing mode.

    self.pupilVar flips on every call: leaving pupil mode redraws every
    overlay cell from the eye grid; entering it re-highlights active pupil
    cells.
    """
    if self.pupilVar:
        self.pupilVar = False
        for j in range(8):
            for i in range(9):
                self.turnPupilOn(i, j)
    else:
        self.pupilVar = True
        for j in range(8):
            for i in range(9):
                if self.gridArrayOne[i][j] == 1:
                    self.turnPupilOff(i, j)
class SpeechDatabasePage(Tk.Frame):
    """Editor page for the phrase database (picohData/PicohSpeech.csv).

    Shows one row of entry widgets per phrase inside a scrollable canvas and
    writes the CSV back whenever a row gets Return or loses focus.
    """
    global phraseList, rowList, numberOfRows

    class Phrase(object):
        # Record for one CSV row: set id, variable id, phrase text.
        def __init__(self, set, variable, text):
            self.set = set
            self.variable = variable
            self.text = text

    def generateRow(self, phrase, rowNo, frame):
        """Create the Set/Variable/Phrase entries plus a Speak button for one
        phrase, grid them at rowNo+1, and return the widgets as a list."""
        row = []
        # print(phrase.text)
        e = Tk.Entry(frame, font=self.customFont)
        e.insert(0, phrase.set)
        e.config(width=5)
        e.bind("<Return>", self.callback)
        e.bind("<FocusOut>", self.callback)
        e.grid(column=0, row=rowNo + 1)
        row.append(e)
        e1 = Tk.Entry(frame, font=self.customFont)
        e1.insert(0, phrase.variable)
        e1.config(width=8)
        e1.bind("<Return>", self.callback)
        e1.bind("<FocusOut>", self.callback)
        e1.grid(column=1, row=rowNo + 1)
        row.append(e1)
        e2 = Tk.Entry(frame, font=self.customFont)
        e2.insert(0, phrase.text)
        e2.config(width=89)
        e2.bind("<Return>", self.callback)
        e2.bind("<FocusOut>", self.callback)
        e2.grid(column=2, row=rowNo + 1)
        e2.bind('<Control-a>', self.selAll)
        # NOTE(review): duplicate of the bind on the line above.
        e2.bind('<Control-a>', self.selAll)
        row.append(e2)
        b1 = Tk.Button(frame, font=self.customFont, text="Speak", command=lambda text=1: picoh.say(e2.get()))
        b1.grid(column=3, row=rowNo + 1)
        b1.bind('<Control-a>', self.selAll)
        # NOTE(review): appends e2 a second time — b1 was probably intended.
        # Harmless today: callback() only reads row indices 0-2.
        row.append(e2)
        return row

    # NOTE(review): selectall/selAll are defined without `self`; when accessed
    # as self.selAll, the instance fills the `event` slot and Tk's event
    # argument becomes an unexpected extra, so invoking these bindings would
    # raise TypeError — confirm before relying on Ctrl-A handling.
    def selectall(event):
        event.widget.tag_add("sel", "1.0", "end")
        return "break"

    def selAll(event):
        event.widget.select_range(0, len(event.widget.get()))
        return "break"

    def new(self):
        """Append an empty phrase row and scroll the canvas to the bottom."""
        global numberOfRows, phraseList, rowList
        newPhrase = self.Phrase(0, 0, "")
        row = self.generateRow(newPhrase, self.numberOfRows + 1, self.frame)
        self.phraseList.append(newPhrase)
        self.rowList.append(row)
        self.numberOfRows = self.numberOfRows + 1
        self.refreshCanvas()
        self.canvas.yview_moveto(1)

    def refreshCanvas(self):
        """Re-embed the frame in the canvas and recompute the scroll region."""
        self.canvas.delete("all")
        self.canvas.create_window(0, 0, anchor='n', window=self.frame)
        self.canvas.update_idletasks()
        self.canvas.configure(scrollregion=self.canvas.bbox('all'), yscrollcommand=self.scroll_y.set)
        # NOTE(review): the three values below are computed but never used.
        rowHeight = 26
        blockMax = 400
        offset = blockMax - (rowHeight * self.numberOfRows)

    def callbackWin(self, *args):
        """Windows CSV writer: newline='' prevents blank lines between rows."""
        with open(self.file, 'w', newline='') as writeFile:
            writer = csv.writer(writeFile)
            writer.writerow(["Set", "Variable", "Phrase"])
            # print("Win")
            for row in self.rowList:
                set = row[0].get()
                variable = row[1].get()
                phrase = row[2].get()
                # Rows with an empty phrase are dropped — this is how the
                # "leave blank to delete" behaviour works.
                if phrase:
                    writer.writerow([set, variable, phrase])

    def callback(self, *args):
        """Save every row back to the CSV (platform-specific newline handling)."""
        if platform.system() == "Windows":
            self.callbackWin(self)
        else:
            with open(self.file, 'w') as writeFile:
                writer = csv.writer(writeFile, delimiter=',')
                writer.writerow(["Set", "Variable", "Phrase"])
                for row in self.rowList:
                    set = row[0].get()
                    variable = row[1].get()
                    phrase = row[2].get()
                    if phrase:
                        writer.writerow([set, variable, phrase])

    # NOTE(review): missing `self`, and calls callback()/root.destroy() as
    # globals; only referenced from the commented-out WM_DELETE_WINDOW line
    # in __init__, so it is currently dead code.
    def on_closing():
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            callback()
            root.destroy()

    def _on_mousewheel(self, event):
        # Scroll the canvas by 1% per wheel event; direction from delta's sign.
        if event.delta < 0:
            self.canvas.yview_moveto(self.canvas.yview()[0] + 0.01)
        else:
            self.canvas.yview_moveto(self.canvas.yview()[0] - 0.01)

    def __init__(self, parent, frameIn):
        """Build the scrollable phrase table inside frameIn and load the CSV."""
        operatingSystem = platform.system()
        if operatingSystem == "Darwin":
            self.customFont = tkFont.Font(family="Letter Gothic Std", size=11)
        if operatingSystem == "Windows" or operatingSystem == "Linux":
            self.customFont = tkFont.Font(family="Helvetica", size=8)
        speechFile = 'picohData/PicohSpeech.csv'
        # directory = picoh.dir
        self.numberOfRows = 0
        self.phraseList = []
        self.rowList = []
        self.file = speechFile
        self.parent = parent
        self.canvas = Tk.Canvas(frameIn)
        self.scroll_y = Tk.Scrollbar(frameIn, orient="vertical", command=self.canvas.yview)
        self.canvas.config(width=830, height=420, bg='white', highlightthickness=0)
        self.frame = Tk.Frame(self.canvas)
        self.canvas.create_window(0, 0, anchor='nw', window=self.frame)
        # make sure everything is displayed before configuring the scrollregion
        self.canvas.update_idletasks()
        self.canvas.configure(scrollregion=self.canvas.bbox('all'),
                              yscrollcommand=self.scroll_y.set)
        self.canvas.grid(column=0, row=1)
        self.scroll_y.grid(column=1, row=1, sticky="ns")
        with open(self.file, 'r')as f:
            data = csv.reader(f)
            for row in data:
                # Skip the header row and rows without a set id.
                if row[0] != '' and row[0] != 'Set':
                    # NOTE(review): the first two branches below can never run
                    # under the guard above (row[0] is known to be non-empty).
                    if row[0] == '' and row[1] == '':
                        newPhrase = self.Phrase('', '', row[2])
                        self.phraseList.append(newPhrase)
                    elif row[0] == '' and row[1] != '':
                        newPhrase = self.Phrase('', int(row[1]), row[2])
                        self.phraseList.append(newPhrase)
                    elif row[0] != '' and row[1] == '':
                        newPhrase = self.Phrase(row[0], '', row[2])
                        self.phraseList.append(newPhrase)
                    else:
                        newPhrase = self.Phrase(row[0], row[1], row[2])
                        self.phraseList.append(newPhrase)
        # self.parent.title("Picoh Speech DB")
        self.parent.grid_rowconfigure(1, weight=0)
        self.parent.grid_columnconfigure(1, weight=0)
        root.title("Picoh - Tools")
        root.configure(bg='white')
        self.canvas.bind_all("<MouseWheel>", self._on_mousewheel)
        # root.protocol("WM_DELETE_WINDOW", on_closing)
        # group of widgets
        for phrase in self.phraseList:
            row = self.generateRow(phrase, self.numberOfRows, self.frame)
            self.numberOfRows = self.numberOfRows + 1
            self.rowList.append(row)
        # NOTE(review): computed but unused (same dead code as refreshCanvas).
        rowHeight = 26
        blockMax = 400
        offset = blockMax - (rowHeight * self.numberOfRows)
        directory = picoh.directory
        saveButton = Tk.Button(self.frame, text="Save", command=self.callback, font=self.customFont)
        # saveButton.grid(column=1, row=1, sticky='w')
        addButton = Tk.Button(frameIn, image=plusImage, command=self.new, font=self.customFont)
        addButton.grid(column=0, row=2, sticky='w')
        picohLabel = Tk.Label(frameIn, image=picohImage)
        picohLabel.grid(column=0, row=3)
        setLabel = Tk.Label(frameIn, text="Set", font=self.customFont, width=3, bg='white')
        setLabel.grid(column=0, row=0, sticky='w')
        variableLabel = Tk.Label(frameIn, text="Variable", font=self.customFont, width=10, bg='white')
        variableLabel.grid(column=0, row=0, sticky='w', padx=40)
        phraseLabel = Tk.Label(frameIn, text="Phrase (Leave blank to delete)", font=self.customFont, bg='white')
        phraseLabel.grid(column=0, row=0, sticky='w', padx=115)
        self.canvas.yview_moveto(1)
        self.refreshCanvas()
class Calibrate(Tk.Frame):
global button, stage, lipMinRaw, lipMaxRaw, tempMin, tempMax
def setLipPos(self, *args):
    """Slider callback: drive the bottom-lip servo directly from the slider's
    raw value over the serial port (only while calibration is running)."""
    if self.started:
        picoh.attach(picoh.BOTTOMLIP)
        # Convert position (0-10) to a motor position in degrees
        # Scale range of speed
        spd = (250 / 10) * 2
        # Construct message from values
        msg = "m0" + str(picoh.BOTTOMLIP) + "," + str(self.var.get()) + "," + str(spd) + "\n"
        # Write message to serial port
        picoh._serwrite(msg)
        # Update motor positions list
        picoh.motorPos[picoh.BOTTOMLIP] = self.var.get()
def ResetRangeToRawCentre(self):
    """Recentre the stored lip min/max around the current slider value.

    Keeps the existing range width where possible, shrinking it so that
    neither end leaves the valid raw range 0-1000. Results go into
    self.tempMin / self.tempMax.
    """
    # Find the range
    lipRange = self.lipMaxRaw - self.lipMinRaw
    center = self.var.get()
    # Limit to 1000 if something's gone wrong
    if lipRange > 1000:
        lipRange = 1000
    # If centre + half the range goes over 1000, shrink by twice the overshoot.
    over = center + (lipRange / 2) - 1000
    if over > 0:
        # BUG FIX: the original computed `over - (over * 2)` (i.e. -over),
        # discarding the current range entirely; mirror the undershoot
        # handling below instead.
        lipRange = lipRange - (over * 2)
    # If centre - half the range goes below 0, shrink by twice the undershoot.
    under = lipRange / 2 - center
    if under > 0:
        lipRange = lipRange - (under * 2)
    self.tempMin = int(center - (lipRange / 2))
    self.tempMax = int(center + (lipRange / 2))
def writeToXML(self,minimum, maximum):
file = 'picohData/MotorDefinitionsPicoh.omd'
tree = etree.parse(file)
root = tree.getroot()
for child in root:
if child.get("Name") == "BottomLip":
child.set("Min", str(minimum))
child.set("Max", str(maximum))
with open(file, 'wb') as f:
f.write(etree.tostring(tree))
f.close()
def ResetRangeToRawMin(self):
smilePos = self.var.get()
# Find the mid position which was hopefully set by step 1
midRaw = self.tempMin + ((self.tempMax - self.tempMin) / 2)
lipRange = (midRaw - smilePos) * 2
# Stop the max being more than 1000
if smilePos + lipRange > 1000:
lipRange = 1000 - smilePos
# The current position should set the new min
minimum = int(smilePos)
maximum = int(smilePos + lipRange)
scaledMinimum = int(minimum / 180 * 1000)
scaledMaximum = int(maximum / 180 * 1000)
self.writeToXML(scaledMinimum, scaledMaximum)
def sel(self):
global frameUsed
if self.stage == 2:
picoh.reset()
self.stage=-1
self.sel()
return
if self.stage == 1:
self.ResetRangeToRawMin()
self.label.config(text="All done!")
self.started = False
picoh.reset()
self.button.config(text="Restart")
self.stage = 2
if self.stage == 0:
selection = "Value = " + str(self.var.get())
self.label.config(
text="Slowly move the slider to the right, stop when the bottom lip pops the top lip into a smile.")
self.button.config(text="Set Smile Point")
self.ResetRangeToRawCentre()
self.stage = 1
# Set headnod to 5
picoh.move(picoh.HEADNOD, 10)
# Move bottom lip to 4
picoh.move(picoh.BOTTOMLIP, 4)
# Wait 250ms
picoh.wait(0.25)
# Move bottom lip to 6
picoh.move(picoh.BOTTOMLIP, 6)
if self.stage == -1:
picoh.move(picoh.HEADNOD, 8)
# Move bottom lip to 4
picoh.move(picoh.BOTTOMLIP, 4)
# Wait 250ms
picoh.wait(0.25)
# Move bottom lip to 8
picoh.move(picoh.BOTTOMLIP, 8)
self.label.config(text='Slowly move the slider to the left until the bottom lip just touches the top lip')
self.button.config(text='Set Mid-point.')
self.stage = 0
self.started = True
root.after(0, self.update, 0)
def update(self,ind):
if self.stage == 0:
frame = frames[ind]
self.graphic.configure(image=frame)
if self.stage == 1:
frame = framesTwo[ind]
self.graphic.configure(image=frame)
ind += 1
if ind == len(frames) and self.stage == 0:
ind = 0
if ind == len(framesTwo) and self.stage == 1:
ind = 0
if ind == 0:
root.after(2000,self.update, ind)
else:
root.after(20,self.update, ind)
def __init__(self,parent,frameIn):
self.started = False
self.stage = -1
self.graphic = Tk.Label(frameIn)
self.graphic.config(width=10000)
frame = frames[len(frames)-1]
self.graphic.configure(image=frame)
self.graphic.pack(anchor='ne')
operatingSystem = platform.system()
if operatingSystem == "Darwin":
self.customFont = tkFont.Font(family="Letter Gothic Std", size=11)
if operatingSystem == "Windows" or operatingSystem == "Linux":
self.customFont = tkFont.Font(family="Helvetica", size=8)
# Get min and max positions.
self.lipMinRaw = picoh.motorMins[picoh.BOTTOMLIP]
self.lipMaxRaw = picoh.motorMaxs[picoh.BOTTOMLIP]
lipRange = self.lipMaxRaw - self.lipMinRaw
# Extend Ranges
if self.lipMinRaw - lipRange / 5 > 0:
self.lipMinRaw = self.lipMinRaw - lipRange / 5
else:
self.lipMinRaw = 0
if self.lipMaxRaw + lipRange / 5 < 1000:
self.lipMaxRaw = self.lipMaxRaw + lipRange / 5
else:
self.lipMaxRaw = 1000
self.parent = parent
self.frame = frameIn
self.var = Tk.IntVar()
self.var.set(picoh._getPos(picoh.BOTTOMLIP, picoh.motorPos[picoh.BOTTOMLIP]))
scale = Tk.Scale(self.frame, variable=self.var, from_=self.lipMaxRaw, length=wDim - 140, to=self.lipMinRaw, orient=Tk.HORIZONTAL)
self.var.trace_variable("w", self.setLipPos)
scale.pack(anchor='s')
self.label = Tk.Label(self.frame,font=self.customFont, text='')
self.label.pack()
self.button = Tk.Button(self.frame, text="Start", command=self.sel,font=self.customFont)
self.button.pack(anchor=Tk.CENTER)
#root.after(0, self.update, 0)
class SettingsPage(Tk.Frame):
    """'Settings' tab: lets the user edit Picoh's default settings and writes
    any change back to the settings XML file (on <Return>, focus loss, or the
    Save button).

    NOTE(review): like the original code, this never calls Tk.Frame.__init__
    and never uses ``parent``; the class is only a namespace around the
    widgets it creates in ``frameIn``.
    """

    def __init__(self, parent, frameIn):
        operatingSystem = platform.system()
        if operatingSystem == "Darwin":
            self.customFont = tkFont.Font(family="Letter Gothic Std", size=11)
        if operatingSystem == "Windows" or operatingSystem == "Linux":
            self.customFont = tkFont.Font(family="Helvetica", size=8)
        # Editable settings, one row each (was copy-pasted widget code).
        self.entry1 = self._addSetting(frameIn, 0, "Default Eye Shape:", picoh.defaultEyeShape)
        self.entry2 = self._addSetting(frameIn, 1, "Default Speech Synth:", picoh.synthesizer)
        self.entry3 = self._addSetting(frameIn, 2, "Default Voice:", picoh.voice)
        self.entry5 = self._addSetting(frameIn, 3, "Default Language/Voice (gTTS):", picoh.language)
        # Read-only values go in disabled Text widgets so they can still be
        # selected and copied.
        self._addReadOnly(frameIn, 4, "Port:", picoh.port, height=1)
        # The sounds-folder setting is currently hidden (never gridded) but
        # the entry is kept so other code can still read self.entry4.
        self.entry4 = Tk.Entry(frameIn, width=50, font=self.customFont)
        self.entry4.insert(0, picoh.soundFolder + "/")
        self.entry7 = self._addSetting(frameIn, 6, "SpeechDB File:", picoh.speechDatabaseFile)
        self.entry8 = self._addSetting(frameIn, 7, "EyeShape List:", picoh.eyeShapeFile)
        self.entry9 = self._addSetting(frameIn, 8, "Motor Def File:", picoh.picohMotorDefFile)
        self._addReadOnly(frameIn, 9, "Picoh Python library:", picoh.directory, height=2)
        saveButton = Tk.Button(frameIn, text="Save", command=self.writeToXML)
        saveButton.grid(row=10, column=0)

    def _addSetting(self, frameIn, row, text, value):
        """Create one labelled, editable setting row. Saving is triggered by
        <Return> or by the entry losing focus. Returns the Entry widget."""
        label = Tk.Label(frameIn, text=text, font=self.customFont)
        label.grid(row=row, column=0)
        entry = Tk.Entry(frameIn, width=50, font=self.customFont)
        entry.insert(0, value)
        entry.grid(row=row, column=1)
        entry.bind("<Return>", self.writeToXML)
        entry.bind("<FocusOut>", self.writeToXML)
        return entry

    def _addReadOnly(self, frameIn, row, text, value, height=1):
        """Create one labelled read-only row (a disabled Text widget)."""
        label = Tk.Label(frameIn, text=text, font=self.customFont)
        label.grid(row=row, column=0)
        box = Tk.Text(frameIn, width=50, font=self.customFont, height=height, highlightthickness=0)
        box.insert('1.0', value)
        box.config(state="disabled")
        box.grid(row=row, column=1)
        # Let the disabled widget take focus so its text can be copied.
        box.bind('<1>', lambda event: box.focus_set())
        return box

    def writeToXML(self, *args):
        """Write the current entry values back to the settings XML file and
        push the new defaults into the running picoh module.

        *args absorbs the Tk event object when called from a binding."""
        file = picoh.settingsFile
        tree = etree.parse(file)
        root = tree.getroot()
        for child in root:
            if child.get("Name") == "DefaultEyeShape":
                child.set("Value", self.entry1.get())
                picoh.defaultEyeShape = self.entry1.get()
            if child.get("Name") == "DefaultSpeechSynth":
                child.set("Value", self.entry2.get())
                picoh.setSynthesizer(self.entry2.get())
            if child.get("Name") == "DefaultVoice":
                child.set("Value", self.entry3.get())
                picoh.setVoice(self.entry3.get())
            if child.get("Name") == "DefaultLang":
                child.set("Value", self.entry5.get())
                picoh.setLanguage(self.entry5.get())
            if child.get("Name") == "SpeechDBFile":
                child.set("Value", self.entry7.get())
            if child.get("Name") == "EyeShapeList":
                child.set("Value", self.entry8.get())
            if child.get("Name") == "MotorDefFile":
                child.set("Value", self.entry9.get())
        # The with-block closes the file; the old explicit close() was redundant.
        with open(file, 'wb') as f:
            f.write(etree.tostring(tree))
if __name__ == "__main__":
    root = Tk.Tk()
    directory = picoh.directory

    def _load_image(name):
        # All GUI images live under <library dir>/Images.
        return Tk.PhotoImage(file=os.path.join(directory, name))

    onImage = _load_image('Images/onsmaller.gif')
    offImage = _load_image('Images/offsmaller.gif')
    logo = _load_image('Images/picohlogo.gif')
    logoOn = _load_image('Images/picohlogoOn.gif')
    copyDown = _load_image('Images/movedown.gif')
    copyRight = _load_image('Images/moveright.gif')
    pixelImage = _load_image('Images/pixel.gif')
    plusImage = _load_image('Images/plus.gif')
    picohImage = _load_image('Images/picohlogoSmall.gif')

    # Animated calibration GIFs: load every frame individually.
    imageFile = os.path.join(directory, 'Images/calibrate400.gif')
    frames = [Tk.PhotoImage(file=imageFile, format='gif -index %i' % (i)) for i in range(52)]
    imageFile = os.path.join(directory, 'Images/calibrate2400.gif')
    framesTwo = [Tk.PhotoImage(file=imageFile, format='gif -index %i' % (i)) for i in range(53)]

    # Window geometry differs per platform.
    system = platform.system()
    if system == "Darwin":
        xDim, yDim, hDim, wDim = 120, 140, 560, 903
    if system == "Windows":
        xDim, yDim, hDim, wDim = 20, 40, 495, 850
    if system == "Linux":
        xDim, yDim, hDim, wDim = 20, 40, 560, 915
    root.geometry('%dx%d+%d+%d' % (wDim, hDim, xDim, yDim))
    root.configure(bg='white')

    # One notebook tab per tool page.
    nb = ttk.Notebook(root, width=wDim, height=hDim)
    tab1 = Tk.Frame(nb, width=wDim, height=hDim)
    nb.add(tab1, text='Eye Designer')
    tab2 = Tk.Frame(nb)
    nb.add(tab2, text='SpeechDB')
    tab3 = Tk.Frame(nb)
    nb.add(tab3, text='Calibrate')
    tab4 = Tk.Frame(nb)
    nb.add(tab4, text='Settings')
    nb.enable_traversal()

    if system == "Darwin":
        # Bring the Tk window to the foreground on macOS.
        os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')

    eyeApp = PicohEyeDesigner(root, tab1)
    speechApp = SpeechDatabasePage(root, tab2)
    calibrateApp = Calibrate(root, tab3)
    settingsApp = SettingsPage(root, tab4)
    nb.pack()
    root.mainloop()
|
from magenta.music.protobuf import music_pb2
def twinkle_twinkle():
    """Return a NoteSequence holding the melody of 'Twinkle, Twinkle, Little
    Star' at 60 qpm (8 seconds total, every note at velocity 80)."""
    # (pitch, start_time, end_time) for each note of the melody.
    melody = [
        (60, 0.0, 0.5), (60, 0.5, 1.0),
        (67, 1.0, 1.5), (67, 1.5, 2.0),
        (69, 2.0, 2.5), (69, 2.5, 3.0),
        (67, 3.0, 4.0),
        (65, 4.0, 4.5), (65, 4.5, 5.0),
        (64, 5.0, 5.5), (64, 5.5, 6.0),
        (62, 6.0, 6.5), (62, 6.5, 7.0),
        (60, 7.0, 8.0),
    ]
    seq = music_pb2.NoteSequence()
    for pitch, start, end in melody:
        seq.notes.add(pitch=pitch, start_time=start, end_time=end, velocity=80)
    seq.total_time = 8
    seq.tempos.add(qpm=60)
    return seq
|
#lattice1D.py
from __future__ import division,print_function
"""Functions for computing time evolution of wavefunctions in a moving 1D optical lattice.
Units are "natural", with 1=hbar=2m, for m=mass of particle, and electrical units
such that the dipole strength is 1, i.e. Rabi frequency = electric field strength.
"""
from numpy import *
from scipy import *
from pylab import *
from scipy import sparse
from scipy.linalg import expm
import numpy
import numbers
# Physical constants (SI) and the derived Sr-461 "natural" unit system
# (1 = hbar = 2m, lengths in units of d0 = lambda/2pi).
# Units:
hbar = 1.05457e-34
C = 299792458.0
eps0 = 8.85418782e-12
mu0 = 4.e-7*pi
eta0 = 377 # Impedance of free space
g = 9.81 # Earth's gravitational acceleration
M2 = 2*1.455e-25 # 2*Sr mass in kg
#d0 = 461.e-9
lam0 = 461.e-9 # A typical length scale in meters (Sr transition wavelength)
d0 = lam0/(2*pi) # NOTE: THIS IS THE RIGHT CHARACTERISTIC LENGTH FOR LATTICE RECOIL UNITS!
k0 = 1./d0 # Sr transition k-vector length
mu = 2.58e-29 # Sr 461 dipole strength in meter second Amperes
fSr = C/d0 # Sr 461 transition frequency in Hz = 6.503 e14
wSr = fSr*2*pi # Sr 461 transition angular frequency = 4.086 e15
f0 = hbar/(M2*d0**2) # Sr 461 characteristic frequency hbar/(2*m*d0^2) = 1.705 e3
w0 = f0*2*pi # Sr 461 characteristic angular frequency = 1.071 e4
U0 = hbar*f0 # Sr 461 characteristic energy (recoil energy/(2pi)^2) = 1.798 e-31
E0 = U0/mu # Sr 461 characteristic electric field = 6.970 e-3
a0 = d0*f0**2 # Sr 461 characteristic acceleration = 1.340
wSr0 = wSr/w0 # Sr 461 frequency in 461 units = 3.814 e11
gSr = g/a0 # Gravitational acceleration in Sr units (~8.6) = 7.318
EgSr = g*d0*M2 # Gravitational characteristic energy for Sr = 1.316 e-30 = gSr*U0
#---- Next are not "natural" but practical values
delta_typical = 5e12 # Typical (optimized) detuning for 461 nm transition
dtn0 = delta_typical/f0 # ... in 461 units = 2.93e9
Etyp = sqrt(2*dtn0*gSr) # Corresponding typical electric field = 2.07e5 in 461 units
#Note: We want lattice amplitude to be >= gravitational potential over d0
# => E^2/2dtn0 >= gSr (times d0 times M2, both 1)
# => E >= sqrt(2*dtn0*gSr) = 2.1e5 (= 2.1e5 E0 = 1444 in MKS)
# => power/area (magnitude of Poynting vector) > 1444^2/(2*eta0) = 2765 W/m^2 = .2765 W/cm^2
# This is a reasonable laser intensity
'''Had this written before: I think it's wrong:
#Note: We want electric field amplitude to be >= order of sqrt(gSr)~3
# => electric field > 2e-2
# => power/area (magnitude of Poynting vector) > 2e-2/(2*eta0) = 3e-5 W/m^2
# This is way smaller than real laser intensities (~1W/cm^2=1e4 W/m^2), so
# typical electric field strengths in the range '''
# Default laser parameters (these are the values held in ``std`` below):
sk1=1.0; sk2 = -1.0
sE1 = Etyp; sE2 = Etyp; ####### NEED TO CHECK AGAINST GRAVITY IN Sr UNITS
sy1 = 0; sy2 = 0; # Phase
sw1 = wSr0 + dtn0; sw2 = lambda t: wSr0 + dtn0 + 10*t; # Angular frequency
# NOTE: 'Angular frequency' w is really (1/t) * int_0^t W(t')dt', where W(t') is the true instantaneous angular frequency
std = {'k1':sk1,'w1':sw1,'y1':sy1,'E1':sE1,
       'k2':sk2,'w2':sw2,'y2':sy2,'E2':sE2,
       'grav':None,'wr':wSr0}
def funcify(a):
    """Coerce *a* to a function of time: callables pass through unchanged,
    anything else is wrapped in a constant function of t."""
    return a if callable(a) else (lambda t: a)
def stdize(d, func="wyE"):
    """Return a copy of the default parameter dict ``std`` overlaid with the
    entries of ``d``, turning the appropriate entries into functions of time.

    ``func`` selects which parameters are funcified; currently only the
    default "wyE" (frequencies w1/w2, phases y1/y2, amplitudes E1/E2) does
    anything.
    """
    merged = std.copy()
    merged.update(d)
    if func == "wyE":
        for key in ('w1', 'y1', 'E1', 'w2', 'y2', 'E2'):
            merged[key] = funcify(merged[key])
    return merged
std1 = stdize(std)  # Default parameter set with w/y/E entries already funcified
############ momentum space ###########
def HParams(k1=None,w1=None,y1=None,E1=None, # First laser's k-vector, frequency, phase, and amplitude
            k2=None,w2=None,y2=None,E2=None, # Second laser
            grav=None,delta=None,wr=None,aa=1.0,d=std):
    ''' Takes in laser parameters and spits out lattice Hamiltonian parameters
    (for momentum space Schrodinger equation).
    d is a dictionary for input parameters. Values in d are overriden by
    parameters passed as arguments.

    Returns a dict with keys 'k' (lattice wavenumber), 'w', 'y', 'A'
    (functions of time), 'grav', and 'p_g' (gravitational momentum shift).
    '''
    d = d.copy()
    inputs = { 'k1':k1,'w1':w1,'y1':y1,'E1':E1,
               'k2':k2,'w2':w2,'y2':y2,'E2':E2,
               'grav':grav,'delta':delta,'wr':wr,'aa':aa}
    keys = inputs.keys()
    for i in keys:
        if inputs[i] is not None: # Throw out null inputs
            d[i] = inputs[i] # Wrap everything into d
    d = stdize(d)
    # Get the detuning figured out:
    if ('delta' not in d.keys()) or d['delta'] is None:
        if ('wr' not in d.keys()) or d['wr'] is None:
            raise ValueError('Either delta or wr must be supplied.')
        d['delta'] = d['w1'](0)-d['wr']
    if d['delta']==0:
        raise ValueError("Detuning can't be zero.")
    k = d['k1']-d['k2'] ########## MIGHT TRANSPOSE THIS
    # Beat-note frequency/phase and lattice amplitude as functions of time.
    w = lambda t: d['w1'](t)-d['w2'](t)
    y = lambda t: d['y1'](t)-d['y2'](t)
    A = lambda t: d['aa']*d['E1'](t)*d['E2'](t)/(2.*d['delta'])
    # NOTE(review): this tests the *argument* ``grav``, not d['grav'] — a
    # gravity value supplied only through the dictionary ``d`` produces no
    # momentum shift here, and ham['grav'] is stored as None. Confirm intended.
    if grav is not None:
        p_g = lambda t: grav*t/2. # Gravitational shift to momentum
    else:
        p_g = lambda t: 0.0
    # From the above parameters, the lattice Hamiltonian (with gravity unitaried away)
    # is given by H = (p - p_g)^2 + A[j]*cos(k[j]*x - w[j]*t + y[j])
    ham = {'k':k,'w':w,'y':y,'A':A,'grav':grav,'p_g':p_g}
    return ham
def avUniform(accel=0.,vel=0.,grav=gSr,gm=1.0,aa=1.0,
              Run=True,q=0.,T=arange(0,3,.01),n=5,init=0,ret='cph',plt='c',talk=False):
    """Set up (and optionally run) a system with a uniform acceleration plus
    a constant velocity shift.

    Signs of ``accel``/``vel`` follow the sign convention of the x axis.
    ``gm`` is a "g-multiplier" scaling ``grav``; ``aa`` scales the lattice
    depth. When ``Run`` is false only the Hamiltonian parameters are built.
    ``ret`` selects the returned tuple ('c' coefficients, 'p' momenta,
    'h' Hamiltonian parameters); ``plt`` selects plots ('c' coefficients,
    'b' bars, 'p' momentum expectation value).
    """
    effective_g = grav * gm
    # Laser 1 chirps linearly; laser 2 stays fixed, giving a uniform
    # acceleration of the lattice.
    w1 = lambda t: wSr0 + dtn0 + vel + accel * t
    w2 = lambda t: wSr0 + dtn0
    h = HParams(w1=w1, w2=w2, grav=effective_g, aa=aa, delta=dtn0)
    if Run:
        c, p = psolver(h, q, T, n=n, init=init, talk=talk)
        if plt:
            for flag in plt:
                if flag == 'c':
                    figure(); plot(abs(c))
                if flag == 'b':
                    bars(c, p, -1)
                if flag == 'p':
                    pexpect(c, p, out=False, plt=True)
    selected = []
    for flag in ret:
        if flag == 'c':
            selected.append(c)
        if flag == 'p':
            selected.append(p)
        if flag == 'h':
            selected.append(h)
    return tuple(selected)
def psolver(ham,q=0.,T=arange(0,2,.02),dt0=.01,n=5,aa=1,init=0,talk='some',plt=False):
    """Solves p-space Schrodinger equation with parameters given by ham
    for initial data given by q and init, such that the initial wavefunction
    is given by:
        |Y> = Sum_{j=0}^{2n+1} init[j] |q+(j-n)k>
    for k = lattice wavenumber.
    init need not be supplied, and in this case the initial wavefunction is
        |Y> = |q>
    init may also be an integer, in which case it is interpreted to mean a
    lattice eigenvector with quasimomentum q and band index init >= 0.
    input parameter aa is an overall scaling of lattice depth, for convenience.
    If plt is None or a number, abs(coefficients) is plotted on figure(plt)

    Returns (c0, p): c0[i, :] are the momentum-space coefficients at time
    T[i]; p[i, :] the corresponding gravity-shifted momenta.
    """
    N=2*n+1 # Size of matrices
    c0 = zeros((len(T),N),dtype=complex) # Matrix of coefficients
    k = ham['k']; p_g = ham['p_g']; A = ham['A']; y = ham['y']; w = ham['w'];
    if init is None:
        c0[0,n] = 1.0 # Initial data: all weight on the central momentum |q>
    elif hasattr(init,'__len__'):
        c0[0,:] = init
    elif isinstance(init,int):
        # Band eigenstate: column ``init`` of the lattice eigenvectors at t=0.
        tmp = eigs1(q,k,aa*A(0),init+1,n)
        c0[0,:] = tmp[1][:,init]
    else:
        raise ValueError("init type not recognized. If you want a band eigenstate, make sure that init is an int.")
    P = (q + arange(-n,n+1)*k) # Momentum
    UP = eye(N,k=1); DN = eye(N,k=-1);
    # Note: The way momentum is organized is so that increasing the index by 1 adds k
    def D(coef,t): # Time derivative of coefficients
        ph = exp(-1.j*(w(t)*t - y(t))) # phase
        return -1.j * ((P-p_g(t))**2*coef + aa*A(t)/2. * ((1./ph)*DN.dot(coef) + ph*UP.dot(coef)))
    tol = 1.e-6 # Absolute tolerance for time integration
    finer = 1.5 # Increase in resolution after each successive integration attempt
    for i in range(len(T)-1):
        # Initial step size: bounded by dt0, the drive period, and the
        # current rate of change of the coefficients.
        dt = min(dt0,1./(abs(w(T[i]))+1.e-15),1./amax(abs(D(c0[i,:],T[i]))))
        nsteps = int(ceil((T[i+1]-T[i])/dt))
        coef = midpoint(c0[i,:],D,T[i],T[i+1],nsteps)
        err = tol*2
        # Refine (1.5x more steps each pass) until two successive
        # integrations of the same interval agree to within tol.
        while (err>tol):
            coef0 = coef
            nsteps = int(ceil(nsteps*finer))
            coef = midpoint(c0[i,:],D,T[i],T[i+1],nsteps)
            err = amax(abs(coef-coef0))
            if talk=='all':
                print("Convergence: ",err,' vs. ',tol)
                if err>tol:
                    print("Doing another iteration")
        if talk=='all':
            print("Time step ",i,": initial dt=",dt,", final error ",err,", nsteps=",nsteps,"\n")
        elif talk=='some':
            print("Completed time step ",i," of ",len(T))
        c0[i+1,:] = coef
    if plt is not False:
        figure(plt)
        plot(abs(c0))
    return c0, P-array([[p_g(t) for t in T]]).T
###################### Time steppers ##############################
def Euler(coef0, D, t0, t1, nsteps):
    """Integrate coef from time t0 to t1 in nsteps Euler steps.

    D is a function of (coef, t) returning the derivative of coef at that
    time. Returns the integrated coefficients.

    Fix: the original aliased ``coef = coef0`` and then applied ``+=``,
    silently mutating the caller's array in place — unlike ``midpoint``,
    which copies. The initial data is now copied, matching ``midpoint``.
    """
    coef = copy(coef0)  # copy so the in-place += cannot clobber the caller's data
    nsteps = int(nsteps)
    if nsteps <= 0:
        raise ValueError("Number of steps for stepper must be positive.")
    dt = (t1 - t0) / nsteps
    for i in range(nsteps):
        # Interpolate the time exactly between the endpoints to avoid drift
        # from repeatedly summing dt.
        t = t0 * (nsteps - i) / nsteps + t1 * i / nsteps
        coef += dt * D(coef, t)
    return coef
def midpoint(coef0, D, t0, t1, nsteps):
    """Integrate coef from time t0 to t1 with nsteps midpoint-rule steps.

    D is a function of (coef, t) returning the derivative of coef at that
    time. The initial data is copied, so the caller's array is untouched.
    """
    nsteps = int(nsteps)
    if nsteps <= 0:
        raise ValueError("Number of steps for stepper must be positive.")
    h = (t1 - t0) / nsteps
    state = copy(coef0)  # copy so changes don't propagate backwards
    for step in range(nsteps):
        # Interpolate the step time exactly between the interval endpoints.
        t = t0 * (nsteps - step) / nsteps + t1 * step / nsteps
        state += h * D(state + h * D(state, t) / 2., t + h / 2.)
    return state
########################## 1D Band structure #####################
""" These 1D band structure functions are borrowed from bands.py"""
def tridiag1(q, b, amp, n, M=False):
    """Tridiagonal (2n+1)x(2n+1) representation of the 1D optical-lattice
    Schrodinger equation at quasimomentum q.

    q: quasimomentum; b: reciprocal lattice basis vector; amp: lattice
    amplitude; n: momentum cutoff (fixes the matrix size).
    Returns the (upper, diagonal, lower) bands, or the dense matrix if M.
    """
    # Kinetic term on the diagonal: (q + j*b)^2 for j = -n..n
    dia = (q + b * arange(-n, n + 1)) ** 2
    # Lattice coupling amp/2 on both off-diagonals
    up = amp / 2. * ones(2 * n, float)
    dn = up.copy()
    if M:
        return diag(dia) + diag(up, 1) + diag(dn, -1)
    return up, dia, dn
def eigs1(q, b, amp, nbands, n=False, returnM=False):
    """Lowest ``nbands`` eigenenergies and eigenvectors of the 1D lattice at
    quasimomentum q.

    b: reciprocal lattice basis vector; amp: lattice amplitude; n: momentum
    cutoff (defaults to nbands; see bands.py). With returnM the dense matrix
    is returned as well.
    """
    if not n:
        n = nbands
    M = tridiag1(q, b, amp, n, True)
    energies, vectors = linalg.eigh(M)
    result = (energies[:nbands], vectors[:, :nbands])
    return result + (M,) if returnM else result
################# Visualizers #################
def getLat(ham, t, xs, aa=1):
    """Evaluate the lattice potential of Hamiltonian-dict ``ham`` at time t
    and position(s) xs (scalar or array); ``aa`` scales the lattice depth.
    A non-numeric 'grav' entry (e.g. None) contributes no tilt."""
    tilt = ham['grav']
    if not isinstance(tilt, numbers.Number):
        tilt = 0
    phase = ham['k'] * xs - ham['w'](t) * t + ham['y'](t)
    return aa * ham['A'](t) * cos(phase) + tilt * xs / 2
def plotLat(ham, t, xs=None, N=2, fig=None, aa=1):
    """Plot the lattice potential (via getLat) on figure ``fig``.

    xs gives the sample points; if omitted, N full periods centred on zero
    are drawn (xs overrides N)."""
    figure(fig)
    if xs is None:
        k = ham['k']
        step = 2. * pi / k / 50.
        xs = arange(-N * pi / k, N * pi / k + step, step)
    plot(xs, getLat(ham, t, xs, aa))
def bars(c, p, t, fn=abs, fig=None):
    """Bar-plot fn(coefficients) at time step t against momentum p."""
    figure(fig)
    # Shift by a quarter of the momentum spacing so bars appear centred.
    shift = (p[0, 1] - p[0, 0]) / 4.
    bar(p[t, :] - shift, fn(c[t, :]))
def pexpect(c, p, t=None, plt=False, out=True, fig=None):
    """Expectation value of momentum.

    With ``t`` given, return <p> at that single time step. Otherwise return
    the whole time series; optionally plot it on figure ``fig`` (``plt``)
    and/or suppress the return value (``out=False``)."""
    if t is not None:
        return abs(c**2)[t, :].dot(p[t, :])
    series = sum(abs(c**2) * p, 1)
    if not plt:
        return series
    figure(fig)
    plot(series)
    if out:
        return series
def checkUnitarity(c, plt=False, out='std', fig=None):
    """Check unitarity via the variation of the l2 norm of c over time.

    out='std' returns the relative standard deviation of the norms;
    out='l2s' returns the norm time series. ``plt`` also plots the series."""
    norms = sum(abs(c) ** 2, 1)
    if plt:
        figure(fig)
        plot(norms)
    if out == 'std':
        return numpy.std(norms) / mean(norms)
    if out == 'l2s':
        return norms
"""
def LFrame(c,p,ham,q,t,a,adot,nband):
p_g = ham['p_g'](t)
k = ham['k']; amps = ham['A'](t)
p1 = p + p_g
nq = eigs1(q-p_g-adot/2.,k,amps,nband+1,n=len(c)//2)[1][:,-1]
return nq.dot(exp(1j*p1*a)*c)
"""
def Dt(f, t0, dt=1.e-6):
    """Central-difference time derivative of f at t0 (half-step dt)."""
    return (f(t0 + dt) - f(t0 - dt)) / (2. * dt)
def LFrame(c, p, ham, t, T=None, band=None):
    """Transform the state (c, p) into the lattice (co-moving) frame at time t.

    If T is given, t is interpreted as an index and T[t] is the actual time.
    With band=None the transformed coefficients and momenta are returned;
    otherwise the populations |<band|state>|^2 for the requested band
    index/indices.

    Fix: the centre index was computed with true division
    (``n = (N-1)/2`` is a float under ``from __future__ import division``),
    which breaks array indexing (``p[n]``) on Python 3 / modern NumPy; it now
    uses floor division. The original also computed an unused phase ``ph``,
    which has been dropped.
    """
    if T is not None:  # t is an index and T[t] is the corresponding time
        c = c[t]; p = p[t]; t = T[t]
    k = ham['k']; A = ham['A']; y = ham['y']; w = ham['w']; dwdt = Dt(w, t)
    # Lattice velocity: w is the time-averaged angular frequency, so the
    # instantaneous frequency is d(w*t)/dt = w + t*dw/dt.
    v = (w(t) + dwdt * t) / k
    c2 = exp(1j * v**2 * t / 4.) * exp(1j * p * v * t) * c
    p2 = p - v / 2.
    if band is None:
        return c2, p2
    else:  # Project onto lattice-frame eigenstates
        if not hasattr(band, '__len__'):
            band = array([band])
        N = c2.shape[0]; n = (N - 1) // 2  # integer centre index (was float)
        q = p[n]
        evecs = eigs1(q, k, A(t), amax(band) + 1, n)[1]
        out = []
        for i in range(len(band)):
            out.append(abs(sum(evecs[:, i].dot(c2)))**2)
        return array(out)
def SFrame(c, p, ham, t, T=None, band=None):
    """Transform the state (c, p) into the stationary frame at time t.

    If T is given, t is interpreted as an index and T[t] is the actual time.
    With band=None the transformed coefficients and momenta are returned;
    otherwise the populations |<band|state>|^2 for the requested band
    index/indices.

    Fix: the centre index was computed with true division
    (``n = (N-1)/2`` is a float under ``from __future__ import division``),
    which breaks array indexing (``p[n]``) on Python 3 / modern NumPy; it now
    uses floor division.
    """
    if T is not None:
        c = c[t]; p = p[t]; t = T[t]
    k = ham['k']; A = ham['A']; y = ham['y']; w = ham['w'];
    # Lattice displacement B(t) and its rate of change.
    B = lambda tau: (w(tau) * tau - y(tau)) / k
    dBdt = Dt(B, t)
    c2 = exp(1j * B(t) * p) * c  # ignores an overall phase exp(-1j*int_0^t (.5*m*dBdt^2))
    p2 = p - .5 * dBdt
    if band is None:
        return c2, p2
    else:
        if not hasattr(band, '__len__'):
            band = array([band])
        N = c2.shape[0]; n = (N - 1) // 2  # integer centre index (was float)
        q = p[n]
        evecs = eigs1(q, k, A(t), amax(band) + 1, n)[1]
        out = []
        for i in range(len(band)):
            out.append(abs(sum(evecs[:, i].dot(c2)))**2)
        return array(out)
def SProj(c, p, ham, T, idx=slice(None), band=0, talk=False):
    """Project each selected time step of (c, p) onto stationary-frame
    lattice eigenstates via SFrame.

    idx selects time steps (a slice); band is a band index or a list of band
    indices. Returns a (len(idx), n_bands) array of band populations.

    Fix: the shape check previously parsed as
    ``(not c.shape == p.shape) and (c.shape[0] == len(T))`` because of
    operator precedence, so most inconsistent inputs slipped through; the
    intended condition is now parenthesized.
    """
    if not (c.shape == p.shape and c.shape[0] == len(T)):
        raise ValueError('c, p, and T do not have consistent shapes.')
    L = c.shape[0]
    if not hasattr(band, '__len__'):
        band = array([band])
    nb = band.shape[0]
    idx = range(L)[idx]
    L = len(idx)
    out = zeros((L, nb))
    for j in range(L):
        if talk:
            print('step {} of {}'.format(j, L))
        i = idx[j]
        out[j] = SFrame(c[i], p[i], ham, T[i], band=band)
    return out
"""
def Eproj(c,p,t,ham,T=None,band=5):
'''Project onto lattice eigenstates.'''
if not hasattr(band,'__len__'):
band = array([band])
N = c.shape[0]; n = (N-1)/2;
e,v = eigs1
|
# Python class example: Worker
# __init__ is the initializer, run automatically when an instance is created
# self refers to the instance itself
class Worker:
    """A simple employee record with a full name and a salary."""

    def __init__(self, name, pay):
        # Store the full name and current pay on the new instance.
        self.name = name
        self.pay = pay

    def firstName(self):
        """Return the first whitespace-separated word of the name."""
        return self.name.split()[0]

    def lastName(self):
        """Return the last whitespace-separated word of the name."""
        return self.name.split()[-1]

    def giveRaise(self, percent):
        """Increase pay in place by the given fractional percentage."""
        self.pay *= (1.0 + percent)
# Demo: build a Worker from a name typed on stdin and exercise its methods.
bob = Worker(input(), 20000)  # blocks until a full name is entered
print(bob.lastName())
print(bob.firstName())
bob.giveRaise(.20)
print(bob.pay)
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
def ReadIris():
    """Download the UCI iris data set and return the first 100 samples as a
    binary classification problem.

    Returns:
        X: (100, 2) array with sepal length and petal length (columns 0, 2).
        y: (100,) array of labels, -1 for Iris-setosa and +1 otherwise.

    Fix: the original called ``df.tail()`` and discarded the result — a
    leftover from interactive exploration; it has been removed.
    """
    df = pd.read_csv(
        'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
        header=None)
    y = df.iloc[0:100, 4].values
    y = np.where(y == 'Iris-setosa', -1, 1)
    X = df.iloc[0:100, [0, 2]].values
    return X, y
def ToStd(X):
    """Return a copy of X with each of its two columns standardised to zero
    mean and unit (population) standard deviation."""
    standardised = np.copy(X)
    for col in (0, 1):
        column = X[:, col]
        standardised[:, col] = (column - column.mean()) / column.std()
    return standardised
def ReadStdIrisTrainTest():
    """Load the iris data set (petal length & width), split 70/30 into
    train/test, and standardise both splits with statistics fitted on the
    training data only.

    Returns:
        (X_train_std, X_test_std, y_train, y_test)

    Fix: ``sklearn.cross_validation`` was deprecated in 0.18 and removed in
    0.20; prefer ``sklearn.model_selection`` and fall back to the legacy
    module-level import only on old installations.
    """
    try:
        from sklearn.model_selection import train_test_split as _split
    except ImportError:  # very old scikit-learn
        _split = train_test_split
    iris = datasets.load_iris()
    X = iris.data[:, [2, 3]]
    y = iris.target
    # Split into training and test data (30% held out), fixed seed.
    X_train, X_test, y_train, y_test = _split(X, y, test_size=0.3, random_state=0)
    sc = StandardScaler()
    # Compute mean and standard deviation on the training data only.
    sc.fit(X_train)
    # Standardise both splits with the training statistics.
    X_train_std = sc.transform(X_train)
    X_test_std = sc.transform(X_test)
    return X_train_std, X_test_std, y_train, y_test
|
# wapf to find fact of an integer
def fact(num):
    """Return num! (the factorial of a non-negative integer).

    Raises:
        ValueError: if num is negative (the original silently returned 1).
    """
    if num < 0:
        raise ValueError("factorial is not defined for negative numbers")
    result = 1
    for i in range(2, num + 1):
        result *= i
    return result
# Demo: permutations and combinations of r items chosen from n.
n = 12
r = 2
# n!/(n-r)! and n!/(r!*(n-r)!) are exact integers, so use floor division to
# keep the results as ints (plain / printed e.g. "132.0" instead of "132").
perm = fact(n) // fact(n - r)
comb = fact(n) // (fact(r) * fact(n - r))
print("perm = ", perm)
print("comb = ", comb)
# Write a function once... call it as often as you like.
# DRY ==> dont repeat yourself
#!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/9 20:02
import logging
# Log to both a file (everything from DEBUG up) and the console (INFO up),
# sharing one format that includes the timestamp, file, level and thread id.
file_handler = logging.FileHandler("mysql.log")
file_handler.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s - %(thread)d:%(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
logger = logging.getLogger('test')
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
logger.debug("hello ?")
if __name__ == "__main__":
    pass
#######################################################################################################################
"""
# Exercise 1
Write a program which performs the following tasks:
1. Download the Movielens datasets from the url ‘http://files.grouplens.org/datasets/movielens/ml25m.zip’
2. Download the Movielens checksum from the url ‘http://files.grouplens.org/datasets/movielens/ml25m.zip.md5’
3. Check whether the checksum of the archive corresponds to the downloaded one
4. In case of positive check, print the names of the files contained by the downloaded archive
## Answer to Exercise 1
"""
from urllib.request import urlretrieve, urlopen
import zipfile as zf
import hashlib as hl
import io
import os
print("Start of Exercise 1.\n")
## 1.1. download ml-25m.zip (the full MovieLens 25M archive, ~250 MB)
zip_url = 'http://files.grouplens.org/datasets/movielens/ml-25m.zip'
zip_file_name = 'ml-25m.zip'
print("## 1.1. downloading '{}'...".format(zip_file_name))
urlretrieve(zip_url, zip_file_name)  # saves the archive into the working directory
print("## 1.1. '{}' has been downloaded in {}\n".format(zip_file_name, os.getcwd()))
## 1.2. download the matching ml-25m.zip.md5 checksum file
md5_url = 'http://files.grouplens.org/datasets/movielens/ml-25m.zip.md5'
md5_file_name = 'ml-25m.zip.md5'
print("## 1.2. downloading '{}'...".format(md5_file_name))
urlretrieve(md5_url, md5_file_name)
print("## 1.2. '{}' has been downloaded in {}\n".format(md5_file_name, os.getcwd()))
## 1.3. checksum check
print("## 1.3. checksum check")
### 1.3.1. evaluate checksum of ml-25m.zip file
print("### 1.3.1. evaluate checksum of ml-25m.zip file")
# NOTE(review): this fetches the (large) archive a second time and holds it
# in memory just to list its contents later; opening the already-saved
# ml-25m.zip from disk would avoid the extra transfer.
ml_zip_resp = urlopen(zip_url)
ml_zip_file = zf.ZipFile(io.BytesIO(ml_zip_resp.read()), mode='r')
def get_md5_checksum(file_path):
    """Compute the MD5 hex digest of the file at ``file_path``, reading it
    in 8 KiB chunks so arbitrarily large files fit in constant memory."""
    digest = hl.md5()
    with open(file_path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
# Digest of the archive that was saved to disk in step 1.1.
checksum = get_md5_checksum(file_path=os.path.join(os.getcwd(), zip_file_name))
print("The MD5 checksum of '{}' is '{}'.".format(zip_file_name, checksum))
### 1.3.2. get checksum in md5 file
print("### 1.3.2. get checksum in md5 file")
ml_zip_md5_req = urlopen(md5_url)
# The .md5 file has the form "<hex digest>  <filename>"; keep the digest only.
checksum_md5 = ml_zip_md5_req.read().decode().split(' ')[0]
print("The MD5 checksum stated in '{}' is '{}'.".format(md5_file_name, checksum_md5))
### 1.3.3. final check
print("### 1.3.3. final check")
if checksum == checksum_md5:
    print('The two checksums match.\n')
else:
    print('The two checksums do not match.\n')
## 1.4. print names of the files in archive
# NOTE(review): the listing is printed regardless of whether the checksums
# matched; the exercise statement asks for it only on a positive check.
print("## 1.4. print names of the files in archive")
print("Here is the list of the names of the files of '{}'.\n{}\n".format(zip_file_name, ml_zip_file.namelist()))
print("End of Exercise 1.\n")
from src.data_utility import download_test, process_data, vectorize_data, read_topics
from text_generator import text_generator_test
from keras.models import load_model
import os
import json
import pickle
import numpy as np
# Batch prediction: load the trained bag-of-words model and write thresholded
# multi-label predictions for every vectorized test file.
test_path = 'test/'
max_news_length = 300  # maximum token length fed to the model
# One-off preprocessing steps (already done, hence commented out):
#download_test(test_path)
#process_data(test_path, False)
#vectorize_data(test_path)
word_to_index_pickle_file = "dictionary.pickle"
database_path = 'train/'
# Load the vocabulary; prefer the pickle cache, else parse the JSON and cache it.
if os.path.exists(word_to_index_pickle_file):
    with open(word_to_index_pickle_file, "rb") as f:
        word_to_index = pickle.load(f)
else:
    # NOTE(review): this open() is never closed explicitly; wrap in a
    # with-statement if this script grows.
    word_to_index = json.loads(open("dictionary.json").read())
    with open(word_to_index_pickle_file, "wb") as f:
        pickle.dump(word_to_index, f)
dict_size = len(word_to_index.keys()) + 1  # +1 presumably reserves an index for padding/unknown — confirm
batch_size = 64
(topics, topic_index, topic_labels) = read_topics(database_path)
n_class = len(topics)
##------------------load model and predict -----------------------------##
model = load_model('bow_model.h5')
test_files = os.listdir(test_path + 'REUTERS_CORPUS_2/vectorized/')
test_files.sort()  # alphabetical order defines the row order of the output
# NOTE(review): round() can over- or under-shoot; ceil(len/batch) is the usual
# step count — confirm the generator tolerates the extra step.
test_steps = round(len(test_files) / batch_size) + 1
test_generator = text_generator_test(batch_size, max_news_length, test_path, test_files, True, dict_size)
prob_test = model.predict_generator(test_generator, test_steps)
thres = 0.3  # probability threshold for assigning a label
pred_test = np.array(prob_test) > thres
# rows of the output matrix correspond to the alphabetical order of the test files
np.savetxt('results_bow.txt', pred_test, fmt='%d')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2019-10-25 03:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Role.permissons -> Role.permissions (fixes a field-name typo)."""

    dependencies = [
        ('rbac', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='role',
            old_name='permissons',
            new_name='permissions',
        ),
    ]
|
import datetime
import mock
from odoo.tests import tagged, SingleTransactionCase
import logging
_logger = logging.getLogger(__name__)
@tagged('post_install', '-at_install', 'addon_hr_customizations', 'post_holiday_events')
class TestPostHolidayEvents(SingleTransactionCase):
    """Verifies holiday events are posted to Google Calendar (API mocked)."""

    @classmethod
    def setUpClass(cls):
        """Create a test HR user, an employee, a holiday and a canned mock response."""
        # BUG FIX: the first parameter of a classmethod is the class object;
        # it was misleadingly named `self`.
        super(TestPostHolidayEvents, cls).setUpClass()
        user_group_base = cls.env.ref('base.group_user')
        Users = cls.env['res.users'].with_context({'no_reset_password': True})
        company_id = cls.env['res.company'].search([], limit=1)
        cls.user_employee = Users.create({
            'name': 'HR Employee 1',
            'login': 'hr_employee_1',
            'email': 'hr_employee_1@example.com',
            'company_id': company_id.id,
            'company_ids': [(6, 0, [company_id.id])],
            'groups_id': [(6, 0, [user_group_base.id])]
        })
        # here can test for managers, in this case with the user is enough
        user_group_hr_user = cls.env.ref('hr.group_hr_user')
        # setup users
        cls.user_employee.write({'groups_id': [(4, user_group_hr_user.id)]})
        # Create Employee
        cls.employee_1 = cls.env['hr.employee'].create({
            'first_name': 'Employee_1',
            'last_name': 'T',
            'private_email': 'testing_hiring@example.com',
            'tz': 'UTC',
            'user_id': cls.user_employee.id,
        })
        # Create Holiday
        cls.holiday_1 = cls.env['holidays.holiday'].create({
            'name': 'Test Holiday 001',
            'holiday_date': datetime.datetime.today(),
            'country_name': cls.env['res.country'].search([], limit=1).id,
        })
        cls.fake_response_1 = {
            'holiday': cls.holiday_1.id,
            'employee_id': cls.employee_1.id,
            'google_event_id': 'test01event'
        }

    def test_post_holiday_events(self):
        """The mocked calendar call must return the canned response unchanged."""
        _logger.info("[addon_hr_customizations] Post Holiday Events")
        fake_response_google_event = mock.MagicMock()
        fake_response_google_event.return_value = self.fake_response_1
        with mock.patch('odoo.addons.addon_hr_customizations.models.hr_employee_work_location.HolidaysHoliday'
                        '.create_holiday_events_on_google_calendar', fake_response_google_event):
            response = self.holiday_1.create_holiday_events_on_google_calendar()
        _logger.info("[addon_hr_customizations] Return Value")
        # BUG FIX: the "Expected" line previously logged the *received* value,
        # making the two log lines always identical even on a mismatch.
        _logger.info("[addon_hr_customizations] Expected Response: %s", self.fake_response_1)
        _logger.info("[addon_hr_customizations] Received Response: %s", response)
        self.assertEqual(self.fake_response_1, response)
        _logger.info("[addon_hr_customizations] Post Holiday Events - SUCCEEDED")
|
from decimal import Decimal
from strawberry.utils.debug import pretty_print_graphql_operation
def test_pretty_print(mocker):
    """A query with no variables prints only the highlighted query text."""
    print_spy = mocker.patch("builtins.print")
    pretty_print_graphql_operation("Example", "{ query }", variables={})
    print_spy.assert_called_with("{ \x1b[38;5;125mquery\x1b[39m }\n")
def test_pretty_print_variables(mocker):
    """Integer variables are pretty-printed as highlighted JSON."""
    print_spy = mocker.patch("builtins.print")
    pretty_print_graphql_operation("Example", "{ query }", variables={"example": 1})
    expected = (
        "{\n\x1b[38;5;250m "
        '\x1b[39m\x1b[38;5;28;01m"example"\x1b[39;00m:\x1b[38;5;250m '
        "\x1b[39m\x1b[38;5;241m1\x1b[39m\n}\n"
    )
    print_spy.assert_called_with(expected)
def test_pretty_print_variables_object(mocker):
    """Non-JSON-native variable values (Decimal) print via their repr string."""
    print_spy = mocker.patch("builtins.print")
    pretty_print_graphql_operation(
        "Example", "{ query }", variables={"example": Decimal(1)}
    )
    expected = (
        "{\n\x1b[38;5;250m "
        '\x1b[39m\x1b[38;5;28;01m"example"\x1b[39;00m:\x1b[38;5;250m '
        "\x1b[39m\x1b[38;5;124m\"Decimal('1')\"\x1b[39m\n}\n"
    )
    print_spy.assert_called_with(expected)
|
#-*-coding:utf8-*-
'''
Created on 2014-10-12
@author: Administrator
'''
#-*-coding:utf8-*-
import sys
import datetime
from xml.etree import ElementTree as ET
from com.util.pro_env import PROJECT_CONF_DIR
import os
if __name__ == '__main__':
    # NOTE(review): Python 2 only — reload() is a builtin there; presumably
    # this preceded a sys.setdefaultencoding() call that was removed. Confirm.
    reload(sys)
    today = datetime.date.today()
    yestoday = today + datetime.timedelta(-1)
    # compute yesterday's date string
    dt = yestoday.strftime('%Y-%m-%d')
    # load the master workflow configuration file
    xmlTree = ET.parse(PROJECT_CONF_DIR + "workflow.xml")
    # fetch all <task> nodes
    workflow = xmlTree.findall('./task')
    for task in workflow:
        # module name is the node's text content
        moduleName = task.text
        if moduleName == "exe_hive":
            # this module runs several functions: pass the task's 'type'
            # attribute as an extra argument on the command line
            shell = "python " + moduleName + ".py " + task.attrib.get('type') + " " + dt
            # run the command
            os.system(shell)
        else :
            shell = "python " + moduleName + ".py " + dt
            os.system(shell)
import scipy
import numpy
import configparser
from tkinter import filedialog
from collections import defaultdict
import pandas as pd
# Builds the state sets
def build_states():
    """Prompt for a state configuration (.ini) file and return its [STATES] section as a dict."""
    # BUG FIX: the dialog advertised "hdf5 files" although the pattern is *.ini
    state_file = filedialog.askopenfile(title="Select a State Configuration File",
                                        filetypes=(("ini files", "*.ini"),
                                                   ("all files", "*.*")))
    config_state = configparser.ConfigParser()
    config_state.read(state_file.name)
    return dict(config_state['STATES'])
# Builds the transitions of interest for comparison to data
def transitions_interested():
    """Prompt for a state configuration (.ini) file and return its [TRANSITIONS] section as a dict."""
    # BUG FIX: the dialog advertised "hdf5 files" although the pattern is *.ini
    state_file = filedialog.askopenfile(title="Select a State Configuration File",
                                        filetypes=(("ini files", "*.ini"),
                                                   ("all files", "*.*")))
    config_state = configparser.ConfigParser()
    config_state.read(state_file.name)
    return dict(config_state['TRANSITIONS'])
def build_transitions(state_set):  # builds the transition dictionary in general for the states described
    """Build the initial transition-rate dict for every ordered pair of distinct states.

    Prompts for an initial-guess .ini file; transitions absent from that file
    default to 1.0.
    """
    state_file = filedialog.askopenfile(title="Select an Initial Guess File",
                                        filetypes=(("ini files", "*.ini"),
                                                   ("all files", "*.*")))
    config_state = configparser.ConfigParser()
    config_state.read(state_file.name)
    initial_guess = dict(config_state['INITIAL_GUESS'])
    # every ordered pair of distinct states names one transition "A_B"
    transition_list = [a + "_" + b
                       for a in state_set
                       for b in state_set
                       if a != b]
    # BUG FIX: configparser values are strings; the old code mixed the int
    # default 1 with string guesses. Coerce everything to float so the dict
    # holds numbers uniformly.
    return {t: (float(initial_guess[t]) if t in initial_guess else 1.0)
            for t in transition_list}
def import_experimental(state):
    """Prompt for (and return) the experimental data file for the given transition.

    BUG FIX: the old body referenced an undefined name `transition`
    (NameError on every call) and discarded the opened file handle.
    """
    return filedialog.askopenfile(
        title="Select the experimental data file for " + str(state) + " transition.",
        filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
# Varies the specified parameter by the input percentage and checks the output with the experimental data
# Main loop and logic for choosing parameters and step sizes
def main_loop():
    """Interactively load the state config and print states, transitions and initial guesses."""
    states = build_states()
    print(states)
    print(transitions_interested())
    print(build_transitions(states))
main_loop()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.template.defaultfilters import slugify
from django_countries.fields import CountryField
from profiles.models import Profile
# Create your models here.
class PropertyManager(models.Manager):
    """Manager exposing helpers for published, available rental properties."""

    def _available_rentals(self, property_type):
        # Single place for the shared availability filter, so the four public
        # helpers below cannot drift out of sync.
        return self.filter(property_type=property_type, is_rental=True,
                           is_available=True, is_published=True)

    def get_all_available_units_for_rent(self):
        """All units ('UN') currently available for rent."""
        return self._available_rentals('UN')

    def get_all_available_townhouses_for_rent(self):
        """All townhouses ('TW') currently available for rent."""
        return self._available_rentals('TW')

    def get_all_available_houses_for_rent(self):
        """All houses ('HS') currently available for rent."""
        return self._available_rentals('HS')

    def get_all_available_villas_for_rent(self):
        """All villas ('VL') currently available for rent."""
        return self._available_rentals('VL')
class Property(models.Model):
    """A property listing (rental and/or sale) with address, price and amenity data."""

    # Two-letter codes stored in `property_type`
    PROPERTY_CHOICES = (
        ('UN', 'Unit'),
        ('TW', 'TownHouse'),
        ('VL', 'Villa'),
        ('HS', 'House'),
    )
    name = models.CharField(max_length=200, blank=True, null=True)
    is_available = models.BooleanField(default=False)
    is_sale = models.BooleanField(default=False)
    is_rental = models.BooleanField(default=False)
    is_published = models.BooleanField(default=False)
    available_date = models.DateField(null=True, blank=True)
    price = models.CharField(max_length=50, blank=True, null=True)
    summary = models.CharField(max_length=500, blank=True, null=True)
    description = models.TextField(max_length=3000, blank=True, null=True)
    address1 = models.CharField(max_length=100, blank=True, null=True)
    address2 = models.CharField(max_length=100, blank=True, null=True)
    address3 = models.CharField(max_length=100, blank=True, null=True)
    city = models.CharField(max_length=100, blank=True, null=True)
    state = models.CharField(max_length=50, blank=True, null=True)
    country = CountryField(blank=True, null=True)
    postcode = models.CharField(max_length=10, blank=True, null=True)
    property_type = models.CharField(
        max_length=2,
        choices=PROPERTY_CHOICES,
        default='UN',
    )
    bedrooms = models.IntegerField(blank=True, null=True)
    bathrooms = models.IntegerField(blank=True, null=True)
    carparking = models.IntegerField(blank=True, null=True)
    built_up_area = models.CharField(max_length=50, blank=True, null=True)
    # Owner profile; nullable, so code must tolerate profile being None.
    profile = models.ForeignKey(Profile, blank=True, null=True, related_name='properties', on_delete=models.CASCADE)

    objects = PropertyManager()

    def get_all_images(self):
        """Return all Image rows attached to this property (ordered by position)."""
        images = self.images.all()
        return images

    def __str__(self):
        # BUG FIX: `profile` is nullable; the old code raised AttributeError
        # for properties without an owner profile.
        if self.profile is None:
            return "%s" % self.name
        return "%s - %s %s" % (self.name, self.profile.first_name, self.profile.last_name)
class Image(models.Model):
    """An ordered photo attached to a Property."""

    # BUG FIX: on_delete is mandatory from Django 2.0; CASCADE matches the
    # implicit pre-2.0 default, so behaviour is unchanged.
    property = models.ForeignKey(Property, related_name='images', on_delete=models.CASCADE)
    file = models.ImageField(upload_to='static/images/')
    position = models.PositiveSmallIntegerField(default=0)

    class Meta:
        ordering = ['position']

    def __str__(self):
        return '%s - %s'%(self.property.name, self.file)
|
# https://www.codewars.com/kata/rot13/
def rot13(s):
    """Return `s` with every ASCII letter rotated 13 places (ROT13).

    Non-letters (and non-ASCII characters) pass through unchanged. Uses a
    precomputed translation table instead of the old per-character ord()
    arithmetic and quadratic string concatenation.
    """
    lower = 'abcdefghijklmnopqrstuvwxyz'
    upper = lower.upper()
    # maketrans maps each letter to the one 13 positions later, wrapping.
    table = str.maketrans(
        lower + upper,
        lower[13:] + lower[:13] + upper[13:] + upper[:13],
    )
    return s.translate(table)
# Quick manual check: 'EBG13 rknzcyr.' decodes to 'ROT13 example.'
test = 'EBG13 rknzcyr.'
ans = rot13(test)
print('ans', ans)
|
""" Function implementations for standup feature """
from datetime import datetime
import message as msg
import source_data
import threading
import error
import time
# Module-level views into the shared data store; mutations through these
# aliases are visible to every other module importing source_data.
all_channels = source_data.data["channels"]
all_users = source_data.data["users"]
all_messages = source_data.data["messages"]
def standup_start(token, channel_id, length):
    """
    Start a standup in a channel lasting `length` seconds.

    Parameters:
        - token: An authorisation hash for the user calling the function
        - channel_id (int): The channel in which to start the standup
        - length (int): Standup duration in seconds

    Return:
        - time_finish (Unix timestamp): when the standup will end

    Possible errors:
        - Invalid channel_id (InputError) - Channel doesn't exist
        - Standup already active (InputError)
        - Invalid token (AccessError) - Caller has logged out
    """
    # Error checking — token validation added for consistency with the other
    # standup functions, which all reject logged-out callers.
    check_invalid_token(token)
    source_data.valid_channel(channel_id)
    matching_channel = source_data.get_channelinfo(channel_id)
    if matching_channel["standup"]["is_active"] == True:
        raise error.InputError("Standup already active")
    # Renamed from `standup_start`, which shadowed this function's own name.
    time_start = int(datetime.now().timestamp())
    time_finish = time_start + length
    matching_channel["standup"]["is_active"] = True
    matching_channel["standup"]["time_finish"] = time_finish
    # A background timer flushes and closes the standup when it expires.
    timer = threading.Timer(length, finish_standup, args=[token, channel_id])
    timer.start()
    return {
        'time_finish': time_finish
    }
def finish_standup(token, channel_id):
    """Package all buffered standup messages into one message, send it, and
    reset the channel's standup state."""
    channel_info = source_data.get_channelinfo(channel_id)
    buffered = channel_info["standup"]["standup_messages"]
    # One "handle: message" line per buffered entry, newline-separated.
    packaged_msg = "\n".join(
        "{}: {}".format(entry["handle_str"], entry["message"])
        for entry in buffered
    )
    msg.message_send(token, channel_id, packaged_msg)
    """ Edge case: If user who starts standup logs out before it ends
    message_id = len(all_messages)
    react = {'react_id': 1, 'u_ids': [], 'is_this_user_reacted': False}
    new_msg = {}
    new_msg['channel_id'] = channel_id
    new_msg['message_id'] = message_id
    new_msg['u_id'] = source_data.token2id(token)
    new_msg['message'] = packaged_msg
    new_msg['time_created'] = int(datetime.now().timestamp())
    new_msg['reacts'] = [react]
    all_messages.append(new_msg)
    channel_info = source_data.get_channelinfo(channel_id)
    channel_info['messages'].append(new_msg)
    """
    # Deactivate and clear the standup on the matching channel record.
    for channel in all_channels:
        if channel["channel_id"] == channel_id:
            channel["standup"]["is_active"] = False
            channel["standup"]["time_finish"] = None
            channel["standup"]["standup_messages"].clear()
def standup_active(token, channel_id):
    """
    Report whether a standup is currently running in the given channel.

    Parameters:
        - token: An authorisation hash for the user calling the function
        - channel_id (int): The channel being queried

    Return:
        - is_active (bool): whether a standup is running
        - time_finish (Unix timestamp or None): when the standup ends,
          or None when no standup is active

    Possible errors:
        - Invalid channel_id (InputError) - Channel doesn't exist
        - Invalid token (AccessError) - Caller has logged out
    """
    # Error checking (same order as the original contract)
    check_invalid_token(token)
    source_data.valid_channel(channel_id)
    check_caller_not_member(token, channel_id)

    standup = source_data.get_channelinfo(channel_id)["standup"]
    running = bool(standup["is_active"])
    return {
        "is_active": running,
        "time_finish": standup["time_finish"] if running else None,
    }
def standup_send(token, channel_id, message):
    """
    Buffer a message into a channel's active standup queue; the whole queue
    is flushed as one message when the standup finishes.

    Parameters:
        - token: An authorisation hash for the user calling the function
        - channel_id (int): The channel whose standup receives the message
        - message (string): The message to buffer

    Return: No returns

    Possible errors:
        - Invalid channel_id (InputError) - Channel doesn't exist
        - Message too long (InputError) - Message is over 1000 chars long
        - No active standup (InputError) - No standup is currently active
        - Not member (AccessError) - Caller is not a member of channel
        - Invalid token (AccessError) - Caller has logged out
    """
    # Error checks, kept in the original order so the same error wins ties.
    check_invalid_token(token)
    source_data.valid_channel(channel_id)
    check_inactive_standup(channel_id)
    check_caller_not_member(token, channel_id)
    check_msg_exceeding_len(message)

    entry = {
        "handle_str": source_data.find_matching_user_dict_token(token)["handle_str"],
        "message": message
    }
    for chan in all_channels:
        if chan["channel_id"] == channel_id:
            chan["standup"]["standup_messages"].append(entry)
#################################################################
### Error checking and helper functions for standup functions ###
#################################################################
def get_token_user_info(token):
    """ Return the user dict whose token matches, or None.

    If several users somehow share a token, the last match wins (preserving
    the original loop's behaviour)."""
    matches = [user for user in all_users if user["token"] == token]
    return matches[-1] if matches else None
def check_invalid_token(token):
    """ Raise AccessError when the token does not map to any known user """
    user = get_token_user_info(token)
    if user is None:
        raise error.AccessError("Invalid token")
def check_caller_not_member(token, channel_id):
    """ Raise AccessError unless the token's user belongs to the given channel """
    caller_id = get_token_user_info(token)["id"]
    members = source_data.get_channelinfo(channel_id)["members"]
    if not any(member["u_id"] == caller_id for member in members):
        raise error.AccessError(f"Not a member of channel {channel_id}")
def check_msg_exceeding_len(message):
    """ Raise InputError when the message exceeds the 1000-character limit """
    limit = 1000
    if len(message) > limit:
        raise error.InputError("Message is over 1000 chars long")
def check_inactive_standup(channel_id):
    """ Raise InputError when the given channel has no running standup """
    standup = source_data.get_channelinfo(channel_id)["standup"]
    if standup["is_active"]:
        return
    raise error.InputError(f"There is no active standup in channel {channel_id}")
|
# Prompt for a list index and print the element, reporting out-of-range indices.
a = [1, 2, 3, 4, 5, 6]
r = int(input("enter the index"))
try:
    print(a[r])
# BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit
# and mislabelled any unrelated failure; only IndexError means
# "index out of range".
except IndexError:
    print("index out of range")
finally:
    print("inside finally")
from berserker.utils import maybe_download_unzip
from pathlib import Path
import tensorflow as tf
import numpy as np
# Bundled asset and model locations, resolved relative to this package.
ASSETS_PATH = str(Path(__file__).parent / 'assets')
_models_path = Path(__file__).parent / 'models'
from berserker.transform import batch_preprocess, batch_postprocess
# BERT sequence budget: 512 tokens total; SEQ_LENGTH leaves room for the
# two special tokens ([CLS]/[SEP] — presumably; TODO confirm).
MAX_SEQ_LENGTH = 512
SEQ_LENGTH = MAX_SEQ_LENGTH - 2
BATCH_SIZE = 8
def load_model(model_name=None, verbose=True, force_download=False):
    """Ensure the pretrained model archive is present locally, downloading
    and unzipping it on first use.

    model_name is currently unused — presumably reserved for selecting among
    multiple released models; TODO confirm before relying on it.
    verbose and force_download are forwarded to the downloader.
    """
    maybe_download_unzip(
        'https://github.com/Hoiy/berserker/releases/download/v0.1-alpha/1547563491.zip',
        _models_path,
        verbose,
        force_download
    )
# Lazily-created TF predictor, cached so repeated tokenize() calls do not
# reload the saved model from disk on every call.
_predictor = None


def tokenize(text):
    """Segment `text` using the pretrained Berserker BERT model.

    Downloads the model on first use and caches the TensorFlow predictor
    across calls (previously the saved model was reloaded every call).
    """
    global _predictor
    load_model()
    texts = [text]
    bert_inputs, mappings, sizes = batch_preprocess(texts, MAX_SEQ_LENGTH, BATCH_SIZE)
    if _predictor is None:
        _predictor = tf.contrib.predictor.from_saved_model(
            str(_models_path / '1547563491')
        )
    bert_outputs = _predictor(bert_inputs)
    bert_outputs = [{'predictions': bo} for bo in bert_outputs['predictions']]
    return batch_postprocess(texts, mappings, sizes, bert_inputs, bert_outputs, MAX_SEQ_LENGTH)[0]
|
#!/usr/bin/python2
import os
import sys
# Install Hadoop and the JDK, then (re)create the datanode storage directory.
os.system("yum install hadoop -y")
os.system("yum install jdk -y")
os.system("rm -rf /data")
os.system("mkdir /data")
#hdfs
# Write hdfs-site.xml pointing the datanode's storage at /data.
fh=open("/etc/hadoop/hdfs-site.xml","w")
x='''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<!--testing 1-->
<configuration>
<property>
<name>dfs.data.dir</name>
<value>/data</value>
</property>
</configuration>'''
# writing files
fh.write(x)
fh.close()
# Read the namenode host/IP from the task file (first line).
# NOTE(review): assumes /root/Desktop/task.txt exists and its first line is
# the namenode address — confirm against the provisioning step.
fo=open("/root/Desktop/task.txt","r")
i=fo.readline()
i=i.strip()
print(i)
fo.close()
#core
# Write core-site.xml with the namenode URI built from the task file.
fh=open("/etc/hadoop/core-site.xml","w")
x='''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--testing 1-->
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://'''+i+''':9001</value>
</property>
</configuration>'''
fh.write(x)
fh.close()
# Restart the datanode so the new configuration takes effect.
os.system("hadoop-daemon.sh stop datanode")
os.system("hadoop-daemon.sh start datanode")
|
from trip_builder import TripBuilder
from charger_context import ChargerContext
from trip import Stop
from routing import Osrm
from trip import Coordinate
from trip import RoadSegment
from trip import ChargerConnection
from routing import Route
from ..utility import RoundUp
class DistanceTripBuilder(TripBuilder, object):
    """Trip builder driven by a fixed list of (distance, duration) legs.

    Each leg becomes a charger stop; when the trip has no destination
    charger, the final leg is repeated as an explicit 'Destination' stop
    (matching the original behaviour).
    """

    def __init__(self, distances, hasDestinationCharger=None):
        # distances: sequence of (distance, duration) tuples, one per leg
        self.NumberOfStops = len(distances) + 1
        self.Distances = distances
        self.HasDestinationCharger = hasDestinationCharger
        self.ChargerContext = ChargerContext()
        super(DistanceTripBuilder, self).__init__()

    def GetNumberOfStops(self):
        return self.NumberOfStops

    def GetHasDestinationCharger(self):
        return self.HasDestinationCharger

    def _build_stop(self, leg, name):
        """Build one Stop for a (distance, duration) leg using the standard charger connection."""
        distanceFromPrevious, durationFromPrevious = leg[0], leg[1]
        energyExpended = RoundUp(self.Vehicle.Drive(RoadSegment(distanceFromPrevious, durationFromPrevious, 0)))
        return Stop(leg, name, energyExpended, distanceFromPrevious,
                    self.ConvertToTimeBlock(durationFromPrevious),
                    ChargerConnection(0.13, 50, 125, 400))

    def GetRoute(self):
        """Return the Route: a synthetic start stop plus one charger stop per
        leg, and a trailing 'Destination' stop when there is no destination
        charger.

        REFACTOR: the previous if/else branches repeated an identical loop
        body verbatim; the loop now runs once for both cases.
        """
        route = [Stop(0, "Start", 0, 0, 0, ChargerConnection(0, 0))]
        for i, leg in enumerate(self.Distances):
            route.append(self._build_stop(leg, 'Charger_{0}'.format(i)))
        if not self.HasDestinationCharger:
            # As before, the destination stop reuses the last leg's data.
            route.append(self._build_stop(self.Distances[-1], 'Destination'))
        return Route(route, '', [])
# coding=utf-8
"""
ZeldaPlayer Module
"""
import pygame
from pygame.locals import (K_UP, K_DOWN, K_LEFT, K_RIGHT, RLEACCEL)
from base.abstract_sprite_manager import AbstractSpriteManager
from base.settings import FileUtil
class PlayerSpritesImages(AbstractSpriteManager):
    """ Images to sprite.

    Loads the player's frames from the shared sprite sheet, applies a colour
    key for transparency, and prepares the initial image/rect. """
    def __init__(self, screen, pos=(0, 0), scale=1.0, color_key=(116, 116, 116)):
        super(PlayerSpritesImages, self).__init__()
        self._color_key = color_key
        self._filename = '../sprites/aula05-spritesheet.png'
        self._scale = scale
        # transform to scale
        # Default spawn: centre of the screen (sprite is 16x16; offset by half).
        w, h = screen.get_size()
        kwargs = {'topleft': (w / 2 - 16, h / 2 - 16)}
        if pos != (0, 0):
            kwargs = {'topleft': pos}
        # Start on frames (0, 1) — presumably the facing-down pair; TODO confirm.
        self._load_sprites().set_index((0, 1)).prepare_image(**kwargs)

    def _load_sprites(self):
        """Slice the walking frames out of the sheet and append flipped copies
        for leftward movement. Returns self for chaining."""
        # constants
        size = (16, 16)
        # (x, y) of each 16x16 frame on the sheet
        basic_movement = ((1, 11), (18, 11), (35, 11), (52, 11), (69, 11), (86, 11))
        # get sprites
        img = FileUtil(self._filename).get()
        img = pygame.image.load(img).convert_alpha()
        img.set_colorkey(self._color_key, RLEACCEL)
        for pos in basic_movement:
            rect = (pos[0], pos[1], size[0], size[1])
            self._sprites.append(img.subsurface(rect))
        # Add left movement
        for pos in basic_movement:
            rect = (pos[0], pos[1], size[0], size[1])
            if pos[0] in [35, 52]:
                # Flip the image to left movement (only the right-facing frames)
                self._sprites.append(pygame.transform.flip(img.subsurface(rect), True, False))
        return self
class PlayerLimitsRules:
    """ Screen wrap-around rules: a rect that fully leaves one edge of the
    screen re-enters from the opposite edge. """

    def __init__(self, rect, screen):
        self._screen = screen
        self._rect = rect

    def check(self):
        """ Apply the wrap-around rules to the wrapped rect in place """
        width, height = self._screen.get_size()
        rect = self._rect
        if rect.top < -rect.height:
            # fully above the screen -> re-enter from the bottom
            rect.top = height - rect.height
        elif rect.top > height:
            rect.top = -rect.height
        elif rect.left < -rect.width:
            # fully left of the screen -> re-enter from the right
            rect.left = width - rect.width
        elif rect.left > width:
            rect.left = -rect.width
class ZeldaPlayer(pygame.sprite.Sprite):
    """ Keyboard-controlled player sprite with screen wrap-around and
    single-step undo of its last move. """
    def __init__(self, screen, sprites, speed=4, scale=1.0):
        super(ZeldaPlayer, self).__init__()
        self._scale = scale
        self._speed = speed  # pixels moved per key press per frame
        self._sprites = sprites
        self._screen = screen
        # image load
        self._sprite = PlayerSpritesImages(screen=screen, scale=1.8)
        self.image, self.rect = self._sprite.image_rect
        # snapshot used by undo()
        self.rect_undo = self.rect.copy()
        # sprites groups
        self._add_sprites()

    def _add_sprites(self):
        """ Add object to sprites (register this player in every group). """
        for s in self._sprites:
            s.add(self)
        return self

    def _update_image_rect(self, *pos):
        # Swap to the frame pair for the current direction, keeping position.
        self.image, self.rect = self._sprite.set_index(*pos).prepare_image(topleft=self.rect.topleft).image_rect
        return self

    def move(self, pressed_keys):
        """ Set player movement.

        Keys are checked in DOWN/RIGHT/UP/LEFT order; several may apply in
        one frame (diagonals). Saves the pre-move rect for undo(). """
        self.rect_undo = self.rect.copy()
        if pressed_keys[K_DOWN]:
            self._update_image_rect((0, 1))
            self.rect.move_ip(0, self._speed)
        if pressed_keys[K_RIGHT]:
            self._update_image_rect((2, 3))
            self.rect.move_ip(self._speed, 0)
        if pressed_keys[K_UP]:
            self._update_image_rect((4, 5))
            self.rect.move_ip(0, -self._speed)
        if pressed_keys[K_LEFT]:
            self._update_image_rect((6, 7))
            self.rect.move_ip(-self._speed, 0)
        self._check_limits()
        return self

    def _check_limits(self):
        """ Check borders limits to _player (wrap around screen edges) """
        PlayerLimitsRules(self.rect, self._screen).check()
        return self

    def undo(self):
        """ Undo position (restore the rect saved at the start of move()) """
        self.rect = self.rect_undo.copy()
        return self

    def update(self, *args, **kwargs) -> None:
        """ Update: move when the caller supplies 'pressed_keys'. """
        pressed_keys = kwargs.get('pressed_keys')
        if pressed_keys:
            self.move(pressed_keys)
|
import math
# Demo loop: print the integers 0..9.
for i in range(10) :
    print(i)
# Cocktail name -> set of ingredients.
drinks = {
    'martini': {'vodka', 'vermouth'},
    'black russian': {'vodka', 'kahlua'},
    'white russian': {'cream', 'kahlua', 'vodka'},
    'manhattan': {'rye', 'vermouth', 'bitters'},
    'screwdriver': {'orange juice', 'vodka'}
}
# Print every drink whose recipe contains cream.
for n, c in drinks.items():
    if 'cream' in c:
        print(n,c)
|
from faker import Faker
import numpy as np
import os
import random
import scipy.stats as stats
import Consts
from DriverModel import IDM, TruckPlatoon
from Utils import MixtureModel
from Vehicle import Car, Truck, PlatoonedTruck
class Garage(object):
def __init__(self, seed, short_seed, car_pct, truck_pct, car_length,
             truck_length, platoon_chance, min_platoon_length,
             max_platoon_length, min_platoon_gap, max_platoon_gap):
    """Vehicle factory holding the seeded RNGs and sampling distributions
    used to generate cars, trucks and truck platoons.

    *_pct / platoon_chance are percentages (0-100); the velocity, gap and
    weight distributions are attached later via the configure_*() methods.
    """
    self._seed = seed
    self._short_seed = short_seed  # integer seed for numpy RandomState
    # Car generation state
    self._car_pct = car_pct
    self._car_velocities = None
    self._car_gaps = None
    self._car_length = car_length
    self._generated_car_velocities = []
    self._generated_car_gaps = []
    # Truck generation state
    self._truck_pct = truck_pct
    self._truck_velocities = None
    self._truck_gaps = None
    self._truck_length = truck_length
    self._generated_truck_velocities = []
    self._generated_truck_gaps = []
    self._truck_unloaded_weights = None
    self._truck_loaded_weights = None
    self._truck_weights = None
    self._generated_truck_weights = []
    # Platoon parameters; separate Random instances (same seed) so each
    # decision stream is independently reproducible.
    self._platoon_pct = platoon_chance
    self._min_platoon_length = min_platoon_length
    self._max_platoon_length = max_platoon_length
    self._platoon_lengths = random.Random(seed)
    self._min_platoon_gap = min_platoon_gap
    self._max_platoon_gap = max_platoon_gap
    self._platoon_gaps = random.Random(seed)
    self._platoon_loading = random.Random(seed)
    self._random = random.Random(seed)
    # Deterministic UUIDs for vehicle identity
    self._uuid_generator = Faker()
    self._uuid_generator.seed_instance(seed)
    # Counters for generated vehicles
    self._cars = 0
    self._trucks = 0
    self._truck_platoons = 0
    if Consts.DEBUG_MODE:
        self._debug_file = open('debug/garage.txt', 'w')
    # Pick a unique output directory per seed, suffixing ':N' on collision.
    # NOTE(review): ':' is not a legal path character on Windows — confirm
    # this only runs on POSIX systems.
    path = 'output/{}/{}{}'
    if os.path.isdir(path.format(Consts.BASE_OUTPUT_DIR, self._seed, '')):
        counter = 0
        while os.path.isdir(path.format(Consts.BASE_OUTPUT_DIR, self._seed, ':{}'.format(counter))):
            counter += 1
        self.path = path.format(Consts.BASE_OUTPUT_DIR, self._seed, ':{}'.format(counter))
    else:
        self.path = path.format(Consts.BASE_OUTPUT_DIR, self._seed, '')
def configure_car_velocities(self, car_speed, car_speed_variance, car_speed_dist):
car_min_speed = (1 - (car_speed_variance / 100))
car_max_speed = (1 + (car_speed_variance / 100))
if car_speed_variance > 0 and car_speed_dist == 0:
car_std_speed = ((car_speed * car_max_speed) - (
car_speed * car_min_speed)) / 4
self._car_velocities = stats.truncnorm(
((car_speed * car_min_speed) - car_speed) / car_std_speed,
((car_speed * car_max_speed) - car_speed) / car_std_speed,
loc=car_speed, scale=car_std_speed)
elif car_speed_variance == 0 or car_speed_dist == 1:
self._car_velocities = stats.uniform(
loc=(car_speed * car_min_speed),
scale=(car_speed * car_max_speed) - car_speed)
else:
raise RuntimeError('Could not configure car velocities with the '
'given settings!')
self._car_velocities.random_state = np.random.RandomState(
seed=self._short_seed)
def configure_car_gaps(self, car_gap, car_gap_variance, car_gap_dist):
car_min_gap = (1 - (car_gap_variance / 100))
car_max_gap = (1 + (car_gap_variance / 100))
if car_gap_variance > 0 and car_gap_dist == 0:
car_std_gap = ((car_gap * car_max_gap) - (
car_gap * car_min_gap)) / 4
self._car_gaps = stats.truncnorm(
((car_gap * car_min_gap) - car_gap) / car_std_gap,
((car_gap * car_max_gap) - car_gap) / car_std_gap,
loc=car_gap, scale=car_std_gap
)
elif car_gap_variance == 0 or car_gap_dist == 1:
self._car_gaps = stats.uniform(
loc=(car_gap * car_min_gap),
scale=(car_gap * car_max_gap) - car_gap)
else:
raise RuntimeError('Could not configure car minimum gaps with the '
'given settings!')
self._car_gaps.random_state = np.random.RandomState(
seed=self._short_seed)
def configure_truck_velocities(self, truck_speed, truck_speed_variance, truck_speed_dist):
truck_min_speed = (1 - (truck_speed_variance / 100))
truck_max_speed = (1 + (truck_speed_variance / 100))
if truck_speed_variance > 0 and truck_speed_dist == 0:
truck_std_speed = ((truck_speed * truck_max_speed) - (truck_speed * truck_min_speed)) / 4
self._truck_velocities = stats.truncnorm(
((truck_speed * truck_min_speed) - truck_speed) / truck_std_speed,
((truck_speed * truck_max_speed) - truck_speed) / truck_std_speed,
loc=truck_speed, scale=truck_std_speed)
elif truck_speed_variance == 0 or truck_speed_dist == 1:
self._truck_velocities = stats.uniform(
loc=(truck_speed * truck_min_speed),
scale=(truck_speed * truck_max_speed) - truck_speed)
else:
raise RuntimeError('Could not configure truck velocities with the '
'given settings!')
self._truck_velocities.random_state = np.random.RandomState(
seed=self._short_seed)
def configure_truck_gaps(self, truck_gap, truck_gap_variance, truck_gap_dist):
truck_min_gap = (1 - (truck_gap_variance / 100))
truck_max_gap = (1 + (truck_gap_variance / 100))
if truck_gap_variance > 0 and truck_gap_dist == 0:
truck_std_gap = ((truck_gap * truck_max_gap) - (
truck_gap * truck_min_gap)) / 4
self._truck_gaps = stats.truncnorm(
((truck_gap * truck_min_gap) - truck_gap) / truck_std_gap,
((truck_gap * truck_max_gap) - truck_gap) / truck_std_gap,
loc=truck_gap, scale=truck_std_gap
)
elif truck_gap_variance == 0 or truck_gap_dist == 1:
self._truck_gaps = stats.uniform(
loc=(truck_gap * truck_min_gap),
scale=(truck_gap * truck_max_gap) - truck_gap)
else:
raise RuntimeError('Could not configure truck minimum gaps with '
'the given settings!')
self._truck_gaps.random_state = np.random.RandomState(
seed=self._short_seed)
def configure_truck_weights(self, unloaded_weight, loaded_weight,
                            unloaded_variance, loaded_variance):
    """Configure truck weight distributions.

    Builds separate unloaded/loaded distributions (truncated normal when the
    respective variance is > 0, otherwise uniform) plus a 50/50 mixture used
    for non-platooned trucks.
    """
    unloaded_min = (1 - (unloaded_variance / 100))
    unloaded_max = (1 + (unloaded_variance / 100))
    loaded_min = (1 - (loaded_variance / 100))
    loaded_max = (1 + (loaded_variance / 100))
    if unloaded_variance > 0:
        # Truncated normal over [min, max] with std = range / 4.
        unloaded_std = ((unloaded_weight * unloaded_max) - (unloaded_weight * unloaded_min)) / 4
        self._truck_unloaded_weights = stats.truncnorm(
            ((unloaded_weight * unloaded_min) - unloaded_weight) / unloaded_std,
            ((unloaded_weight * unloaded_max) - unloaded_weight) / unloaded_std,
            loc=unloaded_weight, scale=unloaded_std
        )
    else:
        # BUG FIX: uniform scale must span [min, max]; with variance 0 both
        # forms degenerate to a point mass, so behaviour is unchanged here,
        # but the corrected scale is right for any future non-zero range.
        self._truck_unloaded_weights = stats.uniform(
            loc=(unloaded_weight * unloaded_min),
            scale=(unloaded_weight * unloaded_max) - (unloaded_weight * unloaded_min)
        )
    self._truck_unloaded_weights.random_state = np.random.RandomState(
        seed=self._short_seed)
    if loaded_variance > 0:
        loaded_std = ((loaded_weight * loaded_max) - (loaded_weight * loaded_min)) / 4
        self._truck_loaded_weights = stats.truncnorm(
            ((loaded_weight * loaded_min) - loaded_weight) / loaded_std,
            ((loaded_weight * loaded_max) - loaded_weight) / loaded_std,
            loc=loaded_weight, scale=loaded_std
        )
    else:
        # BUG FIX: same uniform-scale correction as above.
        self._truck_loaded_weights = stats.uniform(
            loc=(loaded_weight * loaded_min),
            scale=(loaded_weight * loaded_max) - (loaded_weight * loaded_min)
        )
    self._truck_loaded_weights.random_state = np.random.RandomState(
        seed=self._short_seed)
    # 50/50 mixture of the two weight populations for regular trucks.
    self._truck_weights = MixtureModel([self._truck_unloaded_weights,
                                        self._truck_loaded_weights])
    self._truck_weights.random_state = np.random.RandomState(
        seed=self._short_seed)
def new_vehicle(self):
    """Create and return the next generated vehicle (or truck platoon).

    Draws the vehicle class (car vs. truck) against ``self._car_pct``,
    samples the desired velocity and minimum gap from the configured
    distributions, and records the sampled values in the
    ``self._generated_*`` lists consumed by ``plot()``.

    Returns:
        A single ``Car`` or ``Truck``, or a list of ``PlatoonedTruck``
        when a truck platoon is generated.
    """
    if self._random.randint(0, 100) < self._car_pct:
        vel = float(self._car_velocities.rvs(1)[0])
        gap = float(self._car_gaps.rvs(1)[0])
        # 0.73 / 1.67 are presumably IDM model parameters and 2000 a
        # weight -- TODO confirm against the Car constructor signature.
        new_vehicle = Car(self._uuid_generator.uuid4(), vel, 0.73, 1.67,
                          gap, self._car_length, IDM, 2000)
        self._cars += 1
        self._generated_car_velocities.append(vel)
        self._generated_car_gaps.append(gap)
    else:
        vel = float(self._truck_velocities.rvs(1)[0])
        gap = float(self._truck_gaps.rvs(1)[0])
        if self._random.randint(0, 100) < self._platoon_pct:
            # Build a platoon: all members share velocity and gap; the
            # first member (i == 0) is flagged as the leader.
            new_vehicle = []
            platoon_gap = self._platoon_gaps.uniform(self._min_platoon_gap,
                                                     self._max_platoon_gap)
            platoon_length = self._platoon_lengths.randint(
                self._min_platoon_length, self._max_platoon_length)
            # One coin flip decides loaded/unloaded for the whole platoon.
            platoon_full = bool(self._platoon_loading.getrandbits(1))
            for i in range(platoon_length):
                if platoon_full:
                    weight = float(self._truck_loaded_weights.rvs(1)[0])
                else:
                    weight = float(self._truck_unloaded_weights.rvs(1)[0])
                new_vehicle.append(
                    PlatoonedTruck(self._uuid_generator.uuid4(), vel,
                                   0.73, 1.67, gap, self._truck_length,
                                   TruckPlatoon, weight, i == 0,
                                   platoon_gap))
                self._trucks += 1
            self._truck_platoons += 1
        else:
            weight = float(self._truck_weights.rvs(1)[0])
            new_vehicle = Truck(self._uuid_generator.uuid4(), vel, 0.73,
                                1.67, gap, self._truck_length, IDM, weight)
            self._trucks += 1
        # NOTE(review): for a platoon only the last member's weight is
        # recorded here -- confirm this is intended for the statistics.
        self._generated_truck_velocities.append(vel)
        self._generated_truck_gaps.append(gap)
        self._generated_truck_weights.append(weight)
    if Consts.DEBUG_MODE:
        if type(new_vehicle) is not list:
            self._debug_file.write('{}\n'.format(new_vehicle.__str__()))
        else:
            self._debug_file.write('[{}]\n'.format(','.join(x.__str__() for x in new_vehicle)))
    return new_vehicle
def plot(self):
    """Save histograms of the generated vehicle parameters to
    ``<self.path>/garage.png``.

    One panel per recorded statistic; panels with no data are left empty.

    Fix: the truck-weight panel was labelled ``'Truck Wights (m)'`` --
    a typo plus a length unit on a weight axis; the unit is dropped
    because the weight unit is not established here.
    """
    os.makedirs(self.path, exist_ok=True)
    import matplotlib.pyplot as plt
    from matplotlib import rcParams
    rcParams['axes.titlepad'] = 40
    f, axarr = plt.subplots(3, 2, squeeze=False)
    if self._generated_car_velocities:
        axarr[0, 0].hist(self._generated_car_velocities, density=True, ec="k")
        axarr[0, 0].set_xlabel('Desired Car Velocity (m/s)')
        axarr[0, 0].set_ylabel('Density')
    if self._generated_car_gaps:
        axarr[0, 1].hist(self._generated_car_gaps, density=True, ec="k")
        axarr[0, 1].set_xlabel('Desired Car Minimum Gap (m)')
        axarr[0, 1].set_ylabel('Density')
    if self._generated_truck_velocities:
        axarr[1, 0].hist(self._generated_truck_velocities, density=True, ec="k")
        axarr[1, 0].set_xlabel('Desired Truck Velocity (m/s)')
        axarr[1, 0].set_ylabel('Density')
    if self._generated_truck_gaps:
        axarr[1, 1].hist(self._generated_truck_gaps, density=True, ec="k")
        axarr[1, 1].set_xlabel('Desired Truck Minimum Gap (m)')
        axarr[1, 1].set_ylabel('Density')
    if self._generated_truck_weights:
        axarr[2, 0].hist(self._generated_truck_weights, density=True, ec="k")
        axarr[2, 0].set_xlabel('Truck Weights')
        axarr[2, 0].set_ylabel('Density')
    f.suptitle('Data from Vehicle Generation', fontsize=12, y=0.99)
    plt.subplots_adjust(top=0.85)
    plt.tight_layout()
    f.set_size_inches(16, 9)
    plt.savefig('{}/garage.png'.format(self.path), dpi=300)
    plt.close(f)
|
from django.http import HttpResponse, JsonResponse, FileResponse
# Create your views here.
from rest_framework.views import APIView
from api.serializers import *
from api.models import *
import hashlib
from rest_framework.parsers import MultiPartParser
import hashlib
import datetime
import os
def getHash(f):
    """Return the MD5 hex digest of the file-like object *f*.

    Reads in fixed-size binary chunks instead of the original line-by-line
    loop, so an upload with no newlines no longer has to fit one "line" in
    memory. As before, *f* is left positioned at EOF and is not rewound
    first.
    """
    digest = hashlib.md5()
    for chunk in iter(lambda: f.read(8192), b''):
        digest.update(chunk)
    return digest.hexdigest()
# Create your views here.
class BaseResponse(object):
    """Uniform JSON envelope used by every view: code / msg / data."""

    def __init__(self):
        # Defaults: numeric 200, empty message, no payload.
        self.code, self.msg, self.data = 200, "", None

    @property
    def dict(self):
        """The current attributes as a plain dict, ready for JsonResponse."""
        return self.__dict__
class GetList(APIView):
    """Return every note belonging to the session user."""

    def get(self, request):
        response = BaseResponse()
        user_id = request.session['userid']
        try:
            serialized = NoteSerializer(Note.objects.filter(user_id=user_id),
                                        many=True)
            response.data = serialized.data
            response.code = "200"
            response.msg = "ok"
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
        return JsonResponse(response.dict)
class UpFile(APIView):
    """Multi-file upload endpoint.

    Fixes a real bug in the original: the extension lists
    (``img_list``/``doc_list``/...) were clobbered inside the loop by the
    querysets of the same names, so in a multi-file upload every file after
    the first match was classified against a queryset instead of a list of
    extensions. The four near-identical branches are collapsed into one
    table-driven handler.
    """
    parser_classes = (MultiPartParser,)

    def post(self, request):
        response = BaseResponse()
        try:
            # extension -> (model class, stored ``type`` string)
            handlers = {}
            for ext in ("jpg", "png", "img", "jpeg"):
                handlers[ext] = (Img, "img")
            for ext in ("doc", "docx", "txt", "md"):
                handlers[ext] = (Doc, "doc")
            for ext in ("mov", "flv", "mp4", "rmvb", "rm"):
                handlers[ext] = (Video, "video")
            for ext in ("mp3", "midi", "wma"):
                handlers[ext] = (Radio, "radio")
            user_id = request.session['userid']
            files = request.FILES.getlist("file")
            for afile in files:
                filename = afile.name
                filetype = filename.split(".")[-1]
                md5 = getHash(afile)
                # Pessimistic default; flipped to 200 once a file is stored.
                response.code = "201"
                response.msg = "no"
                if filetype not in handlers:
                    continue
                model, type_name = handlers[filetype]
                # Reject a duplicate filename for this user.
                if model.objects.filter(user_id=user_id, filename=filename):
                    response.code = "201"
                    response.msg = 'no'
                    return JsonResponse(response.dict)
                if Md5.objects.filter(md5=md5).count():
                    # Content already stored: reuse the existing path.
                    # NOTE(review): if the md5 row was created for another
                    # file type, .first() is None and the except below
                    # fires -- same as the original behaviour.
                    file_path = model.objects.filter(md5_id=md5).first().path
                    model.objects.create(filename=filename, md5_id=md5,
                                         user_id=user_id, path=file_path,
                                         type=type_name, size=afile.size,
                                         date=datetime.datetime.now())
                else:
                    Md5.objects.create(md5=md5, filename=filename)
                    # NOTE(review): stores the *first* uploaded file object
                    # as path, exactly like the original -- confirm intent
                    # for multi-file uploads.
                    model.objects.create(filename=filename, md5_id=md5,
                                         user_id=user_id,
                                         path=request.FILES.get('file'),
                                         type=type_name, size=afile.size,
                                         date=datetime.datetime.now())
                # Charge the user's quota.
                user = User.objects.get(user_id=user_id)
                user.size = user.size - afile.size
                user.save()
                response.code = "200"
                response.msg = "ok"
                response.data = "null"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.msg = 'no'
            response.code = '201'
            response.data = "null"
            return JsonResponse(response.dict)
class FileDownload(APIView):
    """Stream a stored file back as an attachment.

    Fixes a copy-paste bug in the original: the ``doc``, ``radio``,
    ``video`` and ``trash`` branches all queried the ``Img`` model, so any
    non-image download resolved the wrong table (usually raising on a None
    ``.first()``). Also returns an explicit error response for an unknown
    type instead of falling through to ``None``.
    """

    def get(self, request):
        response = BaseResponse()
        type = request.query_params.dict()["type"]
        user_id = request.session['userid']
        filename = request.query_params.dict()["filename"]
        try:
            # type -> (model, Content-Type header, base directory)
            lookups = {
                "img": (Img, 'application/force-download',
                        '/home/ubuntu/WeCloud/files/'),
                "doc": (Doc, 'application/octet-stream',
                        '/home/ubuntu/WeCloud/files/'),
                "radio": (Radio, 'application/octet-stream',
                          '/home/ubuntu/WeCloud/files/'),
                "video": (Video, 'application/force-download',
                          '/home/ubuntu/WeCloud/files/'),
                # Trash rows were opened without the base prefix in the
                # original; preserved.
                "trash": (Trash, 'application/octet-stream', ''),
            }
            if type in lookups:
                model, content_type, base = lookups[type]
                file_path = model.objects.filter(
                    user_id=user_id, filename=filename).first().path
                file = open(base + str(file_path), 'rb')
                file_response = FileResponse(file)
                file_response['Content-Type'] = content_type
                file_response['Content-Disposition'] = \
                    'attachment;filename="' + filename + '"'
                return file_response
            response.msg = 'no'
            response.code = '201'
            response.data = "null"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.msg = 'no'
            response.code = '201'
            response.data = "null"
            return JsonResponse(response.dict)
class GetFileByTime(APIView):
    """Return all of the user's files (img/doc/radio/video), each category
    ordered newest-first.

    Fix: removed the dead check ``if all == {}`` -- it compared the
    *builtin* ``all`` to an empty dict, which is always False.
    """

    def get(self, request):
        response = BaseResponse()
        try:
            data = []
            user_id = request.session['userid']
            if user_id:
                for model, serializer in ((Img, ImgSerializer),
                                          (Doc, DocSerializer),
                                          (Radio, RadioSerializer),
                                          (Video, VideoSerializer)):
                    qs = model.objects.filter(user_id=user_id).order_by('-date')
                    data.extend(serializer(qs, many=True).data)
            response.data = data
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            response.data = "null"
            return JsonResponse(response.dict)
class InsertCoffer(APIView):
    """Move a file into the user's coffer (safe box) and return the coffer
    contents.

    The four near-identical branches are collapsed into a model map, and
    the source row is fetched once instead of three separate ``.first()``
    queries per field.
    """

    def get(self, request):
        response = BaseResponse()
        try:
            type = request.query_params.dict()["type"]
            user_id = request.session['userid']
            filename = request.query_params.dict()["filename"]
            models = {"img": Img, "doc": Doc, "radio": Radio, "video": Video}
            if user_id:
                model = models.get(type)
                if model is not None:
                    src = model.objects.filter(filename=filename,
                                               user_id=user_id).first()
                    Coffer.objects.create(size=src.size, user_id=user_id,
                                          path=src.path, filename=filename,
                                          md5_id=src.md5_id,
                                          date=datetime.datetime.now(),
                                          type=type)
                    model.objects.filter(user_id=user_id,
                                         filename=filename).delete()
                # NOTE(review): as in the original, this returns *all*
                # coffer rows, not just this user's -- confirm intent.
                coffer_list = CofferSerializer(Coffer.objects.all(), many=True)
                response.data = coffer_list.data
                response.code = "200"
                response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class Restore(APIView):
    """Move a coffer entry back into its typed table (img/doc/video/radio)."""

    def get(self, request):
        response = BaseResponse()
        try:
            type = request.query_params.dict()["type"]
            user_id = request.session['userid']
            filename = request.query_params.dict()["filename"]
            target_models = {"img": Img, "doc": Doc,
                             "video": Video, "radio": Radio}
            model = target_models.get(type)
            if model is not None:
                entry = Coffer.objects.filter(user_id=user_id,
                                              filename=filename).first()
                model.objects.create(filename=entry.filename,
                                     md5_id=entry.md5_id,
                                     user_id=user_id,
                                     path=entry.path, type=type,
                                     size=entry.size,
                                     date=entry.date)
            # Coffer row is removed even for an unrecognised type,
            # matching the original control flow.
            Coffer.objects.filter(type=type, user_id=user_id,
                                  filename=filename).delete()
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class delNote(APIView):
    """Delete one note of the session user and return the remaining notes."""

    def get(self, request):
        response = BaseResponse()
        user_id = request.session['userid']
        try:
            note_id = request.query_params.dict()["file_id"]
            Note.objects.filter(file_id=note_id, user_id=user_id).delete()
            remaining = NoteSerializer(Note.objects.filter(user_id=user_id),
                                       many=True)
            response.data = remaining.data
            response.code = "200"
            response.msg = "ok"
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
        return JsonResponse(response.dict)
class CreateNote(APIView):
    """Create an empty note for the session user and return its id/date.

    Fix: the original returned ``None`` (an invalid view response) when
    the session held a falsy user id; that case now returns the standard
    error envelope. A debug print was removed.
    """

    def get(self, request):
        response = BaseResponse()
        try:
            user_id = request.session['userid']
            if not user_id:
                response.code = 201
                response.msg = "no"
                response.data = "null"
                return JsonResponse(response.dict)
            current_time = datetime.datetime.now()
            note = Note.objects.create(title="", content="", user_id=user_id,
                                       date=current_time)
            response.code = 200
            response.msg = "ok"
            response.data = {
                "file_id": note.file_id,
                "date": datetime.datetime.strftime(current_time, '%Y-%m-%d'),
            }
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = 201
            response.msg = "no"
            response.data = "null"
            return JsonResponse(response.dict)
class UpdateNote(APIView):
    """Overwrite a note's title/content (stamping the current date) and
    return the updated note."""

    def post(self, request):
        response = BaseResponse()
        try:
            user_id = request.session['userid']
            payload = request.data['data']
            Note.objects.filter(file_id=payload["file_id"],
                                user_id=user_id).update(
                title=payload['title'], content=payload["content"],
                date=datetime.datetime.now())
            refreshed = NoteSerializer(
                Note.objects.get(file_id=payload["file_id"]))
            response.data = refreshed.data
            response.code = "200"
            response.msg = "ok"
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
        return JsonResponse(response.dict)
class GetNote(APIView):
    """Return a single note looked up by the ``file_id`` query parameter."""

    def get(self, request):
        response = BaseResponse()
        try:
            note_id = request.query_params.dict()["file_id"]
            serialized = NoteSerializer(Note.objects.get(file_id=note_id))
            response.data = serialized.data
            response.code = "200"
            response.msg = "ok"
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
        return JsonResponse(response.dict)
class DeleteFile(APIView):
    """Permanently delete trashed files.

    For each entry the reference count of the underlying MD5 blob is
    computed (rows still in the typed table plus rows in the trash); the
    physical file and the Md5 row are removed only when this trash row
    holds the last reference. The four copy-pasted branches are collapsed
    into a model map and the debug print removed.
    """

    def post(self, request):
        response = BaseResponse()
        try:
            user_id = request.session['userid']
            models = {"img": Img, "doc": Doc, "radio": Radio, "video": Video}
            for filedata in request.data['data']['listData']:
                trash = Trash.objects.filter(user_id=user_id,
                                             filename=filedata['filename'],
                                             type=filedata['type']).first()
                model = models.get(filedata['type'])
                if model is not None:
                    refs = model.objects.filter(md5_id=trash.md5_id).count()
                    refs += Trash.objects.filter(md5_id=trash.md5_id).count()
                    if refs == 1:
                        # Last reference: remove blob and its md5 record.
                        os.remove("/home/ubuntu/WeCloud/files/" + trash.path)
                        Md5.objects.filter(md5=trash.md5_id).delete()
                Trash.objects.filter(user_id=user_id,
                                     filename=filedata['filename'],
                                     type=filedata['type']).delete()
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class GotoTrash(APIView):
    """Move files from their typed table into the trash.

    The original issued four separate ``.first()`` queries per file (path,
    date, md5, size) in four copy-pasted branches; the row is now fetched
    once and dispatched through a model map. Debug prints removed.
    """

    def post(self, request):
        response = BaseResponse()
        try:
            user_id = request.session['userid']
            files = request.data['data']['listData']
            models = {"img": Img, "doc": Doc, "video": Video, "radio": Radio}
            for file in files:
                filename = file['filename']
                type = file['type']
                model = models.get(type)
                if user_id and model is not None:
                    src = model.objects.filter(user_id=user_id,
                                               filename=filename).first()
                    Trash.objects.create(user_id=user_id, filename=filename,
                                         path=src.path, date=src.date,
                                         md5_id=src.md5_id, type=type,
                                         size=src.size)
                    model.objects.filter(user_id=user_id,
                                         filename=filename).delete()
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
class HuiFu(APIView):
    """Restore files from the trash into their typed tables.

    Fix: the original filtered only the ``path`` lookup by ``type``; the
    date/md5/size lookups omitted it, so a user holding two trashed files
    with the same name but different types got mixed metadata. The row is
    now fetched once with the full (user, filename, type) filter.
    """

    def post(self, request):
        response = BaseResponse()
        try:
            user_id = request.session['userid']
            files = request.data['data']['listData']
            models = {"img": Img, "doc": Doc, "radio": Radio, "video": Video}
            for file in files:
                filename = file['filename']
                type = file['type']
                model = models.get(type)
                if user_id and model is not None:
                    entry = Trash.objects.filter(user_id=user_id,
                                                 filename=filename,
                                                 type=type).first()
                    model.objects.create(user_id=user_id, filename=filename,
                                         path=entry.path, date=entry.date,
                                         md5_id=entry.md5_id, type=type,
                                         size=entry.size)
                    Trash.objects.filter(user_id=user_id, filename=filename,
                                         type=type).delete()
            response.code = "200"
            response.msg = "ok"
            return JsonResponse(response.dict)
        except Exception as e:
            print(e)
            response.code = "201"
            response.msg = "no"
            return JsonResponse(response.dict)
|
# https://www.youtube.com/watch?v=5PusmXfZBKo
# Tutorial script on *args / **kwargs and unpacking; the printed strings
# are intentionally in Portuguese and are left untouched.
def soma_2_numeros(a,b):
    print(f"a soma dos dois numeros é: {a + b}")
def soma_3_numeros(a, b, c):
    print(f"a soma dos tres numeros é: {a + b + c}")
soma_2_numeros(41,1)
soma_3_numeros(39,1,2)
####
def soma(*numeros): # arbitrary number of values -- the * operator is what makes this work
    print(sum(numeros))
soma(20, 22)
soma(39, 10, 25)
soma(10, 20, 31, 44)
soma(11, 25, 30, 41, 55)
soma(1, 2, 3, 4, 5, 6, 7, 8, 9 ,10)
####
def f(*args):
    print(f"\nargs = {args}") # shows the content of args (collected into a tuple)
    for arg in args:
        print(arg)
f()
f(1)
f(1,2)
f(1, 2 , 3)
f("São Paulo", "Rio de Janeiro")
###
def filmes_favoritos(*filmes):
    print("\n Meus Filmes Favoritos:")
    for i, filme in enumerate(filmes, start=1):
        print(f"\t{i}. {filme}")
filmes_favoritos("Velozes e Furiosos", "Carros", "Toy Story")
filmes_favoritos("Chamado da Floresta", "Star Wars")
###
# Redefinition: a required positional parameter before the var-positional one.
def filmes_favoritos(nome, *filmes):
    print(f"\nOs Filmes favoritos do(a) {nome}:")
    for i, filme in enumerate(filmes, start=1):
        print(f"\t{i}. {filme}")
filmes_favoritos("Brutus", "Jumanji", "Rambo", "Hora do Rush")
filmes_favoritos("Gertrudes", "Xuxa, na terra dos baixinhos", "Angry Birds", "Borat", "Matrix")
### Understanding **kwargs
def f(**kwargs):
    print(f"\nkwargs = {kwargs}") # kw = keyword; collected into a dict
    for key, value in kwargs.items():
        print(key, value)
f()
f(nome="Bruno")
f(nome= "Bruno", idade=35)
f(nome= "Bruno", idade=35, area=["Devops", "Infra", "Python", "Segurança"])
### Using kwargs
def favoritos(nome, **kwargs):
    print(f"\nOs favoritos do(a) {nome}:")
    for key, value in kwargs.items():
        print(f"\t- {key.capitalize()}: {value}")
favoritos("Bruno", artista="Jeremy Camp", musica="I Still believe")
favoritos("Vanessa", filme="Mulan", artista="Pitty", comida="Frango com Quiabo"),
favoritos(
    "Abigail",
    Linguagem="Python",
    Filme="Sonic",
    comida="Batata Frita",
    bebida="Leitinho"
)
##
def f(x, *args, **kwargs):
    print(f"x = {x}\nargs = {args}\nkwargs = {kwargs}")
f(1, 2, 3, y=4, z=5)
### *args and **kwargs can be used together
### But there is a fixed order: args must come before kwargs
perfil = {
    "nome": "Bruno",
    "idade": 35
}
print(perfil)
def f(**kwargs):
    for key, value in kwargs.items():
        print(key, value)
## unpacking
f(**perfil)
#####
filmes = ["Rocket Science", "Thumbsucker"]
print(*filmes)
# In practice this is what happens: print("Rocket Science", "Thumbsucker")
def f(*args):
    for arg in args:
        print(arg)
f(*filmes)
#####
lista = [1, 2, 3, 4]
primeiro, *o_que_sobrou = lista
print(primeiro)
print(o_que_sobrou)
####
# Same result without starred assignment:
lista = [1, 2, 3, 4]
primeiro, o_que_sobrou = lista[0], lista[1:]
print(primeiro)
print(o_que_sobrou)
######
lista = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
primeiro, *elementos_do_meio, ultimo = lista
print(primeiro)
print(elementos_do_meio)
print(ultimo)
#### Related topics
# Positional-only (before /) and keyword-only (after *) parameters.
def f(pos1, pos2, /, pos_or_kwd, *, kwd1, kwd2):
    print(pos1, pos2, pos_or_kwd, kwd1, kwd2)
# Fix: the original line carried a stray trailing '|' (concatenation junk),
# which is a syntax error.
f(1, 2, pos_or_kwd=3, kwd1=4, kwd2=5)
import time
from selenium import webdriver
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
class Run():
    """Polls a ticket-sale page with headless Chrome and can report the
    scraped status by e-mail."""

    def __init__(self):
        # Last scraped status tokens ("statue" is presumably a typo for
        # "status" -- kept for compatibility).
        self.statue = None

    def check(self):
        """Scrape the concert page and record the ticket status."""
        option = webdriver.ChromeOptions()
        option.add_argument('--headless')
        option.add_argument('--disable-gpu')
        option.add_argument(
            "user-agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'")
        option.add_experimental_option('excludeSwitches', ['enable-automation'])
        # NOTE(review): selenium 3 style API (executable_path,
        # find_elements_by_xpath); both were removed in selenium 4.
        driver = webdriver.Chrome(executable_path=r'D:\Python\Pythoncode\爬虫软件\chromedriver_win32\chromedriver.exe',
                                  options=option)
        list_url = 'http://www.dahepiao.com/news1/2019010560825.html'
        driver.get(list_url)
        a = driver.find_elements_by_xpath('//div[@class="detail liebiao"]//div[@class="detail1"]//blockquote')[0]
        s = a.text
        b = s.split()
        print(s)
        # Only the first whitespace-separated token is inspected.
        for i in b[:1]:
            if i == '许巍广州演唱会时间:待公开':
                # "tickets not released yet"
                print('未放票')
                self.statue = b
            else:
                # "tickets released"
                print('放票了')
                self.statue = b
        driver.close()

    def send_email(self):
        """Mail the scraped status to the hard-coded recipient.

        SECURITY NOTE(review): sender credentials are hard-coded in source.
        """
        my_sender = '1411789366@qq.com'  # sender account
        my_pass = 'yusqrqfuagidihid'  # sender SMTP auth code
        my_user = '1411789366@qq.com'  # recipient (sends to self)
        msg = MIMEText('{}'.format(self.statue), 'plain', 'utf-8')
        msg['From'] = formataddr(["FromRunoob", my_sender])  # sender display name and address
        msg['To'] = formataddr(["FK", my_user])  # recipient display name and address
        msg['Subject'] = "许巍演唱会门票详情!"  # mail subject
        server = smtplib.SMTP_SSL("smtp.qq.com", 465)  # sender's SMTP-over-SSL server
        server.login(my_sender, my_pass)  # sender account and auth code
        server.sendmail(my_sender, [my_user, ], msg.as_string())  # sender, recipients, message
        server.quit()  # close the connection
    def run(self):
        """Check once (loop bound is 1) with a 10 s pause; mailing disabled."""
        # self.send_email(u"1411789366@qq.com", u"1411789366@qq.com", u"主题", u"yusqrqfuagidihid")
        for i in range(1):
            self.check()
            # self.send_email()
            time.sleep(10)
            print(i)
if __name__ == "__main__":
    # Fix: run the checker only when executed as a script, not on import.
    a = Run()
    a.run()
|
import os
import psycopg2
from flask import current_app
from decouple import config
DATABASE_URL = config("DATABASE_URL")
def add_champion(name, rank, level, star, siglevel, account):
    """Insert a champion row.

    Uses a parameterized query; the original interpolated the caller's
    values straight into the SQL string (injection risk).
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            cur.execute(
                "INSERT INTO champion (name, rank, level, star, siglevel, account)"
                " VALUES (%s, %s, %s, %s, %s, %s);",
                (name, rank, level, star, siglevel, account),
            )
def get_champion(ID, name, star, account):
    """Fetch champion rows by primary key, or by (name, star, account).

    Parameterized; the original interpolated user input (injection risk)
    and also forgot the quotes around ``name``, producing invalid SQL on
    the fallback path.
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            if ID is not None:
                cur.execute("SELECT * FROM champion WHERE ID = %s;", (ID,))
            else:
                cur.execute(
                    "SELECT * FROM champion WHERE name = %s AND star = %s AND account = %s;",
                    (name, star, account),
                )
            return cur.fetchall()
def update_champion(column, value, condition):
    """Set *column* = *value* on champion rows matching *condition*.

    The column name is quoted via psycopg2.sql (identifiers cannot be bound
    as query parameters) and the value is bound as a parameter. The
    original wrapped both column and condition in single quotes, producing
    invalid SQL, and was injectable.

    NOTE(review): *condition* is still spliced in as a raw SQL fragment --
    never pass untrusted input for it.
    """
    from psycopg2 import sql  # local import: identifier-safe quoting
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            cur.execute(
                sql.SQL("UPDATE champion SET {} = %s WHERE " + condition + ";")
                .format(sql.Identifier(column)),
                (value,),
            )
def remove_champion(ID):
    """Delete the champion row with the given primary key.

    Parameterized; the original interpolated ID into the SQL (injection
    risk).
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            cur.execute("DELETE FROM champion WHERE ID = %s;", (ID,))
def add_synergy(type, rootchamp, targetchamp, effect):
    """Insert a synergy row (parameterized; the original f-string SQL was
    injectable)."""
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            cur.execute(
                "INSERT INTO synergy (type, rootchamp, targetchamp, effect)"
                " VALUES (%s, %s, %s, %s);",
                (type, rootchamp, targetchamp, effect),
            )
def get_synergy(ID, rootchamp, targetchamp):
    """Fetch synergy rows by primary key, or by (rootchamp, targetchamp).

    Fixes three defects in the original: the WHERE clause was wrapped in
    quotes (making it a string literal, not a condition), the values were
    interpolated into the SQL (injection risk), and the selected rows were
    never fetched or returned.
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            if ID is not None:
                cur.execute("SELECT * FROM synergy WHERE ID = %s;", (ID,))
            else:
                cur.execute(
                    "SELECT * FROM synergy WHERE rootchamp = %s AND targetchamp = %s;",
                    (rootchamp, targetchamp),
                )
            return cur.fetchall()
def update_synergy(ID, rootchamp, targetchamp, newtext):
    """Replace the effect text of the synergy between two champions.

    Parameterized; the original interpolated all values into the SQL
    (injection risk). *ID* is accepted for interface compatibility but,
    as before, unused -- rows are matched by (rootchamp, targetchamp).
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            cur.execute(
                "UPDATE synergy SET effect = %s WHERE rootchamp = %s AND targetchamp = %s;",
                (newtext, rootchamp, targetchamp),
            )
def remove_synergy(ID):
    """Delete the synergy row with the given primary key (parameterized;
    the original f-string SQL was injectable)."""
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            cur.execute("DELETE FROM synergy WHERE ID = %s;", (ID,))
def add_account(ID, password, email, accounttitle, accountlevel):
    """Insert an account row (parameterized; the original f-string SQL was
    injectable).

    NOTE(review): the password appears to be stored as-is -- confirm it is
    hashed upstream.
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            cur.execute(
                "INSERT INTO account (ID, password, email, accounttitle, accountlevel)"
                " VALUES (%s, %s, %s, %s, %s);",
                (ID, password, email, accounttitle, accountlevel),
            )
def get_account(ID, email):
    """Fetch account rows by ID or, failing that, by email.

    Fixes three defects in the original: the WHERE clause was wrapped in
    quotes (a string literal, not a condition), the values were
    interpolated into the SQL (injection risk), and the selected rows were
    never fetched or returned.
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            if ID is not None:
                cur.execute("SELECT * FROM account WHERE ID = %s;", (ID,))
            else:
                cur.execute("SELECT * FROM account WHERE email = %s;", (email,))
            return cur.fetchall()
def update_account(column, value, condition):
    """Set *column* to *value* on account rows matching *condition*.

    NOTE(review): ``column`` and ``condition`` are SQL fragments interpolated
    into the statement text; they must come from trusted code only.
    """
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            # The original quoted the identifier and WHERE clause as string
            # literals, which PostgreSQL rejects; interpolate them bare and
            # bind the value.
            cur.execute(f"UPDATE account SET {column} = %s WHERE {condition};", (value,))
def remove_account(ID):
    """Delete the account row with the given primary key."""
    with psycopg2.connect(DATABASE_URL) as conn:
        with conn.cursor() as cur:
            # Bind the id instead of f-string interpolation (SQL injection).
            cur.execute("DELETE FROM account WHERE ID = %s;", (ID,))
|
from flask import Blueprint, render_template
from simpledu.models import User
from simpledu.models import Course
user = Blueprint('user', __name__, url_prefix='/user')


@user.route('/<user_name>')
def index(user_name):
    """Render the detail page for one user together with every course.

    404s when no user with the given username exists.
    """
    profile = User.query.filter_by(username=user_name).first_or_404()
    all_courses = Course.query.all()
    # Template contract: the single user object is exposed as 'users'.
    return render_template('detail.html', users=profile, courses=all_courses)
|
#-------------------------------------#
# Python script for BEST address      #
# Author: Marc Bruyland (FOD BOSA)    #
# Contact: marc.bruyland@bosa.fgov.be #
# June 2019                           #
#-------------------------------------#
# Batch pipeline: builds the street / house-number / box-number / municipality
# mapping files from the BEST dictionaries and saves each result to disk.
# All helpers and file-name constants come from BEST_Lib; the steps are
# order-dependent (later steps reuse dictionaries built earlier).
from BEST_Lib import *

# --- Streets: load dictionary, build mapping, plus an RR (national register)
# --- variant derived from it.
print('dicS..')
dicS = getDic(fDicStreets)
print('dicMapS..')
dicMapS = createMappingFileStreets(dicS)
print('saveDic(dicMapS, fMapStreetnames)..')
saveDic(dicMapS, fMapStreetnames)
print('dicMapS_RR..')
dicMapStreetsRR = convertStreetsRR(dicMapS)
print('saveDic(dicMapStreetsRR, fMapStreetnamesRR)..')
saveDic(dicMapStreetsRR, fMapStreetnamesRR)

# --- Addresses: base mapping used below for house numbers and box numbers.
print('dicA..')
dicA = getDic(fDicAddresses)
print('dicMapA..')
dicMapA = createMappingFileNumbers(dicA)
print('saveDic(dicMapA, fMapAddresses)..')
saveDic(dicMapA, fMapAddresses)

# --- House numbers: one regular file and one RR-formatted file.
print('dicMapHs..')
isForRR = False
dicMapHs = createMappingFileHouseNrs(dicMapA, isForRR)
print('saveDic(dicMapHs, fMapHouseNrs)..')
saveDic(dicMapHs, fMapHouseNrs)
print('dicMapHs for RR ..')
isForRR = True
dicMapHs = createMappingFileHouseNrs(dicMapA, isForRR)
print('saveDic(dicMapHs, fMapHouseNrsRR)..')
saveDic(dicMapHs, fMapHouseNrsRR)

# --- Box numbers: same regular / RR pair.
print('dicMapBx..')
isForRR = False
dicMapBx = createMappingFileBoxNrs(dicMapA, isForRR)
print('saveDic(dicMapBx, fMapBoxNrs)..')
saveDic(dicMapBx, fMapBoxNrs)
print('dicMapBx for RR ..')
isForRR = True
dicMapBx = createMappingFileBoxNrs(dicMapA, isForRR)
print('saveDic(dicMapBx, fMapBoxNrsRR)..')
saveDic(dicMapBx, fMapBoxNrsRR)

# --- Municipality-to-region mapping.
print('dicM..')
dicM = getDic(fDicMunicipalities)
print('dicMapMunToR..')
dicMapMunToR = createMappingFileMunToR(dicM)
print('saveDic(dicMapMunToR, fMapMunToR)..')
saveDic(dicMapMunToR, fMapMunToR)

# --- Street-code mapping (RR -> BEST), built from both street dictionaries.
print('creating streetcode mapping file..')
dic = createStreetCodeMappingFile(dicS, dicMapStreetsRR)
saveDic(dic, fMapStreetCode_RRtoBEST)
import pygame
import sys
from projectile import Projectile
from alien import Alien
def check_keydown_events(ship, projectiles, event, screen, settings):
    """Dispatch a single KEYDOWN event to the matching game action."""
    key = event.key
    if key == pygame.K_ESCAPE:
        sys.exit()
    if key == pygame.K_RIGHT:
        ship.moving_right = True
    elif key == pygame.K_LEFT:
        ship.moving_left = True
    elif key == pygame.K_SPACE:
        fire_projectile(screen, projectiles, ship, settings)
def check_keyup_events(ship, event):
    """Stop the ship's horizontal movement when an arrow key is released."""
    released = event.key
    if released == pygame.K_RIGHT:
        ship.moving_right = False
    if released == pygame.K_LEFT:
        ship.moving_left = False
def check_events(ship, projectiles, screen, settings):
    """Drain the pygame event queue and react to each event."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            check_keydown_events(ship, projectiles, event, screen, settings)
        elif event.type == pygame.KEYUP:
            check_keyup_events(ship, event)
def update_screen(screen, ship, projectiles, settings, aliens):
    """Function that does all of the drawing on the screen on call"""
    # Draw order matters: background first, then ship, then projectiles,
    # then the alien group on the top layer, then flip the frame.
    # Redrawing the screen background
    screen.fill(settings.bg_color)
    # Updating the ship rect before displaying it
    ship.update()
    # Drawing the ship on the screen
    ship.displayShip()
    # Updating the projectile info and displaying projectiles
    update_projectiles(screen, projectiles, settings)
    # Drawing the alien on top layer
    aliens.update()
    aliens.draw(screen)
    # Updating the frame
    pygame.display.flip()
def update_projectiles(screen, projectiles, settings):
    """Advance, draw, and cull the projectile sprite group."""
    projectiles.update()
    for shot in projectiles.sprites():
        shot.draw()
    # Iterate a copy so removal is safe; drop shots that left the top edge.
    for shot in projectiles.copy():
        if shot.rect.bottom <= 0:
            projectiles.remove(shot)
def fire_projectile(screen, projectiles, ship, settings):
    """Spawn one projectile unless the magazine limit is already reached."""
    if len(projectiles) >= settings.magazine:
        return
    projectiles.add(Projectile(settings, screen, ship))
def get_alien_row_num(screen, ship, settings):
    """Get the number of alien rows available to add on screen."""
    # (docstring above was commented out in the original)
    margin_alien = Alien(screen, settings)
    alien_height = margin_alien.rect.height
    ship_height = ship.rect.height
    # Leave three alien-heights of margin at the top plus room for the ship.
    available_space_y = (settings.screen_height -
                         (3 * alien_height) - ship_height)
    # Each row occupies two alien-heights (alien + gap); use integer floor
    # division directly instead of int(float-division).
    number_rows = available_space_y // (2 * alien_height)
    return number_rows
def get_num_of_aliens(screen, settings):
    """Return how many aliens fit across the screen, one margin each side."""
    probe = Alien(screen, settings)
    usable_width = settings.screen_width - (probe.rect.width * 2)
    # Each alien occupies two widths (alien + gap).
    return round(usable_width / (probe.rect.width * 2))
def create_alien(screen, settings, alien_index, row_number = 0):
    """Build one alien positioned on a grid of two-alien-sized cells."""
    new_alien = Alien(screen, settings)
    width = new_alien.rect.width
    height = new_alien.rect.height
    # One alien of margin, then two widths per column / two heights per row.
    new_alien.x = width * (1 + 2 * alien_index)
    new_alien.y = height * (1 + 2 * row_number)
    new_alien.rect.x = new_alien.x
    new_alien.rect.y = new_alien.y
    return new_alien
def create_alien_row(aliens, screen, settings, row_number = 0):
    """Fill one fleet row with as many aliens as fit across the screen."""
    for column in range(get_num_of_aliens(screen, settings)):
        aliens.add(create_alien(screen, settings, column, row_number))
def create_alien_army(aliens, ship, screen, settings):
    """Create the full fleet, one row short of the vertical capacity."""
    rows = get_alien_row_num(screen, ship, settings) - 1
    for row_number in range(rows):
        create_alien_row(aliens, screen, settings, row_number=row_number)
|
import _thread
import pycom
import socket
import time
import machine
import ubinascii
import gc
import ujson
import os
from utils import Utils as utils
from network import LoRa, WLAN
from machine import SD
from L76GNSS import L76GNSS
from pytrack import Pytrack
# LoRaWAN DevStatus battery codes: 0 = external power supply,
# 255 = battery level cannot be measured (LoRaWAN spec values).
LORA_BAT_PSU = 0
LORA_BAT_CANNOT_MEASURE = 255
# Hardware setup
py = Pytrack()
# Software setup
gc.enable()
# Maps _thread ident -> short task name for log prefixes; guarded by the
# mutex because several threads register themselves concurrently.
thread_list = {}
thread_list_mutex = _thread.allocate_lock()
# Filled from config/app.json by load_config() before the tasks start.
conf = {}
def printt(*args, **kwargs):
    """Print *args* prefixed with the calling thread's registered name.

    Output from threads that never registered in thread_list is dropped.
    """
    global thread_list
    tag = thread_list.get(_thread.get_ident())
    if tag is not None:
        print("[" + tag + "] " + " ".join(map(str, args)), **kwargs)
def load_config(filename):
    """Load the JSON configuration file into the global ``conf`` dict.

    If the file is missing/unreadable, print an error and halt forever so the
    device does not run with an empty configuration (original behaviour).
    """
    global conf
    try:
        # 'with' guarantees the handle is closed even if ujson.load raises;
        # the original leaked the open file on a parse error.
        with open(filename, 'r') as f:
            conf = ujson.load(f)
    except OSError:
        print("Cannot load config (%s), please upload before proceeding!" % (filename))
        while (True):
            time.sleep(1)
def lora_set_battery():
    """Read the battery voltage and log the LoRaWAN battery status byte.

    NOTE(review): the computed state is only logged, never returned or sent —
    confirm whether it should feed into the LoRa DevStatus response.
    """
    voltage = py.read_battery_voltage()
    # Above 4.3 V assume external PSU power; otherwise map 3.6-4.3 V onto the
    # LoRaWAN battery range 1..254.
    if voltage > 4.3:
        lora_batery_state = LORA_BAT_PSU
        printt("[PSU] Voltage %0.2f V" % (voltage))
    else:
        lora_batery_state = int(utils.map(voltage, 3.6, 4.3, 1, 254))
        printt("[BAT] Voltage %0.2f V mapped to %d" % (voltage, lora_batery_state))
def lora_task():
    """Thread: join the LoRaWAN network via OTAA, then periodically send a
    fixed 3-byte payload and poll for downlink data."""
    with thread_list_mutex:
        thread_list[_thread.get_ident()] = 'LORA'
    # Initialise LoRa in LORAWAN mode.
    # Please pick the region that matches where you are using the device:
    # Asia = LoRa.AS923
    # Australia = LoRa.AU915
    # Europe = LoRa.EU868
    # United States = LoRa.US915
    lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.EU868, tx_power=14)
    printt("DevEUI: %s" % (ubinascii.hexlify(lora.mac()).decode('ascii')))
    # create an OTAA authentication parameters
    app_eui = ubinascii.unhexlify(conf['lora']['app_eui'])
    app_key = ubinascii.unhexlify(conf['lora']['app_key'])
    # NOTE(review): this logs the application key in clear text.
    printt("Joining LORAWAN with EUI: %s and KEY: %s" % (conf['lora']['app_eui'], conf['lora']['app_key']))
    # join a network using OTAA (Over the Air Activation)
    lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
    lora_setup_done = False
    while (True):
        # Set LORA battery state
        lora_set_battery()
        # wait until the module has joined the network
        if (not lora.has_joined()):
            # Red LED while still joining.
            pycom.rgbled(0x100000)
            printt('Joining network...')
            time.sleep(2.5)
        else:
            if (not lora_setup_done):
                # Succesfully joined: green LED, one-time socket setup.
                pycom.rgbled(0x001000)
                # create a LoRa socket
                s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
                # set the LoRaWAN data rate
                s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)
                lora_setup_done = True
            # make the socket blocking
            # (waits for the data to be sent and for the 2 receive windows to expire)
            s.setblocking(True)
            # send some data
            s.send(bytes([0x01, 0x02, 0x03]))
            # make the socket non-blocking
            # (because if there's no data received it will block forever...)
            s.setblocking(False)
            # get any data received (if any...)
            data = s.recv(64)
            print(data)
        time.sleep(0.1)
def gnss_task():
    """Thread: mount the SD card if present, then poll the GNSS receiver
    every 2 seconds and log the RMC coordinates."""
    sd_mounted = False
    with thread_list_mutex:
        thread_list[_thread.get_ident()] = 'GNSS'
    # Mount SD if possible; listdir() verifies the mount actually works.
    sd = SD()
    try:
        os.mount(sd, '/sd')
        os.listdir('/sd')
        sd_mounted = True
    except OSError:
        # No card present — keep running, just skip SD logging.
        pass
    gnss = L76GNSS(py, timeout=5)
    while True:
        coord = gnss.rmc()
        printt(coord)
        # SD logging currently disabled; sd_mounted is computed but unused.
        # if sd_mounted:
        #     logfile = open('/sd/gnss.txt', 'a')
        #     logfile.write(logstring)
        #     logfile.close()
        time.sleep(2)
def system_task():
    """Thread: background housekeeping — run garbage collection every 2 s."""
    with thread_list_mutex:
        thread_list[_thread.get_ident()] = 'SYST'
    while True:
        gc.collect()
        time.sleep(2)
# --- Main program: load config, optionally join WiFi, start worker threads.
print('LORA GPS TRACKER APPLICATION')
time.sleep(2)
# Load configuration file from flash
load_config('config/app.json')
# Check if we need wifi
if conf['wifi']['enabled']:
    wlan = WLAN(mode=WLAN.STA)
    nets = wlan.scan()
    for net in nets:
        if (net.ssid == conf['wifi']['ssid']):
            print('Network found: %s' % (conf['wifi']['ssid']))
            wlan.connect(net.ssid, auth=(net.sec, conf['wifi']['key']), timeout=5000)
            while not wlan.isconnected():
                machine.idle() # save power while waiting
            print('WLAN connection succeeded!')
            break
# Start processing threads
_thread.stack_size(32768)
_thread.start_new_thread(lora_task, ())
_thread.start_new_thread(gnss_task, ())
_thread.start_new_thread(system_task, ())
gc.collect()
# Main thread just idles; worker threads do all the work.
while (True):
    try:
        time.sleep(0.1)
    except KeyboardInterrupt:
        print("received keyboard interrupt")
    # NOTE(review): bare except also swallows SystemExit — consider
    # narrowing to Exception.
    except:
        print("Got another interrupt")
from flask import Flask
# The flask.ext.* import namespace was removed in Flask 1.0; extensions must
# be imported from their own packages.
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from flask_sqlalchemy import SQLAlchemy

from config import Configuration  # import our configuration data

app = Flask(__name__)
app.config.from_object(Configuration)  # use values from our Configuration object

db = SQLAlchemy(app)
migrate = Migrate(app, db)

# Expose 'python manage.py db <cmd>' migration commands via Flask-Script.
manager = Manager(app)
manager.add_command('db', MigrateCommand)
|
from ..element.validator import Validator as ElementValidator
from ...elements.button import Button
class Validator(ElementValidator):
    """Validates that a located element can be treated as a button."""

    @staticmethod
    def validate(element, selector):
        """Return *element* when it is a <button> or a button-typed <input>;
        otherwise return None."""
        if element.tag_name.lower() not in ('input', 'button'):
            return None
        # TODO - Verify this is desired behavior based on
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1290963
        is_input = element.tag_name.lower() == 'input'
        if is_input and element.get_attribute('type').lower() not in Button.VALID_TYPES:
            return None
        return element
|
def two_sum(arr, k):
    """Return True if any two distinct elements of arr sum to k.

    Single pass over arr with a set of previously-seen values: O(n) time,
    O(n) space.  The original compared every pair, O(n^2) time.
    """
    seen = set()
    for x in arr:
        if k - x in seen:
            return True
        seen.add(x)
    return False
def two_sum_one_pass(arr, k):
    """Return True if any two distinct elements of arr sum to k.

    Genuine one-pass version: the original sliced arr and scanned the slice
    on every iteration, making it O(n^2) time with O(n) copying per step
    despite its name.  A seen-set gives O(n) time, O(n) space.
    """
    seen = set()
    for x in arr:
        if k - x in seen:
            return True
        seen.add(x)
    return False
if __name__ == '__main__':
    nums = [1, 2, 3, 4, 5]
    # Smoke tests: 9 = 4 + 5 is reachable, 15 exceeds the max pair sum.
    assert two_sum(nums, 9)
    assert not two_sum(nums, 15)
    assert two_sum_one_pass(nums, 9)
    assert not two_sum_one_pass(nums, 15)
def vectormachine():
    """Train a linear SVM on a shuffled 50/50 split of the iris data set
    and return the test-set accuracy as a percentage."""
    from sklearn.datasets import load_iris # importing datasets
    from sklearn.utils import shuffle # to shuffle the datasets
    from sklearn.model_selection import train_test_split # to split the datasets
    from sklearn.svm import SVC
    from sklearn.metrics import classification_report,confusion_matrix
    from sklearn import metrics # to check the accuracy

    iris = load_iris()
    # Deterministic shuffle of features and targets together.
    features, labels = shuffle(iris['data'], iris['target'], random_state=0)
    x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.50)
    model = SVC(kernel='linear')
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    accuracy_pct = metrics.accuracy_score(y_test, predictions) * 100
    return accuracy_pct

c = vectormachine()
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.neighbors import BallTree
# Resolve all data/model files relative to this module's directory so the
# script works regardless of the current working directory.
currentPath = os.path.dirname(os.path.realpath(__file__))
wordVecFile = os.path.join(currentPath, 'wordVectors.bin')  # binary word2vec model
keyword_matrix_2018_filename = os.path.join(currentPath, "keyword_matrix_2018.csv")
checkPointPath = os.path.join(currentPath, 'checkpoint')  # TF checkpoint dir
team_features_filename = os.path.join(currentPath, 'team_features.csv')
checkPointData = os.path.join(checkPointPath, 'model.meta')  # TF meta-graph
class Word2Vec(object):
    """Loads a word2vec model stored in the classic binary format
    (header line 'vocab_size dim', then '<word> <dim float32s>' records)."""

    def __init__(self, fname=wordVecFile):
        word_vecs = {}
        with open(fname, "rb") as f:
            header = f.readline()
            vocab_size, layer1_size = map(int, header.split())  # e.g. 3000000 300
            binary_len = np.dtype('float32').itemsize * layer1_size  # bytes per vector
            for _ in range(vocab_size):
                chars = []
                while True:
                    ch = f.read(1)
                    # f.read(1) yields *bytes*; the original compared against
                    # str literals, which never match under Python 3, and
                    # str(ch) produced "b'x'" garbage words.
                    if ch == b'\xc2':
                        chars = []
                        break
                    if ch == b' ' or len(ch.strip()) == 0:
                        break
                    if ch != b'\n':
                        chars.append(ch.decode('latin-1'))
                words = ''.join(chars)
                # np.frombuffer replaces the deprecated np.fromstring.
                word_vecs[words] = np.frombuffer(f.read(binary_len), dtype='float32')
        self.word_vecs = word_vecs
        self.flag = True

    def getWordVector(self, word):
        """Return the stored 300-d vector for *word*, or a random vector in
        [-0.25, 0.25) for out-of-vocabulary words."""
        key = word.lower()
        # The original tested membership with word.lower() but indexed with
        # the raw word, raising KeyError for mixed-case inputs; use one key
        # for both.  (Assumes vocab keys are lowercase — TODO confirm against
        # the .bin file.)
        if key not in self.word_vecs:
            return np.random.uniform(-0.25, 0.25, 300)
        else:
            return self.word_vecs[key]
def recommend_team(word, k=10):
    """Return up to *k* teams whose learned features are nearest to the
    embedding of *word*, as dicts with team_name / year / link.

    Heavy resources (keyword matrix, word2vec model) are loaded lazily on
    the first call and cached as function attributes.
    """
    if not recommend_team.init:
        # One-time lazy initialisation of the heavy shared resources.
        print(keyword_matrix_2018_filename)
        recommend_team.data = np.array(pd.read_csv(keyword_matrix_2018_filename, header = None))
        print("Loading Word Vector...")
        recommend_team.word2vec = Word2Vec()
        recommend_team.init = True
        print("Finish loading.")
    # Embed the query word on every call: the original only embedded the word
    # of the *first* call, so later calls silently reused that first vector.
    recommend_team.word_vector = recommend_team.word2vec.getWordVector(word)
    res = []
    with tf.Session() as sess:
        # load the model
        saver = tf.train.import_meta_graph(checkPointData)
        saver.restore(sess, tf.train.latest_checkpoint(checkPointPath))
        graph = tf.get_default_graph()
        y = graph.get_tensor_by_name("net_output:0")
        x = graph.get_tensor_by_name("X:0")
        feature = sess.run(y, feed_dict = {x: [recommend_team.word_vector]})
        team_features = np.array(pd.read_csv(team_features_filename, header = None))
        tree = BallTree(team_features, leaf_size=40)
        # Honour the caller's k instead of the hard-coded 10.
        dist, ind = tree.query(feature, k=k)
        for i in ind:
            res.append({'team_name': recommend_team.data[i,0], 'year': recommend_team.data[i,1], 'link': recommend_team.data[i,2]})
    return res
# Function-attribute "static" state backing recommend_team's lazy
# initialisation; populated on the first call.
recommend_team.init = False
recommend_team.data = None
recommend_team.word2vec = None
recommend_team.word_vector = None
# print(recommend_team("cancer"))
|
from typing import Optional
from fidesops.schemas.masking.masking_configuration import (
StringRewriteMaskingConfiguration,
MaskingConfiguration,
)
from fidesops.schemas.masking.masking_strategy_description import (
MaskingStrategyDescription,
MaskingStrategyConfigurationDescription,
)
from fidesops.service.masking.strategy.format_preservation import FormatPreservation
from fidesops.service.masking.strategy.masking_strategy import MaskingStrategy
STRING_REWRITE = "string_rewrite"
class StringRewriteMaskingStrategy(MaskingStrategy):
    """Masking strategy that replaces any input with one fixed string."""

    def __init__(
        self,
        configuration: StringRewriteMaskingConfiguration,
    ):
        self.rewrite_value = configuration.rewrite_value
        self.format_preservation = configuration.format_preservation

    def mask(self, value: Optional[str]) -> Optional[str]:
        """Return the configured rewrite value (format-preserved if a
        FormatPreservation spec was configured); None input yields None."""
        if value is None:
            return None
        if self.format_preservation is None:
            return self.rewrite_value
        formatter = FormatPreservation(self.format_preservation)
        return formatter.format(self.rewrite_value)

    @staticmethod
    def get_configuration_model() -> MaskingConfiguration:
        return StringRewriteMaskingConfiguration

    # MR Note - We will need a way to ensure that this does not fall out of date. Given that it
    # includes subjective instructions, this is not straightforward to automate
    @staticmethod
    def get_description() -> MaskingStrategyDescription:
        return MaskingStrategyDescription(
            name=STRING_REWRITE,
            description="Masks the input value with a default string value",
            configurations=[
                MaskingStrategyConfigurationDescription(
                    key="rewrite_value",
                    description="The string that will replace existing values",
                )
            ],
        )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial auto-generated schema for the classroom chat app: a Room and
    # its Chat messages (username/text/date stored as plain CharFields).

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('username', models.CharField(max_length=64)),
                ('text', models.CharField(max_length=280)),
                # NOTE(review): date is stored as a string, not a DateTimeField.
                ('date', models.CharField(max_length=35)),
            ],
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=128)),
                ('n', models.CharField(max_length=16)),
            ],
        ),
        # Link each chat message to its room.
        # NOTE(review): ForeignKey without on_delete — valid only on
        # Django < 2.0; confirm target Django version.
        migrations.AddField(
            model_name='chat',
            name='room',
            field=models.ForeignKey(to='classroom.Room'),
        ),
    ]
|
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
import pkg_resources

# Expose the installed distribution's version as <package>.__version__.
try:
    __version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
    # Running from a source checkout without installation; leave __version__
    # unset (deliberate best-effort).
    pass
# Read a 4x4 board; if any two horizontally or vertically adjacent cells
# match, the game can continue, otherwise it is over.
board = [input().split() for _ in range(4)]
for r in range(4):
    for c in range(3):
        if board[r][c] == board[r][c + 1] or board[c][r] == board[c + 1][r]:
            print('CONTINUE')
            exit()
print('GAMEOVER')
|
from openerp.osv import osv, fields
from datetime import datetime, timedelta
import time
import logging
import utils
logger = logging.getLogger('sale')


class sale_shop(osv.osv):
    # Extends the existing sale.shop model (legacy OpenERP osv API, Python 2)
    # with Amazon-marketplace fields and MWS import/export helpers.
    _name = "sale.shop"
    _inherit = "sale.shop"
    __logger = logging.getLogger(_name)
    _columns = {
        # Sales-channel instance that holds the Amazon MWS credentials.
        'instance_id' : fields.many2one('sales.channel.instance', 'Instance', readonly=True),
        'amazon_shop' : fields.boolean('Amazon Shop', readonly=True),
        'amazon_margin':fields.float('Amazon Margin', size=64),
        # MWS report bookkeeping: id of the report request, id of the finished
        # report, and the request/import timestamps.
        'requested_report_id': fields.char('Requested Report ID', size=100 , readonly=True),
        'report_id': fields.char('Report ID', size=100 , readonly=True),
        'report_update':fields.datetime('Last ASIN Import Date'),
        'report_requested_datetime': fields.datetime('Report Requested'),
        # Fulfilment-by-Amazon: stock location used for FBA deliveries.
        'fba_location':fields.many2one('stock.location', 'FBA Location'),
        'amazon_fba_shop':fields.boolean('FBA Shop', readonly=True),
    }
    def import_listing(self, cr, uid, ids, shop_id, product_id , resultvals, context={}):
        # Create an amazon.product.listing for product_id from one MWS result
        # row (SellerSKU/Title/listing_id), if the shop is an Amazon shop and
        # no listing with the same product+SKU exists yet; then delegate to
        # the parent implementation.
        # NOTE(review): mutable default context={} is shared across calls.
        amazon_product_listing_obj = self.pool.get('amazon.product.listing')
        print "===import_listing in amazonnnnnnnnnnnnnnn======>", shop_id
        # shop_id may be either a database id or an already-browsed record.
        if isinstance(shop_id, int):
            shop_obj = self.pool.get('sale.shop').browse(cr, uid, shop_id)
        else:
            shop_obj = shop_id
        print "SHOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP"
        if shop_obj.amazon_shop:
            product_sku = resultvals.get('SellerSKU', False)
            amazon_ids = amazon_product_listing_obj.search(cr, uid, [('product_id', '=', product_id), ('name', '=', product_sku)])
            if not amazon_ids:
                vals = {
                    'product_id': product_id,
                    'name': product_sku,
                    'title': resultvals.get('Title', False),
                    'asin': resultvals.get('listing_id', False),
                    'shop_id': shop_obj.id
                }
                print "*************", amazon_product_listing_obj.create(cr, uid, vals)
        return super(sale_shop, self).import_listing(cr, uid, ids, shop_id, product_id, resultvals, context)
    def import_amazon_orders(self, cr, uid, ids, context=None):
        # Pull orders from Amazon MWS (ListOrders, paginated via NextToken),
        # fetch each order's line items, create the sale orders, and for FBA
        # shops immediately force/partial-process the generated pickings.
        # NOTE(review): the NextToken loop body and the no-token branch below
        # are near-duplicates; a future refactor should share one helper.
        amazon_api_obj = self.pool.get('amazonerp.osv')
        sale_order_obj = self.pool.get('sale.order')
        pick_obj = self.pool.get('stock.picking')
        final_resultvals = []
        instance_obj = self.browse(cr, uid, ids[0])
        context.update({'from_date': datetime.now()})
        # Only fetch orders created after the last successful import.
        createdAfter = utils.calcCreatedAfter(instance_obj.last_import_order_date)
        # MFN = merchant fulfilled, AFN = fulfilled by Amazon (FBA).
        fulfillment = 'MFN'
        if instance_obj.amazon_fba_shop:
            fulfillment = 'AFN'
        results = amazon_api_obj.call(instance_obj.instance_id, 'ListOrders', createdAfter, False, fulfillment)
        print "=**********results********", results
        # logger.error('results %s', results)
        # MWS request throttling.
        time.sleep(30)
        result_next_token = False
        if results:
            # The API appends a trailing {'NextToken': ...} dict when more
            # pages exist; consume pages until it is absent.
            last_dictionary = results[-1]
            while last_dictionary.get('NextToken', False):
                result_next_token = True
                next_token = last_dictionary.get('NextToken', False)
                del results[-1]
                result_vals = []
                context['shipping_product_default_code'] = 'SHIP AMAZON'
                context['default_product_category'] = 1
                for result in results:
                    print "=**********result********", result
                    # Skip orders already imported (unless still draft).
                    saleorderids = sale_order_obj.search(cr, uid, [('name', '=', instance_obj.prefix + result['OrderId'] + instance_obj.suffix), ('shop_id', '=', instance_obj.id)])
                    if saleorderids:
                        if sale_order_obj.browse(cr, uid, saleorderids[0]).state != 'draft':
                            print 'Order Exist', result['OrderId']
                            continue
                    result_vals = amazon_api_obj.call(instance_obj.instance_id, 'ListOrderItems', result['OrderId'])
                    # Merge the order header into each line-item dict.
                    for result_val in result_vals:
                        print 'result_val', result_val
                        result_val.update(result)
                        final_resultvals.append(result_val)
                        print 'result_val : ',result_val
                if final_resultvals:
                    order_ids = self.createOrder(cr, uid, ids, instance_obj.id, final_resultvals, context)
                    for saleorderid in order_ids:
                        sobj = sale_order_obj.browse(cr, uid, saleorderid)
                        # FBA orders are already shipped by Amazon: mark the
                        # picking and process it out of the FBA location.
                        if instance_obj.amazon_fba_shop:
                            picking_ids = sobj.picking_ids
                            if picking_ids:
                                for each_picking in picking_ids:
                                    pick_obj.write(cr, uid, each_picking.id, {'carrier_tracking_ref':'FULFILLMENT'})
                                    pick_obj.force_assign(cr, uid, [each_picking.id])
                                    context.update({'location_id': instance_obj.fba_location.id})
                                    self.do_partial(cr, uid, [each_picking.id], context)
                time.sleep(25)
                # Fetch the next page and continue until no token remains.
                result_next_token = amazon_api_obj.call(instance_obj.instance_id, 'ListOrdersByNextToken', next_token)
                results = result_next_token
                last_dictionary = results[-1]
                if last_dictionary.get('NextToken', False) == False:
                    break
            if not result_next_token:
                # Single-page response: same processing as the loop above.
                result_vals = []
                context['shipping_product_default_code'] = 'SHIP AMAZON'
                context['default_product_category'] = 1
                for result in results:
                    print "=**********result********", result
                    saleorderids = sale_order_obj.search(cr, uid, [('name', '=', instance_obj.prefix + result['OrderId'] + instance_obj.suffix), ('shop_id', '=', instance_obj.id)])
                    if saleorderids:
                        if sale_order_obj.browse(cr, uid, saleorderids[0]).state != 'draft':
                            print 'Order Exist', result['OrderId']
                            continue
                    result_vals = amazon_api_obj.call(instance_obj.instance_id, 'ListOrderItems', result['OrderId'])
                    for result_val in result_vals:
                        print 'result_val', result_val
                        result_val.update(result)
                        final_resultvals.append(result_val)
                        print 'result_val : ',result_val
                if final_resultvals:
                    order_ids = self.createOrder(cr, uid, ids, instance_obj.id, final_resultvals, context)
                    for saleorderid in order_ids:
                        sobj = sale_order_obj.browse(cr, uid, saleorderid)
                        if instance_obj.amazon_fba_shop:
                            picking_ids = sobj.picking_ids
                            if picking_ids:
                                for each_picking in picking_ids:
                                    pick_obj.write(cr, uid, each_picking.id, {'carrier_tracking_ref':'FULFILLMENT'})
                                    pick_obj.force_assign(cr, uid, [each_picking.id])
                                    context.update({'location_id': instance_obj.fba_location.id})
                                    self.do_partial(cr, uid, [each_picking.id], context)
        return True
    def do_partial(self, cr, uid, ids, context=None):
        # Process one stock picking fully: build the per-move partial data
        # expected by stock.move.do_partial and hand the moves over to it.
        # Optionally rewrites each move's source location from the context
        # (used to ship FBA orders out of the FBA location).
        # no call to super!
        stock_pick_obj = self.pool.get('stock.picking')
        moveobj = self.pool.get('stock.move')
        assert len(ids) == 1, 'Partial move processing may only be done one form at a time.'
        print ids
        partial = stock_pick_obj.browse(cr, uid, ids[0], context=context)
        print partial
        partial_data = {
            'delivery_date' : partial.date
        }
        print partial.move_lines
        moves_ids = []
        for move in partial.move_lines:
            if context.get('location_id', False):
                moveobj.write(cr, uid, move.id, {'location_id': context.get('location_id')})
            move_id = move.id
            # One 'move<id>' entry per stock move, as expected by do_partial.
            partial_data['move%s' % (move_id)] = {
                'product_id': move.product_id.id,
                'product_qty': move.product_qty,
                'product_uom': move.product_uom.id,
                # 'prodlot_id': move.prodlot_id.id,
            }
            moves_ids.append(move_id)
            # Incoming moves of average-costed products also need price data
            # so the average cost can be recomputed.
            if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
                partial_data['move%s' % (move_id)].update(product_price=move.cost,
                    product_currency=move.currency.id)
        self.pool.get('stock.move').do_partial(cr, uid, moves_ids, partial_data, context=context)
        return True
    def update_amazon_order_status(self, cr, uid, ids, context={}):
        # Push shipment confirmations back to Amazon: for every completed,
        # not-yet-exported sale order of this shop (in batches of 100),
        # build an OrderFulfillment XML envelope with carrier/tracking data
        # per order line and submit it via POST_ORDER_FULFILLMENT_DATA.
        # NOTE(review): mutable default context={} is shared across calls.
        logger.error('update_amazon_order_status %s', ids)
        if context == None:
            context = {}
        shop_obj = self.browse(cr, uid, ids[0])
        instance_obj = shop_obj.instance_id
        amazon_api_obj = self.pool.get('amazonerp.osv')
        sale_order_obj = self.pool.get('sale.order')
        sale_ids = [1]  # non-empty sentinel so the batch loop starts
        offset = 0
        while len(sale_ids):
            today_data = time.strftime("%Y-%m-%d")
            print 'today_data', today_data
            sale_ids = sale_order_obj.search(cr, uid, [('track_exported', '=', False), ('state', '=', 'done'), ('shop_id', '=', shop_obj.id)], offset, 100, 'id')
            logger.error('sale_ids %s', sale_ids)
            if not sale_ids:
                break
            offset += len(sale_ids)
            message_information = ''
            message_id = 1
            # Fallback fulfillment date: two minutes ago, UTC-suffixed.
            today = datetime.now()
            DD = timedelta(seconds=120)
            earlier = today - DD
            fulfillment_date = earlier.strftime("%Y-%m-%dT%H:%M:%S")
            fulfillment_date_concat = str(fulfillment_date) + '-00:00'
            for sale_data in sale_order_obj.browse(cr, uid, sale_ids):
                order_id = sale_data.unique_sales_rec_no # for getting order_id
                logger.error('order_id %s', order_id)
                print "======sale_data.picking_ids=====>", sale_data.picking_ids
                if sale_data.picking_ids:
                    picking_data = sale_data.picking_ids[0]
                    tracking_id = picking_data.carrier_tracking_ref # for getting tracking_id
                    carrier_id = picking_data.carrier_id
                    # Orders without a carrier cannot be confirmed — skip.
                    if not carrier_id:
                        continue
                    carrier_name = carrier_id.carrier_name
                    shipping_method = carrier_id.shipping_method
                    # One fulfillment <Message> per order line.
                    for each_line in sale_data.order_line:
                        product_qty = int(each_line.product_uom_qty)
                        product_order_item_id = each_line.unique_sales_line_rec_no
                        # Convert ' ' to 'T' to get ISO-8601, truncate seconds.
                        fulfillment_date = picking_data.date_done.replace(' ', 'T')[:19]
                        fulfillment_date_concat = str(fulfillment_date) + '-00:00'
                        print 'fulfillment_date_concat', fulfillment_date_concat
                        logger.error('fulfillment_date_concat %s', fulfillment_date_concat)
                        item_string = '''<Item><AmazonOrderItemCode>%s</AmazonOrderItemCode>
<Quantity>%s</Quantity></Item>''' % (product_order_item_id, product_qty)
                        message_information += """<Message>
<MessageID>%s</MessageID>
<OperationType>Update</OperationType>
<OrderFulfillment><AmazonOrderID>%s</AmazonOrderID>
<FulfillmentDate>%s</FulfillmentDate>
<FulfillmentData>
<CarrierName>%s</CarrierName>
<ShippingMethod>%s</ShippingMethod>
<ShipperTrackingNumber>%s</ShipperTrackingNumber>
</FulfillmentData>%s</OrderFulfillment>
</Message>""" % (message_id, order_id, fulfillment_date_concat, carrier_name, shipping_method, tracking_id, item_string.encode("utf-8"))
                        message_id = message_id + 1
            data = """<?xml version="1.0" encoding="utf-8"?><AmazonEnvelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="amzn-envelope.xsd"><Header><DocumentVersion>1.01</DocumentVersion><MerchantIdentifier>M_SELLERON_82825133</MerchantIdentifier></Header><MessageType>OrderFulfillment</MessageType>""" + message_information.encode("utf-8") + """</AmazonEnvelope>"""
            logger.error('data ---------> %s', data)
            results = amazon_api_obj.call(instance_obj, 'POST_ORDER_FULFILLMENT_DATA', data)
            logger.error('results ---------> %s', results)
            # Mark the whole batch as exported and persist before throttling.
            for sale_data in sale_order_obj.browse(cr, uid, sale_ids):
                sale_data.write({'track_exported':True})
            cr.commit()
            # MWS feed submission throttling.
            time.sleep(70)
        return True
# Listing
    def request_products_report(self, cr, uid, ids, context=None):
        # Ask MWS to generate a _GET_MERCHANT_LISTINGS_DATA_ report and store
        # the request id on the shop; report_id is cleared until the report
        # is ready (see check_report_status).
        # NOTE(review): '_' is used below as a translation function but is
        # not imported in this view of the file — confirm it is in scope.
        # try:
        if context == None:
            context = {}
        (data,) = self.browse(cr, uid, ids , context=context)
        instance_obj = data.instance_id
        amazon_api_obj = self.pool.get('amazonerp.osv')
        StartDate = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime()) + '.000Z'
        reportData = amazon_api_obj.call(instance_obj, 'RequestReport', '_GET_MERCHANT_LISTINGS_DATA_', StartDate)
        print "======reportData======>", reportData
        if reportData.get('ReportProcessingStatus', False):
            if reportData['ReportProcessingStatus'] == '_SUBMITTED_':
                self.write(cr, uid, ids, {'requested_report_id':reportData['ReportRequestId'], 'report_id':'', 'report_requested_datetime':time.strftime("%Y-%m-%d %H:%M:%S")}, context)
                cr.commit()
            else:
                if context.get('raise_exception', False):
                    raise osv.except_osv(_('Error Sending Request'), '%s' % _(reportData['ReportProcessingStatus']))
        else:
            if context.get('raise_exception', False):
                raise osv.except_osv(_('Error Sending Request'), '%s' % _('Null Response'))
        return True
    def check_report_status(self, cr, uid, ids, context=None):
        # Poll MWS for the previously requested listings report; when it is
        # ready, store its generated report id on the shop for import.
        # try:
        if context == None:
            context = {}
        (data,) = self.browse(cr, uid, ids , context=context)
        instance_obj = data.instance_id
        amazon_api_obj = self.pool.get('amazonerp.osv')
        if not data.requested_report_id:
            raise osv.except_osv(_('Error !'), '%s' % _('Please request Report'))
        reportList = amazon_api_obj.call(instance_obj, 'GetReportList', False, data.requested_report_id, False, False)
        print "======reportList=====>", reportList
        if reportList:
            self.write(cr, uid, ids, {'report_id':reportList[0]}, context)
            cr.commit()
        else:
            # NOTE(review): condition is 'not raise_exception' here, the
            # inverse of request_products_report — confirm intended.
            if not context.get('raise_exception', False):
                raise osv.except_osv(_('Error !'), '%s' % _('Request Status Not Done'))
        return True
    def handleMissingAsins(self, cr, uid, ids, missed_resultvals, context=None):
        # Retry writing failed amazon.stock.sync rows, at most 3 passes over
        # the remaining list; successfully written rows are removed in place.
        count = 0
        amazon_stock_synch_obj = self.pool.get('amazon.stock.sync')
        while (missed_resultvals):
            count = count + 1 # ## count is to make sure loop doesn't go into endless iteraiton
            if count > 3:
                break
            resultvals = missed_resultvals
            print 'missed_resultvals', missed_resultvals
            # NOTE(review): removes items from the list being iterated, which
            # can skip elements — relies on the outer retry passes to catch
            # any skipped rows.
            for results in resultvals:
                print 'results', results
                try:
                    amazon_stock_synch_obj.write(cr, uid, [results['stock_sync_id']], results)
                    cr.commit()
                    missed_resultvals.remove(results)
                except Exception, e:
                    print "Import Amazon Listing handleMissingItems: ", e
            time.sleep(20)
        return True
def import_amazon_products(self, cr, uid, ids, context=None):
(data,) = self.browse(cr, uid, ids , context=context)
amazon_api_obj = self.pool.get('amazonerp.osv')
prod_obj = self.pool.get('product.product')
amazon_product_listing_obj = self.pool.get('amazon.product.listing')
if not data.report_id:
raise osv.except_osv('Error', '%s' % ('Please request New Report'))
instance_obj = data.instance_id
missed_resultvals = []
response = amazon_api_obj.call(instance_obj, 'GetReport', data.report_id)
amazon_create_vals = {}
if response:
product_inv_data_lines = response.split("\n")
count = 0
for product_inv_data_line in product_inv_data_lines:
count += 1
if count == 1:
continue
if product_inv_data_line == '' :
continue
try:
product_inv_data_fields = product_inv_data_line.split('\t')
sku = product_inv_data_fields[3].strip(" ")
asin = product_inv_data_fields[16].strip(" ")
amazon_stock = product_inv_data_fields[5].strip(" ")
amazon_price = product_inv_data_fields[4].strip(" ")
name = (product_inv_data_fields[0].strip(" ")).encode('utf-8')
print "======name========>", name
if len(sku.split(" ")):
fulfillment_channel = 'DEFAULT'
print "================sku,", sku
product_ids = prod_obj.search(cr, uid, [('default_code', '=', sku)])
print "=======product_ids======>", product_ids
if not product_ids:
product_ids = [prod_obj.create(cr, uid, {'default_code': sku, 'name': name, 'list_price':float(amazon_price)})]
print 'product_ids ===', product_ids
if not len(product_ids):
continue
if asin == '':
continue
listing_ids = amazon_product_listing_obj.search(cr, uid, [('product_id', '=', product_ids[0]), ('name', '=', sku), ('asin', '=', asin), ('shop_id', '=', data.id)])
print 'listing_ids', listing_ids
fulfillment_channel = 'DEFAULT'
try:
price = float(amazon_price)
except:
price = 0.0
pass
print 'price', price
print 'amazon_stock', amazon_stock
if amazon_stock == '':
continue
amazon_create_vals = {
'listing_name':sku,
'name':sku,
'asin':asin,
'fulfillment_channel':fulfillment_channel,
'product_id':product_ids[0],
'shop_id':data.id,
'active_amazon':True,
'last_sync_stock':amazon_stock,
'last_sync_price':price,
'last_sync_date':data.report_requested_datetime,
'title': name or ' '
}
print 'amazon_create_vals', amazon_create_vals
if not listing_ids:
listing_id = amazon_product_listing_obj.create(cr, uid, amazon_create_vals)
else:
amazon_product_listing_obj.write(cr, uid, listing_id[0], amazon_create_vals)
print 'listing_id', listing_id
cr.commit()
# if count % 7 == 0:
# raise Exception("concurrent update")
except Exception, e:
print "handleUpdate ASIN Exception: ", e
if str(e).find('concurrent update') != -1:
cr.rollback()
time.sleep(20)
missed_resultvals.append(amazon_create_vals)
continue
# Handle Misses ASIN ORders
self.handleMissingAsins(cr, uid, ids, missed_resultvals)
# Inactivate all the ASIN which are not synced
cr.execute('select id from amazon_product_listing where (last_sync_date < %s or last_sync_date is null) and shop_id = %s ', (data.report_update, data.id))
amazon_listing_ids = filter(None, map(lambda x: x[0], cr.fetchall()))
print 'amazon_listing_ids', amazon_listing_ids
for each_listing in amazon_listing_ids:
try:
amazon_product_listing_obj.write(cr, uid, [each_listing], {'last_sync_stock':0, 'last_sync_date':data.report_update})
# except Exception, e:
# print "--->",e
except Exception, e:
if str(e).find('concurrent update') != -1:
cr.rollback()
time.sleep(20)
self.write(cr, uid, ids, {'report_update': datetime.now(), 'requested_report_id':False})
return True
# stock
def xml_format(self, message_type, merchant_string, message_data):
    """Wrap a pre-built MWS feed body in the standard AmazonEnvelope.

    All three arguments are XML fragments (unicode); each is UTF-8
    encoded before being spliced into the envelope string.

    NOTE(review): the literal puts a newline before the `<?xml ...?>`
    declaration, which is not strictly well-formed XML -- presumably the
    MWS endpoint tolerates it; confirm before changing the literal.
    """
    result = """
<?xml version="1.0" encoding="utf-8"?>
<AmazonEnvelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="amzn-envelope.xsd">
<Header>
<DocumentVersion>1.01</DocumentVersion>
""" + merchant_string.encode("utf-8") + """
</Header>
""" + message_type.encode("utf-8") + """
""" + message_data.encode("utf-8") + """
</AmazonEnvelope>"""
    return result
def _export_amazon_stock_generic(self, cr, uid, ids, instance_obj, xml_data, context=None):
    """Submit an Inventory feed to Amazon MWS for *instance_obj*.

    *xml_data* is the concatenation of pre-built <Message> fragments; it
    is wrapped in the AmazonEnvelope and posted via
    POST_INVENTORY_AVAILABILITY_DATA. Any API failure is re-raised as a
    user-facing osv exception.
    """
    if context == None:
        context = {}
    amazon_api_obj = self.pool.get('amazonerp.osv')
    merchant_string = "<MerchantIdentifier>%s</MerchantIdentifier>" % (instance_obj.aws_merchant_id)
    message_type = '<MessageType>Inventory</MessageType>'
    stock_data = self.xml_format(message_type, merchant_string, xml_data)
    stock_submission_id = False
    print 'stock_data*****************************************', stock_data
    try:
        stock_submission_id = amazon_api_obj.call(instance_obj, 'POST_INVENTORY_AVAILABILITY_DATA', stock_data)
        print 'stock_submission_id', stock_submission_id
    except Exception, e:
        # Surface the raw API error to the user instead of failing silently.
        raise osv.except_osv(_('Error !'), _('%s') % (e))
    return True
def export_amazon_stock(self, cr, uid, ids, context=None):
    """Push on-hand quantities for this shop's active listings to Amazon
    as an Inventory feed.

    Listing ids may be supplied via context['listing_ids']; otherwise
    every active listing of the shop is exported. Negative quantities
    are clamped to 0 because the Amazon API rejects them.
    """
    print 'context in price', context
    amazon_prod_list_obj = self.pool.get('amazon.product.listing')
    if context == None:
        context = {}
    context.update({'from_date': datetime.now()})
    (data,) = self.browse(cr, uid, ids)
    amazon_inst_data = data.instance_id
    if context.has_key('listing_ids'):
        listing_ids = context.get('listing_ids')
    else:
        listing_ids = amazon_prod_list_obj.search(cr, uid, [('active_amazon', '=', True), ('shop_id', '=', data.id)])
    xml_data = ''
    message_id = 1
    for amazon_list_data in amazon_prod_list_obj.browse(cr, uid, listing_ids):
        # Service products carry no stock to report.
        if amazon_list_data.product_id.type == 'service':
            continue
        if not amazon_list_data.name:
            raise osv.except_osv(_('Please enter SKU for '), '%s' % _(amazon_list_data.name))
        qty = amazon_list_data.product_id.qty_available
        # If stock goes negative, update it to 0: Amazon does not accept
        # negative quantities and the API call would fail.
        if int(qty) < 0:
            qty = 0
        update_xml_data = '''<SKU><![CDATA[%s]]></SKU>
<Quantity>%s</Quantity>
''' % (amazon_list_data.name, int(qty))
        xml_data += '''<Message>
<MessageID>%s</MessageID><OperationType>Update</OperationType>
<Inventory>%s</Inventory></Message>
''' % (message_id, update_xml_data)
        message_id += 1
    if xml_data != '':
        self._export_amazon_stock_generic(cr, uid, ids, amazon_inst_data, xml_data)
    return True
# price
def _export_amazon_price_generic(self, cr, uid, ids, instance_obj, xml_data, context=None):
    """Submit a Price feed to Amazon MWS for *instance_obj*.

    *xml_data* is the concatenation of pre-built <Message> fragments; it
    is wrapped in the AmazonEnvelope and posted via
    POST_PRODUCT_PRICING_DATA. Any API failure is re-raised as a
    user-facing osv exception.
    """
    amazon_api_obj = self.pool.get('amazonerp.osv')
    merchant_string = "<MerchantIdentifier>%s</MerchantIdentifier>" % (instance_obj.aws_merchant_id)
    message_type = """<MessageType>Price</MessageType>"""
    price_data = self.xml_format(message_type, merchant_string, xml_data)
    print 'price_data*************', price_data
    price_submission_id = False
    try:
        price_submission_id = amazon_api_obj.call(instance_obj, 'POST_PRODUCT_PRICING_DATA', price_data)
        print 'price_submission_id', price_submission_id
    except Exception, e:
        raise osv.except_osv(_('Error !'), _('%s') % (e))
    return True
def export_amazon_price(self, cr, uid, ids, context=None):
    """Push last_sync_price for this shop's active listings to Amazon as
    a Price feed.

    Listing ids may be supplied via context['listing_ids']; otherwise
    every active listing of the shop is exported. Listings without a
    strictly positive price are skipped.
    """
    print 'context in price', context
    amazon_prod_list_obj = self.pool.get('amazon.product.listing')
    if context == None:
        context = {}
    context.update({'from_date': datetime.now()})
    (data,) = self.browse(cr, uid, ids)
    instance_obj = data.instance_id
    if context.has_key('listing_ids'):
        listing_ids = context.get('listing_ids')
    else:
        listing_ids = amazon_prod_list_obj.search(cr, uid, [('active_amazon', '=', True), ('shop_id', '=', data.id)])
    price_string = ''
    message_id = 1
    for amazon_list_data in amazon_prod_list_obj.browse(cr, uid, listing_ids):
        # Service products carry no marketplace price.
        if amazon_list_data.product_id.type == 'service':
            continue
        if not amazon_list_data.name:
            raise osv.except_osv(_('Please enter SKU for '), '%s' % _(amazon_list_data.title))
        price = amazon_list_data.last_sync_price
        if float(price) > 0.00:
            # Currency is taken from the product's company currency.
            price_string += """<Message>
<MessageID>%s</MessageID>
<Price>
<SKU><![CDATA[%s]]></SKU>
<StandardPrice currency='%s'>%.2f</StandardPrice>
</Price>
</Message>""" % (message_id, amazon_list_data.name, amazon_list_data.product_id.company_id.currency_id.name, float(price))
            message_id += 1
        print "=========amazon_list_data.product_id.company_id.currency_id.name============>", amazon_list_data.product_id.name, amazon_list_data.product_id.company_id.name, amazon_list_data.product_id.company_id.currency_id.name
    if price_string != '':
        self._export_amazon_price_generic(cr, uid, ids, instance_obj, price_string)
    return True
# Upload Listing Methods
def _my_value(self, cr, uid, location_id, product_id, context=None):
cr.execute(
'select sum(product_qty) '\
'from stock_move '\
'where location_id NOT IN %s '\
'and location_dest_id = %s '\
'and product_id = %s '\
'and state = %s ', tuple([(location_id,), location_id, product_id, 'done']))
wh_qty_recieved = cr.fetchone()[0] or 0.0
# this gets the value which is sold and confirmed
argumentsnw = [location_id, (location_id,), product_id, ('done',)] # this will take reservations into account
cr.execute(
'select sum(product_qty) '\
'from stock_move '\
'where location_id = %s '\
'and location_dest_id NOT IN %s '\
'and product_id = %s '\
'and state in %s ', tuple(argumentsnw))
qty_with_reserve = cr.fetchone()[0] or 0.0
qty_available = wh_qty_recieved - qty_with_reserve
return qty_available
def import_amazon_stock(self, cr, uid, ids, context={}):
listing_obj = self.pool.get('amazon.product.listing')
amazon_api_obj = self.pool.get('amazonerp.osv')
(obj,) = self.browse(cr, uid, ids)
listing_ids = listing_obj.search(cr, uid, [('shop_id', '=', ids[0])])
sku_list = []
for record in listing_obj.browse(cr, uid, listing_ids):
sku_list.append(record.name)
print "========sku_list===>", sku_list
result = amazon_api_obj.call(obj.instance_id, 'ListInventorySupply', sku_list)
print "=====result======>", result
if result:
for rec in result:
print "===>", rec.get('SellerSKU')
l_ids = listing_obj.search(cr, uid, [('name', '=', rec['SellerSKU'])])
if l_ids:
listing_obj.write(cr, uid, l_ids[0], {'last_sync_stock': float(rec['InStockSupplyQuantity'])})
print "===========result====>", result
return True
sale_shop()  # Instantiate to register the model with the OpenERP ORM (pre-v7 convention)
class sale_order(osv.osv):
    """sale.order extension carrying the Amazon marketplace fields."""
    _name = 'sale.order'
    _inherit = 'sale.order'

    def _default_journal(self, cr, uid, context=None):
        """Return the id of the journal named 'Sales Journal', or False
        when no such journal exists (journal_id is then left empty)."""
        # Fixed mutable-default argument: was `context={}`.
        accountjournal_obj = self.pool.get('account.journal')
        accountjournal_ids = accountjournal_obj.search(cr, uid, [('name', '=', 'Sales Journal')])
        if accountjournal_ids:
            return accountjournal_ids[0]
        return False

    _columns = {
        # Amazon marketplace order reference (AmazonOrderId)
        'amazon_order_id': fields.char('Order ID', size=256),
        'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
        'faulty_order': fields.boolean('Faulty'),
        'confirmed': fields.boolean('Confirmed'),
        'shipservicelevel': fields.char('ShipServiceLevel', size=64),
    }
    _defaults = {
        'journal_id': _default_journal,
    }


sale_order()
class sale_order_line(osv.osv):
    """sale.order.line extension linking each line to its Amazon order item."""
    _name = 'sale.order.line'
    _inherit = 'sale.order.line'
    _columns = {
        # Amazon OrderItemId for this line
        'order_item_id' : fields.char('Order Item ID', size=256),
        # Amazon product ASIN
        'asin' : fields.char('Asin', size=256),
    }
sale_order_line()
|
import sys
import os
import unittest
import math
import logging
from osgeo import ogr
from invest_natcap.dbfpy import dbf
from invest_natcap.timber import timber_core
class TestTimber(unittest.TestCase):
    """Unit and regression tests for the InVEST timber valuation model
    (invest_natcap.timber.timber_core)."""

    def test_timber_summationOne_NotImmedHarv(self):
        """Test of the first summation in the Net Present Value equation when
        immediate harvest is NO. Using known inputs. Calculated value and
        Hand Calculations compared against the models equation"""
        mdr_perc = 1.07
        harvest_value = 3990.0
        freq_Harv = 2
        num_Years = 4
        upper_limit = int(math.floor(num_Years / freq_Harv))
        lower_limit = 1
        subtractor = 1
        #Calculated value by hand:
        summationCalculatedByHand = 6986.000492
        summation = timber_core.npvSummationOne(
            lower_limit, upper_limit, harvest_value, mdr_perc,
            freq_Harv, subtractor)
        summationCalculated = 0.0
        for num in range(lower_limit, upper_limit + 1):
            summationCalculated = \
                summationCalculated + (harvest_value /
                ((1.07) ** ((freq_Harv * num) - subtractor)))
        self.assertAlmostEqual(summationCalculatedByHand, summation, 5)
        self.assertAlmostEqual(summationCalculated, summation, 5)

    def test_timber_summationOne_ImmedHarv(self):
        """Test of the first summation in the Net Present Value equation when
        immediate harvest is YES. Using known inputs. Calculated value
        and Hand Calculations compared against the models equation"""
        mdr_perc = 1.07
        harvest_value = 3990.0
        freq_Harv = 2
        num_Years = 4
        upper_limit = int(math.ceil((num_Years / freq_Harv) - 1.0))
        lower_limit = 0
        subtractor = 0
        #Calculated value by hand:
        summationCalculatedByHand = 7475.020526
        summation = timber_core.npvSummationOne(
            lower_limit, upper_limit, harvest_value, mdr_perc,
            freq_Harv, subtractor)
        summationCalculated = 0.0
        for num in range(lower_limit, upper_limit + 1):
            summationCalculated = \
                summationCalculated + (harvest_value /
                ((1.07) ** ((freq_Harv * num) - subtractor)))
        self.assertAlmostEqual(summationCalculatedByHand, summation, 5)
        self.assertAlmostEqual(summationCalculated, summation, 5)

    def test_timber_summationTwo(self):
        """Test of the second summation in the Net Present Value equation using
        known inputs. Calculated value and Hand Calculations
        compared against the models equation"""
        lower_limit = 0
        upper_limit = 3
        maint_Cost = 100
        mdr_perc = 1.07
        #Calculated value by hand:
        summationCalculatedByHand = 362.4316044
        summation = timber_core.npvSummationTwo(
            lower_limit, upper_limit, maint_Cost, mdr_perc)
        summationCalculated = 0.0
        for num in range(0, 4):
            summationCalculated = \
                summationCalculated + (maint_Cost / ((1.07) ** num))
        self.assertAlmostEqual(summationCalculatedByHand, summation, 5)
        self.assertAlmostEqual(summationCalculated, summation, 5)

    def test_timber_smoke(self):
        """Smoke test for Timber. Model should not crash with
        basic input requirements"""
        #Set the path for the test inputs/outputs and check to make sure the
        #directory does not exist
        smoke_path = './invest-data/test/data/test_out/timber/Smoke/'
        if not os.path.isdir(smoke_path):
            os.makedirs(smoke_path)
        else:
            try:
                os.remove(smoke_path)
            except OSError:
                # Narrowed from a bare except: smoke_path is a directory, so
                # os.remove raises OSError; fall back to removing the
                # (empty) directory itself.
                os.rmdir(smoke_path)
        #Define the paths for the sample input/output files
        dbf_path = os.path.join(smoke_path, 'test.dbf')
        shp_path = os.path.join(smoke_path, 'timber.shp')
        #Create our own dbf file with basic attributes for one polygon
        db = dbf.Dbf(dbf_path, new=True)
        db.addField(('PRICE', 'N', 3), ('T', 'N', 2), ('BCEF', 'N', 1),
                    ('Parcel_ID', 'N', 1), ('Parcl_area', 'N', 4),
                    ('Perc_harv', 'N', 2), ('Harv_mass', 'N', 3),
                    ('Freq_harv', 'N', 2), ('Maint_cost', 'N', 3),
                    ('Harv_cost', 'N', 3), ('Immed_harv', 'C', 1))
        rec = db.newRecord()
        rec['PRICE'] = 100
        rec['T'] = 2
        rec['BCEF'] = 1
        rec['Parcel_ID'] = 1
        rec['Parcl_area'] = 1
        rec['Perc_harv'] = 10
        rec['Harv_mass'] = 100
        rec['Freq_harv'] = 1
        rec['Maint_cost'] = 0
        rec['Harv_cost'] = 0
        rec['Immed_harv'] = 'Y'
        rec.store()
        db.close()
        #Create our own basic shapefile with one polygon to run through the
        #model
        driverName = "ESRI Shapefile"
        drv = ogr.GetDriverByName(driverName)
        ds = drv.CreateDataSource(shp_path)
        lyr = ds.CreateLayer('timber', None, ogr.wkbPolygon)
        #Creating a field because OGR will not allow an empty feature,
        #it will default by putting FID_1
        #as a field. OGR will also self create the FID and Shape field.
        field_defn = ogr.FieldDefn('Parcl_ID', ogr.OFTInteger)
        lyr.CreateField(field_defn)
        feat = ogr.Feature(lyr.GetLayerDefn())
        lyr.CreateFeature(feat)
        index = feat.GetFieldIndex('Parcl_ID')
        feat.SetField(index, 1)
        #save the field modifications to the layer.
        lyr.SetFeature(feat)
        feat.Destroy()
        db = dbf.Dbf(dbf_path)
        #Arguments to be passed to the model
        args = {'timber_shape': ds,
                'attr_table': db,
                'mdr': 7,
                }
        timber_core.execute(args)
        #Hand calculated values for the above inputs.
        #To be compared with the timber model's output of the created shapefile.
        tnpv = 1934.579439
        tbio = 20
        tvol = 20
        lyr = ds.GetLayerByName('timber')
        feat = lyr.GetFeature(0)
        for field, value in (
                ('TNPV', tnpv), ('TBiomass', tbio), ('TVolume', tvol)):
            field_index = feat.GetFieldIndex(field)
            field_value = feat.GetField(field_index)
            self.assertAlmostEqual(value, field_value, 6)
        #This is how OGR closes and flushes its datasources
        ds.Destroy()
        ds = None
        db.close()

    def test_timber_BioVol(self):
        """Biomass and Volume test for timber model. Creates an attribute
        table and shapefile with set values. Compares calculated Biomass
        and Volume with that from running the shapefile through the model. """
        #Set the path for the test inputs/outputs and check to make sure
        #the directory does not exist
        dir_path = './invest-data/test/data/test_out/timber/biovol/Output/'
        #Deleting any files in the output if they already exist, this
        #caused a bug once when I didn't do this.
        if os.path.isdir(dir_path):
            textFileList = os.listdir(dir_path)
            # Renamed loop variable: `file` shadowed the builtin.
            for file_name in textFileList:
                os.remove(dir_path + file_name)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        shp_path = dir_path
        dbf_path = os.path.join(dir_path, 'test.dbf')
        #Create our own dbf file with basic attributes for one polygon
        db = dbf.Dbf(dbf_path, new=True)
        db.addField(('PRICE', 'N', 3), ('T', 'N', 2), ('BCEF', 'N', 1),
                    ('Parcel_ID', 'N', 1), ('Parcl_area', 'N', 4),
                    ('Perc_harv', 'N', 2), ('Harv_mass', 'N', 3),
                    ('Freq_harv', 'N', 2), ('Maint_cost', 'N', 3),
                    ('Harv_cost', 'N', 3), ('Immed_harv', 'C', 1))
        rec = db.newRecord()
        rec['PRICE'] = 400
        rec['T'] = 4
        rec['BCEF'] = 1
        rec['Parcel_ID'] = 1
        rec['Parcl_area'] = 800
        rec['Perc_harv'] = 10.0
        rec['Harv_mass'] = 100
        rec['Freq_harv'] = 2
        rec['Maint_cost'] = 100
        rec['Harv_cost'] = 100
        rec['Immed_harv'] = 'Y'
        rec.store()
        db.close()
        #Calculate Biomass,Volume, and TNPV by hand to 3 decimal places.
        calculatedBiomass = 16000
        calculatedVolume = 16000
        TNPV = 5690071.137
        #Create our own shapefile with a polygon to run through the model
        driverName = "ESRI Shapefile"
        drv = ogr.GetDriverByName(driverName)
        ds = drv.CreateDataSource(shp_path)
        lyr = ds.CreateLayer('timber', None, ogr.wkbPolygon)
        #Creating a field because OGR will not allow an empty feature,
        #it will default by putting FID_1
        #as a field. OGR will also self create the FID and Shape field.
        field_defn = ogr.FieldDefn('Parcl_ID', ogr.OFTInteger)
        lyr.CreateField(field_defn)
        feat = ogr.Feature(lyr.GetLayerDefn())
        lyr.CreateFeature(feat)
        index = feat.GetFieldIndex('Parcl_ID')
        feat.SetField(index, 1)
        #save the field modifications to the layer.
        lyr.SetFeature(feat)
        feat.Destroy()
        db = dbf.Dbf(dbf_path)
        #Arguments to be passed to the model
        args = {'timber_shape': ds,
                'attr_table': db,
                'mdr': 7,
                }
        timber_core.execute(args)
        #Compare Biomass, Volume, and TNPV calculations
        lyr = ds.GetLayerByName('timber')
        feat = lyr.GetFeature(0)
        for field, value in (
                ('TNPV', TNPV), ('TBiomass', calculatedBiomass),
                ('TVolume', calculatedVolume)):
            field_index = feat.GetFieldIndex(field)
            field_value = feat.GetField(field_index)
            self.assertAlmostEqual(value, field_value, 2)
        #This is how OGR closes and flushes its datasources
        ds.Destroy()
        ds = None
        lyr = None
        db.close()

    def test_timber_with_inputs(self):
        """Test timber model with real inputs. Compare copied and
        modified shapefile with valid shapefile that was created from
        the same inputs. Regression test."""
        #Open table and shapefile
        input_dir = './invest-data/test/data/timber/input/'
        out_dir = './invest-data/test/data/test_out/timber/with_inputs/'
        attr_table = dbf.Dbf(os.path.join(input_dir, 'plant_table.dbf'))
        test_shape = ogr.Open(os.path.join(input_dir, 'plantation.shp'), 1)
        #Add the Output directory onto the given workspace
        output_uri = os.path.join(out_dir, 'timber.shp')
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        if os.path.isfile(output_uri):
            os.remove(output_uri)
        shape_source = output_uri
        ogr.GetDriverByName('ESRI Shapefile').\
            CopyDataSource(test_shape, shape_source)
        timber_output_shape = ogr.Open(shape_source, 1)
        timber_output_layer = timber_output_shape.GetLayerByName('timber')
        args = {'timber_shape': timber_output_shape,
                'attr_table': attr_table,
                'mdr': 7,
                }
        timber_core.execute(args)
        valid_output_shape = ogr.Open(
            './invest-data/test/data/timber/regression_data/timber.shp')
        valid_output_layer = valid_output_shape.GetLayerByName('timber')
        #Check that the number of features (polygons) are the same between
        #shapefiles
        num_features_valid = valid_output_layer.GetFeatureCount()
        num_features_copy = timber_output_layer.GetFeatureCount()
        self.assertEqual(num_features_valid, num_features_copy)
        #If number of features are equal, compare each shapefiles 3 fields
        if num_features_valid == num_features_copy:
            for i in range(num_features_valid):
                feat = valid_output_layer.GetFeature(i)
                feat2 = timber_output_layer.GetFeature(i)
                for field in ('TNPV', 'TBiomass', 'TVolume'):
                    field_index = feat.GetFieldIndex(field)
                    field_value = feat.GetField(field_index)
                    field_index2 = feat2.GetFieldIndex(field)
                    field_value2 = feat2.GetField(field_index2)
                    self.assertAlmostEqual(field_value, field_value2, 2)
        #This is how OGR cleans up and flushes datasources
        test_shape.Destroy()
        timber_output_shape.Destroy()
        valid_output_shape = None
        timber_output_shape = None
        test_shape = None
        timber_output_layer = None
        attr_table.close()
|
from Function import Function
from Potentials import GaussianFunction, TableFunction, CategoricalGaussianFunction
import numpy as np
from numpy.linalg import det, inv
class NeuralNetFunction(Function):
    """
    A plain feed-forward network built from LinearLayer and activation
    objects, trained by manual gradient descent.

    Usage:
        nn = NeuralNetFunction(
            (in, inner, RELU),
            (inner, out, None)
        )

        for _ in range(max_iter):
            predict = nn.forward(x)
            d_loss = compute_loss_gradient(predict, target)
            _, d_network = backward(d_loss)

            for layer, (d_W, d_b) in d_network.items():
                layer.W -= d_W * lr
                layer.b -= d_b * lr

    Note:
        The input data x is 2 dimensional,
        where the first dimension represents data point,
        and the second dimension represents features.
    """
    def __init__(self, *args):
        # args: one (input_size, output_size, activation) triple per layer;
        # a None activation adds only the linear layer.
        Function.__init__(self)
        self.layers = []
        for i_size, o_size, act in args:
            """
            i_size: input size
            o_size: output size
            act: activation function
            """
            self.layers.append(
                LinearLayer(i_size, o_size)
            )
            if act is not None:
                self.layers.append(act)
        self.cache = None  # Cache for storing the forward propagation results

    def set_parameters(self, parameters):
        """Restore the (W, b) pairs produced by parameters(), in layer order."""
        idx = 0
        for layer in self.layers:
            if type(layer) is LinearLayer:
                layer.W, layer.b = parameters[idx]
                idx += 1

    def parameters(self):
        """Collect (W, b) for every linear layer (activations carry no state)."""
        parameters = list()
        for layer in self.layers:
            if type(layer) is LinearLayer:
                parameters.append(
                    (layer.W, layer.b)
                )
        return parameters

    def __call__(self, *parameters):
        # Run a single data point (given as scalars) through the network;
        # returns the raw network output as a 1-row array.
        x = np.array(parameters, dtype=float)
        x = x[np.newaxis]
        for layer in self.layers:
            x = layer.forward(x)
        return x

    def forward(self, x, save_cache=True):  # x must be numpy array
        """Batched forward pass; caches each layer's input for backward()."""
        if save_cache:
            self.cache = [x]
        for layer in self.layers:
            x = layer.forward(x)
            if save_cache:
                self.cache.append(x)
        return x

    def backward(self, d_y, x=None):  # d_y must be numpy array
        """Back-propagate d_y through the cached pass.

        Returns (input gradient, {layer: (d_W, d_b)} for linear layers).
        If x is given, a fresh forward pass is run first to fill the cache.
        """
        if x is not None:
            self.forward(x)
        d_x = d_y
        d_network = dict()
        for idx in reversed(range(len(self.layers))):
            layer = self.layers[idx]
            x = self.cache[idx]  # cache[idx] is the input that fed layer idx
            d_x, d_param = layer.backward(d_x, x)
            if d_param is not None:
                d_network[layer] = d_param
        return d_x, d_network
class ReLU:
    """Rectified linear unit: element-wise max(0, x), stateless."""

    @staticmethod
    def forward(x):
        """Zero out the negative entries of *x*."""
        return np.maximum(x, 0)

    @staticmethod
    def backward(d_y, x):
        """Pass gradients through only where the input was positive."""
        grad = np.array(d_y, copy=True)
        mask = x <= 0
        grad[mask] = 0
        return grad, None
class LeakyReLU:
    """ReLU variant that keeps a small linear slope for negative inputs."""

    def __init__(self, slope=0.01):
        # Multiplier applied to the negative part of the input.
        self.slope = slope

    def forward(self, x):
        """Return x for x > 0 and slope * x otherwise, element-wise."""
        positive_part = np.maximum(0, x)
        negative_part = np.minimum(0, x)
        return positive_part + negative_part * self.slope

    def backward(self, d_y, x):
        """Scale gradients by `slope` wherever the input was non-positive."""
        grad = np.array(d_y, copy=True)
        mask = x <= 0
        grad[mask] *= self.slope
        return grad, None
class ELU:
    """Exponential linear unit: x for x > 0, alpha*(exp(x)-1) otherwise."""

    def __init__(self, alpha=0.01):
        # Scale of the exponential (negative) branch.
        self.alpha = alpha

    def forward(self, x):
        """Apply the ELU element-wise."""
        negative_branch = np.minimum(0, self.alpha * (np.exp(x) - 1))
        return np.maximum(0, x) + negative_branch

    def backward(self, d_y, x):
        """Gradient: 1 for x > 0, alpha*exp(x) for x <= 0."""
        grad = np.array(d_y, copy=True)
        exp_term = self.alpha * np.exp(x)
        mask = (exp_term - self.alpha) <= 0  # equivalent to x <= 0
        grad[mask] *= exp_term[mask]
        return grad, None
class LinearLayer:
    """Affine map y = x @ W + b with small random initial parameters."""

    def __init__(self, i_size, o_size):
        self.i_size = i_size
        self.o_size = o_size
        # Scaled-down random init keeps early activations well-behaved.
        self.W = 0.1 * np.random.randn(i_size, o_size)
        self.b = 0.1 * np.random.randn(o_size)

    def forward(self, x):
        """Apply the affine transform to a (batch, i_size) input."""
        return np.matmul(x, self.W) + self.b

    def backward(self, d_y, x):
        """Return (d_x, (d_W, d_b)) given upstream gradient d_y at input x."""
        grad_b = np.sum(d_y, axis=0)
        grad_W = x.T @ d_y
        grad_x = d_y @ self.W.T
        return grad_x, (grad_W, grad_b)
class NeuralNetPotential(Function):
    """
    A wrapper for NeuralNetFunction: calling it returns exp(nn(x))
    rather than the raw network output.
    """
    def __init__(self, *args):
        self.dimension = args[0][0]  # Input dimension, from the first layer spec
        self.nn = NeuralNetFunction(*args)

    def __call__(self, *parameters):
        return np.exp(self.nn(*parameters))

    def batch_call(self, x):
        raw = self.nn.forward(x, save_cache=False)
        return np.exp(raw).reshape(-1)

    def set_parameters(self, parameters):
        self.nn.set_parameters(parameters)

    def parameters(self):
        return self.nn.parameters()
class GaussianNeuralNetPotential(Function):
    """Neural-net potential multiplied by a (smoothed) Gaussian prior."""

    def __init__(self, *args):
        self.dimension = args[0][0]  # Input dimension, from the first layer spec
        self.nn = NeuralNetFunction(*args)
        self.prior = None  # GaussianFunction, fitted or set later

    def __call__(self, *parameters):
        # The 0.001 offset keeps the result positive where the prior vanishes.
        return np.exp(self.nn(*parameters)) * (self.prior(*parameters) + 0.001)

    def batch_call(self, x):
        nn_part = np.exp(self.nn.forward(x, save_cache=False)).reshape(-1)
        prior_part = self.prior.batch_call(x) + 0.001
        return nn_part * prior_part

    def set_empirical_prior(self, data):
        """Fit the Gaussian prior to the sample mean/covariance of *data*."""
        mean = np.mean(data, axis=0).reshape(-1)
        cov = np.cov(data.T).reshape(self.dimension, self.dimension)
        if self.prior is None:
            self.prior = GaussianFunction(mean, cov)
        else:
            self.prior.set_parameters(mean, cov)

    def set_parameters(self, parameters):
        nn_params, prior_params = parameters
        self.nn.set_parameters(nn_params)
        if self.prior is None:
            self.prior = GaussianFunction(*prior_params)
        else:
            self.prior.set_parameters(*prior_params)

    def parameters(self):
        return (
            self.nn.parameters(),
            (self.prior.mu, self.prior.sig)
        )
class TableNeuralNetPotential(Function):
    """Neural-net potential over discrete domains times an empirical table prior."""

    def __init__(self, *args, domains):
        self.dimension = args[0][0]  # Input dimension, from the first layer spec
        self.nn = NeuralNetFunction(*args)
        self.domains = domains
        self.prior = None  # TableFunction, fitted or set later

    def __call__(self, *parameters):
        # The 0.001 offset keeps the result positive for unseen assignments.
        return np.exp(self.nn(*parameters)) * (self.prior(*parameters) + 0.001)

    def batch_call(self, x):
        nn_part = np.exp(self.nn.forward(x, save_cache=False)).reshape(-1)
        return nn_part * (self.prior.batch_call(x) + 0.001)

    def set_empirical_prior(self, data):
        """Build a normalized frequency table from the assignments in *data*."""
        counts = np.zeros(shape=[len(d.values) for d in self.domains])
        rows, freq = np.unique(data, return_counts=True, axis=0)
        counts[tuple(rows.T)] = freq
        counts /= np.sum(counts)
        if self.prior is None:
            self.prior = TableFunction(counts)
        else:
            self.prior.table = counts

    def set_parameters(self, parameters):
        self.nn.set_parameters(parameters[0])
        if self.prior is None:
            self.prior = TableFunction(parameters[1])
        else:
            self.prior.table = parameters[1]

    def parameters(self):
        return (
            self.nn.parameters(),
            self.prior.table
        )
class CGNeuralNetPotential(Function):
    """Neural-net potential over mixed continuous/discrete domains, times a
    categorical-Gaussian prior fitted from data."""

    def __init__(self, *args, domains):
        self.dimension = args[0][0]  # The dimension of the input parameters
        self.nn = NeuralNetFunction(*args)
        self.domains = domains  # one domain per input; d.continuous marks continuous ones
        self.prior = None  # CategoricalGaussianFunction, fitted or set later

    def __call__(self, *parameters):
        # The 0.001 offset keeps the result positive where the prior vanishes.
        return np.exp(self.nn(*parameters)) * (self.prior(*parameters) + 0.001)

    def batch_call(self, x):
        return np.exp(self.nn.forward(x, save_cache=False)).reshape(-1) * (self.prior.batch_call(x) + 0.001)

    def set_empirical_prior(self, data):
        """Fit the prior from *data* (rows = samples, columns = domains):
        a weight table over the discrete assignments plus one Gaussian per
        discrete assignment fitted on the continuous columns."""
        c_idx = [i for i, d in enumerate(self.domains) if d.continuous]
        d_idx = [i for i, d in enumerate(self.domains) if not d.continuous]
        w_table = np.zeros(shape=[len(self.domains[i].values) for i in d_idx])
        dis_table = np.zeros(shape=w_table.shape, dtype=int)
        # Empirical frequencies of each discrete assignment.
        idx, count = np.unique(data[:, d_idx].astype(int), return_counts=True, axis=0)
        w_table[tuple(idx.T)] = count
        w_table /= np.sum(w_table)
        # dis[0] is a standard-normal fallback for assignments with too few
        # samples to fit a covariance (dis_table defaults to index 0).
        dis = [GaussianFunction(np.zeros(len(d_idx)), np.eye(len(d_idx)))]
        for row in idx:
            row_idx = np.where(np.all(data[:, d_idx] == row, axis=1))
            row_data = data[row_idx][:, c_idx]
            if len(row_data) <= 1:
                continue  # not enough samples for a covariance estimate
            mu = np.mean(row_data, axis=0).reshape(-1)
            sig = np.cov(row_data.T).reshape(len(c_idx), len(c_idx))
            dis_table[tuple(row)] = len(dis)
            dis.append(GaussianFunction(mu, sig))
        if self.prior is None:
            self.prior = CategoricalGaussianFunction(w_table, dis_table, dis, self.domains)
        else:
            self.prior.set_parameters(w_table, dis_table, dis, self.domains)

    def set_parameters(self, parameters):
        # parameters = (nn parameter list, (w_table, dis_table, dis)).
        self.nn.set_parameters(parameters[0])
        if self.prior is None:
            self.prior = CategoricalGaussianFunction(*parameters[1], self.domains)
        else:
            self.prior.set_parameters(*parameters[1], self.domains)

    def parameters(self):
        return (
            self.nn.parameters(),
            (self.prior.w_table, self.prior.dis_table, self.prior.dis)
        )
|
from django import forms
from .models import *
class NoticeBoardForm(forms.ModelForm):
    """Create/edit form for a notice-board entry (message only)."""

    class Meta:
        model = NoticeBoard
        fields = ['message']

    def clean_message(self):
        """Reject an empty message string."""
        msg = self.cleaned_data.get('message')
        if msg == "":
            raise forms.ValidationError('Please add a message here')
        return msg
class NoticeBoardSearchForm(forms.ModelForm):
    """Search form for notice-board entries: free-text message filter plus
    the sent_by model field."""
    # Optional so an empty search matches everything.
    message = forms.CharField(required=False)

    class Meta:
        model = NoticeBoard
        fields = ['sent_by']
class ClinicalForm(forms.ModelForm):
    """Create/edit form for a Clinical examination record."""

    class Meta:
        model = Clinical
        fields = ['quarter', 'clinical_name', 'region', 'species', 'species_breed', 'specie_sex',
                  'owner_name',
                  'owner_contact_no',
                  'owner_gender',
                  'localty',
                  'localty_longitude',
                  'localty_latitude',
                  'animal_group_size',
                  'animalID',
                  'pet_name',
                  'animal_live_weight',
                  'anamnesis',
                  'principal_signs',
                  'clinical_diagnosis',
                  'clinical_prognosis',
                  'comment',
                  # 'timestamp',
                  ]

    def clean_clinical_name(self):
        """Require a clinical name to be present."""
        clinical_name = self.cleaned_data.get('clinical_name')
        # `is None` replaces the non-idiomatic `== None` comparison (PEP 8).
        if clinical_name is None:
            raise forms.ValidationError('This field cannot be left blank')
        return clinical_name

    def clean_owner_name(self):
        """Require a non-empty owner name."""
        owner_name = self.cleaned_data.get('owner_name')
        if owner_name == "":
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class ClinicalApproveOneForm(forms.ModelForm):
    """First-level approval form for a Clinical record."""

    class Meta:
        model = Clinical
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        """Require an approval choice to be selected."""
        approve_one = self.cleaned_data.get('approve_one')
        # `is None` replaces the non-idiomatic `== None` comparison (PEP 8).
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class ClinicalApproveTwoForm(forms.ModelForm):
    """Second-level approval form for a Clinical record.

    NOTE(review): Meta.fields exposes 'approve_one' while the clean hook
    below targets 'approve_two'. Django only invokes clean_<name> for
    fields that are on the form, so clean_approve_two never runs as
    written -- looks like a copy-paste slip; confirm the intended field
    (the commented-out line suggests ['approve_two']).
    """
    class Meta:
        model = Clinical
        fields = ['approve_one', 'comment']
        # fields = ['approve_two']

    def clean_approve_two(self):  # Validates the approval choice
        approve_two = self.cleaned_data.get('approve_two')
        if (approve_two == None):
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class ClinicalSearchForm(forms.ModelForm):
    """Search form for Clinical records, with a CSV-export toggle."""
    class Meta:
        model = Clinical
        fields = ['clinical_name', 'owner_name', 'export_to_CSV']
class SearchForm(forms.Form):  # Customized (non-model) form used for searching
    """Plain search form: optional employee filter plus a CSV-export toggle."""
    employee = forms.CharField(required=False)
    # start_date = forms.DateTimeField(required=False, label=" Start Date and Time")
    # end_date = forms.DateTimeField(required=False, label=" End Date and Time")
    export_to_CSV = forms.BooleanField(required=False, label="Export to CSV")
class QuarterForm(forms.ModelForm):
    """Create/edit form for a Quarter (name only)."""
    class Meta:
        model = Quarter
        fields = ['name']
# class ClinicalApproveSearchForm(forms.ModelForm):
# class Meta:
# model = Clinical
# fields = ['clinical_name']
class DiseaseForm(forms.ModelForm):
    """Create/edit form for a DiseaseReport.

    Fields are listed explicitly (not '__all__'), so anything not named
    here -- e.g. the approval columns -- is excluded from the form.
    """
    class Meta:
        model = DiseaseReport
        fields = ['surveillance_type',
                  'quarter',
                  'species',
                  'species_breed',
                  'specie_sex',
                  'owner_name',
                  'owner_contact_no',
                  'owner_gender',
                  'owner_nin_no',
                  'localty',
                  'localty_longitude',
                  'localty_latitude',
                  'pet_name',
                  'animal_group_size',
                  'animalID',
                  'new_outbreak',
                  'reporter_name',
                  'start_date',
                  'end_date',
                  #'month',
                  #'year',
                  'specie_age',
                  'principal_signs',
                  'clinical_diagnosis',
                  'disease_code',
                  'clinical_prognosis',
                  'vaccination_history',
                  'notifiable_disease',
                  'notification_frequency',
                  'zoonosis',
                  'no_of_cases',
                  'no_of_deaths',
                  'no_destroyed',
                  'incedence_rate',
                  'motality_rate',
                  'mobidity_rate',
                  'lab_sample_collected',
                  'sample_type',
                  'sample_ID',
                  'lab_test_applied',
                  'lab_test_results',
                  'control_measures',
                  'comment'
                  ]

    def clean_owner_name(self):  # Validates the owner name field
        owner_name = self.cleaned_data.get('owner_name')
        if (owner_name == ""):
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class DiseaseReportApproveOneForm(forms.ModelForm):
    """First-stage approval form for DiseaseReport records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = DiseaseReport
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class DiseaseReportApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for DiseaseReport records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = DiseaseReport
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class LabForm(forms.ModelForm):
    """Create/edit form for a Lab record."""
    class Meta:
        model = Lab
        fields = [
            'surveillance_type', 'quarter', 'species', 'species_breed',
            'specie_sex', 'owner_name', 'owner_contact_no', 'owner_gender',
            'owner_nin_no', 'localty', 'localty_longitude', 'localty_latitude',
            'pet_name', 'animal_group_size', 'animalID', 'new_outbreak',
            'reporter_name', 'start_date', 'end_date', 'specie_age',
            'principal_signs', 'clinical_diagnosis', 'disease_code',
            'clinical_prognosis', 'vaccination_history', 'notifiable_disease',
            'incedence_rate', 'analysis_date', 'lab_sample_collected',
            'sample_type', 'sample_ID', 'lab_test_applied', 'lab_test_results',
            'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class LabApproveOneForm(forms.ModelForm):
    """First-stage approval form for Lab records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = Lab
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class LabApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for Lab records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = Lab
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class AbattoirForm(forms.ModelForm):
    """Create/edit form for an Abattoir record."""
    class Meta:
        model = Abattoir
        fields = [
            'surveillance_type', 'quarter', 'species', 'species_breed',
            'specie_sex', 'owner_name', 'owner_contact_no', 'owner_gender',
            'owner_nin_no', 'localty', 'localty_longitude', 'localty_latitude',
            'pet_name', 'animal_group_size', 'animalID', 'new_outbreak',
            'reporter_name', 'start_date', 'end_date', 'specie_age',
            'principal_signs', 'clinical_diagnosis', 'disease_code',
            'clinical_prognosis', 'vaccination_history', 'notifiable_disease',
            'notification_frequency', 'incedence_rate', 'motality_rate',
            'mobidity_rate', 'lab_sample_collected', 'sample_type', 'sample_ID',
            'lab_test_applied', 'lab_test_results', 'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class AbattoirApproveOneForm(forms.ModelForm):
    """First-stage approval form for Abattoir records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = Abattoir
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class AbattoirApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for Abattoir records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = Abattoir
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class LocalityForm(forms.ModelForm):
    """Create/edit form for a Locality record."""
    class Meta:
        model = Locality
        fields = [
            'surveillance_type', 'quarter', 'species', 'species_breed',
            'specie_sex', 'owner_name', 'owner_contact_no', 'owner_gender',
            'owner_nin_no', 'localty', 'localty_longitude', 'localty_latitude',
            'pet_name', 'animal_group_size', 'animalID', 'new_outbreak',
            'reporter_name', 'start_date', 'end_date', 'specie_age',
            'principal_signs', 'clinical_diagnosis', 'disease_code',
            'clinical_prognosis', 'vaccination_history', 'notifiable_disease',
            'notification_frequency', 'incedence_rate', 'motality_rate',
            'mobidity_rate', 'lab_sample_collected', 'sample_type', 'sample_ID',
            'lab_test_applied', 'lab_test_results', 'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class LocalityApproveOneForm(forms.ModelForm):
    """First-stage approval form for Locality records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = Locality
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class LocalityApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for Locality records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = Locality
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class VaccinationForm(forms.ModelForm):
    """Create/edit form for a Vaccination record."""
    class Meta:
        model = Vaccination
        fields = [
            'surveillance_type', 'quarter', 'species', 'species_breed',
            'specie_sex', 'owner_name', 'owner_contact_no', 'owner_gender',
            'owner_nin_no', 'localty', 'localty_longitude', 'localty_latitude',
            'pet_name', 'animal_group_size', 'animalID', 'new_outbreak',
            'reporter_name', 'start_date', 'end_date', 'specie_age',
            'principal_signs', 'clinical_diagnosis', 'disease_code',
            'clinical_prognosis', 'vaccination_history', 'notifiable_disease',
            'notification_frequency', 'incedence_rate', 'motality_rate',
            'mobidity_rate', 'lab_sample_collected', 'sample_type', 'sample_ID',
            'lab_test_applied', 'lab_test_results', 'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class VaccinationApproveOneForm(forms.ModelForm):
    """First-stage approval form for Vaccination records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = Vaccination
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class VaccinationApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for Vaccination records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = Vaccination
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class VetInfraIndustryForm(forms.ModelForm):
    """Create/edit form for a VetInfraIndustry record."""
    class Meta:
        model = VetInfraIndustry
        fields = [
            'quarter', 'species', 'species_breed', 'specie_sex', 'owner_name',
            'owner_contact_no', 'owner_gender', 'owner_nin_no', 'localty',
            'localty_longitude', 'localty_latitude', 'pet_name',
            'animal_group_size', 'animalID', 'new_outbreak', 'reporter_name',
            'start_date', 'end_date', 'specie_age', 'principal_signs',
            'clinical_diagnosis', 'disease_code', 'clinical_prognosis',
            'vaccination_history', 'notifiable_disease',
            'notification_frequency', 'incedence_rate', 'motality_rate',
            'mobidity_rate', 'lab_sample_collected', 'sample_type', 'sample_ID',
            'lab_test_applied', 'lab_test_results', 'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class VetInfraIndustryApproveOneForm(forms.ModelForm):
    """First-stage approval form for VetInfraIndustry records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = VetInfraIndustry
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class VetInfraIndustryApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for VetInfraIndustry records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = VetInfraIndustry
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class PermitsForm(forms.ModelForm):
    """Create/edit form for a Permits record."""
    class Meta:
        model = Permits
        fields = [
            'quarter', 'species', 'species_breed', 'specie_sex', 'owner_name',
            'owner_contact_no', 'owner_gender', 'owner_nin_no', 'localty',
            'localty_longitude', 'localty_latitude', 'pet_name',
            'animal_group_size', 'animalID', 'new_outbreak', 'reporter_name',
            'start_date', 'end_date', 'specie_age', 'principal_signs',
            'clinical_diagnosis', 'disease_code', 'clinical_prognosis',
            'vaccination_history', 'notifiable_disease',
            'notification_frequency', 'incedence_rate', 'motality_rate',
            'mobidity_rate', 'lab_sample_collected', 'sample_type', 'sample_ID',
            'lab_test_applied', 'lab_test_results', 'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class PermitsApproveOneForm(forms.ModelForm):
    """First-stage approval form for Permits records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = Permits
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class PermitsApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for Permits records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = Permits
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class TransportFleetForm(forms.ModelForm):
    """Create/edit form for a TransportFleet record."""
    class Meta:
        model = TransportFleet
        fields = [
            'quarter', 'species', 'species_breed', 'specie_sex', 'owner_name',
            'owner_contact_no', 'owner_gender', 'owner_nin_no', 'localty',
            'localty_longitude', 'localty_latitude', 'pet_name',
            'animal_group_size', 'animalID', 'new_outbreak', 'reporter_name',
            'start_date', 'end_date', 'specie_age', 'principal_signs',
            'clinical_diagnosis', 'disease_code', 'clinical_prognosis',
            'vaccination_history', 'notifiable_disease',
            'notification_frequency', 'incedence_rate', 'motality_rate',
            'mobidity_rate', 'lab_sample_collected', 'sample_type', 'sample_ID',
            'lab_test_applied', 'lab_test_results', 'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class TransportFleetApproveOneForm(forms.ModelForm):
    """First-stage approval form for TransportFleet records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = TransportFleet
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class TransportFleetApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for TransportFleet records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = TransportFleet
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
class ProductionForm(forms.ModelForm):
    """Create/edit form for a Production record."""
    class Meta:
        model = Production
        fields = [
            'new_outbreak', 'reporter_name', 'start_date', 'end_date',
            'quarter', 'localty', 'localty_longitude', 'localty_latitude',
            'production_system', 'production_type', 'species', 'species_breed',
            'animal_group_size', 'no_animal_producer', 'no_of_borns',
            'no_of_deaths', 'animalID', 'no_of_milkltres', 'no_of_eggs',
            'owner_name', 'owner_contact_no', 'owner_gender', 'owner_nin_no',
            'cost_produced_per_milk_ltres', 'cost_produced_eggs', 'comment',
        ]

    def clean_owner_name(self):
        """Reject a blank owner name (`not x` also covers a missing/None value,
        which the old `== ""` comparison let through)."""
        owner_name = self.cleaned_data.get('owner_name')
        if not owner_name:
            raise forms.ValidationError('This field cannot be left blank')
        return owner_name
class ProductionApproveOneForm(forms.ModelForm):
    """First-stage approval form for Production records.

    NOTE(review): the declared editable field is 'approve_two' even though
    this is the ...One form — confirm the intended field with the views.
    """
    class Meta:
        model = Production
        fields = ['approve_two', 'comment']

    def clean_approve_two(self):
        # Renamed from clean_approve_one: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_two = self.cleaned_data.get('approve_two')
        if approve_two is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_two
class ProductionApproveTwoForm(forms.ModelForm):
    """Second-stage approval form for Production records.

    NOTE(review): the declared editable field is 'approve_one' even though
    this is the ...Two form — confirm the intended field with the views.
    """
    class Meta:
        model = Production
        fields = ['approve_one', 'comment']

    def clean_approve_one(self):
        # Renamed from clean_approve_two: Django only invokes clean_<field>
        # hooks for declared form fields, so the old hook never ran.
        approve_one = self.cleaned_data.get('approve_one')
        if approve_one is None:
            raise forms.ValidationError('Please choose one from the list')
        return approve_one
|
import os
import re
# Environment variables
# DB configuration
# Coerce to int: os.environ values are always strings, so without int() the
# constant's type depended on whether the variable was set.
DB_PORT = int(os.environ.get('DB_PORT', 6379))
DB_HOST = os.environ.get('DB_HOST', 'localhost')
# App config
MAX_PROCESS = os.cpu_count()
MAX_LINES_TO_PARSE = 500
BLOCK_SIZE = 65536
DB_INGEST_INTERVAL = 5
# Validates an absolute http(s)/ftp(s) URL: scheme, host (domain, localhost
# or dotted-quad IP), optional port, optional path/query.
URL_RE = re.compile(
    r'^(?:http|ftp)s?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Captures runs of word characters (used to split text into words).
SPLIT_WORDS_RE = r'\b(\w*)\b'
|
#!/usr/bin/python3
""" This script gets the commits (last 10) of a given repository.
It doesn't check arguments passed to the script like number or type.
You've been warned!
"""
import requests
from sys import argv

if __name__ == "__main__":
    url = "https://api.github.com/repos/"
    # Original argument order preserved: argv[2] is the owner, argv[1] the repo.
    query = "{}/{}/commits".format(argv[2], argv[1])
    commits = requests.get(url + query).json()
    # On API errors GitHub returns a dict, not a list; iterating it would
    # previously crash with a TypeError on commit["sha"].
    if isinstance(commits, list):
        # Slice replaces the enumerate-and-break counting idiom.
        for commit in commits[:10]:
            sha = commit["sha"]
            author = commit["commit"]["author"]["name"]
            print(sha + ": " + author)
|
import logging
from collections import OrderedDict
from one.alf.files import session_path_parts
import warnings
from ibllib.pipes.base_tasks import ExperimentDescriptionRegisterRaw
from ibllib.pipes import tasks, training_status
from ibllib.io import ffmpeg
from ibllib.io.extractors.base import get_session_extractor_type
from ibllib.io.extractors import training_audio, bpod_trials, camera
from ibllib.qc.camera import CameraQC
from ibllib.qc.task_metrics import TaskQC, HabituationQC
from ibllib.qc.task_extractors import TaskQCExtractor
_logger = logging.getLogger(__name__)
warnings.warn('`pipes.training_preprocessing` to be removed in favour of dynamic pipeline')
# level 0
class TrainingRegisterRaw(tasks.Task):
    """Registration placeholder task: produces no output files."""
    priority = 100

    def _run(self):
        # Nothing to extract; report an empty list of outputs.
        return []
class TrainingTrials(tasks.Task):
    """Extract trials and wheel data from an iblrig training session, then run task QC."""
    priority = 90
    level = 0
    force = False
    signature = {
        'input_files': [('_iblrig_taskData.raw.*', 'raw_behavior_data', True),
                        ('_iblrig_taskSettings.raw.*', 'raw_behavior_data', True),
                        ('_iblrig_encoderEvents.raw*', 'raw_behavior_data', True),
                        ('_iblrig_encoderPositions.raw*', 'raw_behavior_data', True)],
        'output_files': [('*trials.goCueTrigger_times.npy', 'alf', True),
                         ('*trials.table.pqt', 'alf', True),
                         ('*wheel.position.npy', 'alf', True),
                         ('*wheel.timestamps.npy', 'alf', True),
                         ('*wheelMoves.intervals.npy', 'alf', True),
                         ('*wheelMoves.peakAmplitude.npy', 'alf', True)]
    }

    def _run(self):
        """
        Extracts an iblrig training session
        """
        trials, wheel, output_files = bpod_trials.extract_all(self.session_path, save=True)
        if trials is None:
            return None
        if self.one is None or self.one.offline:
            return output_files
        # Run the task QC
        # Compile task data for QC; `extractor_type` was previously named
        # `type`, shadowing the builtin.
        extractor_type = get_session_extractor_type(self.session_path)
        if extractor_type == 'habituation':
            qc = HabituationQC(self.session_path, one=self.one)
            qc.extractor = TaskQCExtractor(self.session_path, one=self.one)
        else:  # Update wheel data
            qc = TaskQC(self.session_path, one=self.one)
            qc.extractor = TaskQCExtractor(self.session_path, one=self.one)
            qc.extractor.wheel_encoding = 'X1'
        # Aggregate and update Alyx QC fields
        qc.run(update=True)
        return output_files
class TrainingVideoCompress(tasks.Task):
    """Compress raw avi videos to mp4, extract camera timestamps, run video QC."""
    priority = 90
    io_charge = 100
    job_size = 'large'

    def _run(self):
        # avi to mp4 compression
        compression_cmd = ('ffmpeg -i {file_in} -y -nostdin -codec:v libx264 -preset slow -crf 29 '
                           '-nostats -codec:a copy {file_out}')
        outputs = ffmpeg.iblrig_video_compression(self.session_path, compression_cmd)
        if not outputs:
            _logger.info('No compressed videos found; skipping timestamp extraction')
            return  # labels the task as empty if no output
        # Video timestamps extraction
        data, timestamp_files = camera.extract_all(self.session_path, save=True, video_path=outputs[0])
        outputs.extend(timestamp_files)
        # Video QC
        CameraQC(self.session_path, 'left', one=self.one, stream=False).run(update=True)
        return outputs
class TrainingAudio(tasks.Task):
    """
    Extracts and saves the session's microphone audio recording.

    (The previous docstring incorrectly described this task as raw
    electrophysiology QC; the body only calls training_audio.extract_sound.)
    """
    cpu = 2
    priority = 10  # a lot of jobs depend on this one
    level = 0  # this job doesn't depend on anything

    def _run(self, overwrite=False):
        # `overwrite` is kept for interface compatibility but is not used.
        return training_audio.extract_sound(self.session_path, save=True, delete=True)
# level 1
class TrainingDLC(tasks.Task):
    """Placeholder so the DLC job exists in the pipeline; performs no work."""

    def _run(self):
        # empty placeholder for job creation only
        return None
class TrainingStatus(tasks.Task):
    """Compute a subject's latest training status, plot it, and push it to Alyx."""
    priority = 90
    level = 1
    force = False
    signature = {
        'input_files': [('_iblrig_taskData.raw.*', 'raw_behavior_data', True),
                        ('_iblrig_taskSettings.raw.*', 'raw_behavior_data', True),
                        ('*trials.table.pqt', 'alf', True)],
        'output_files': []
    }

    def _run(self, upload=True):
        """
        Extracts training status for subject
        """
        df = training_status.get_latest_training_information(self.session_path, self.one)
        if df is not None:
            training_status.make_plots(self.session_path, self.one, df=df, save=True, upload=upload)
            # Update status map in JSON field of subjects endpoint
            # TODO This requires exposing the json field of the subjects endpoint
            if self.one and not self.one.offline:
                _logger.debug('Updating JSON field of subjects endpoint')
                try:
                    # First occurrence of each training_status, indexed by date.
                    status = (df.set_index('date')[['training_status', 'session_path']].drop_duplicates(
                        subset='training_status', keep='first').to_dict())
                    # NOTE(review): .items() of the two-column to_dict() yields
                    # the pairs ('training_status', {...}) and ('session_path',
                    # {...}); the names `date`/`sess` are misleading, and this
                    # unpacking only works with exactly two columns — confirm.
                    date, sess = status.items()
                    data = {'trained_criteria': {v.replace(' ', '_'): (k, self.one.path2eid(sess[1][k])) for k, v
                                                 in date[1].items()}}
                    _, subject, *_ = session_path_parts(self.session_path)
                    self.one.alyx.json_field_update('subjects', subject, data=data)
                except KeyError:
                    _logger.error('Failed to update subject training status on Alyx: json field not available')
        output_files = []
        return output_files
class TrainingExtractionPipeline(tasks.Pipeline):
    """Static two-level extraction pipeline for training sessions."""
    label = __name__

    def __init__(self, session_path, **kwargs):
        super(TrainingExtractionPipeline, self).__init__(session_path, **kwargs)
        # Renamed from `tasks`, which shadowed the imported `tasks` module.
        task_map = OrderedDict()
        self.session_path = session_path
        # level 0
        task_map['ExperimentDescriptionRegisterRaw'] = ExperimentDescriptionRegisterRaw(self.session_path)
        task_map['TrainingRegisterRaw'] = TrainingRegisterRaw(self.session_path)
        task_map['TrainingTrials'] = TrainingTrials(self.session_path)
        task_map['TrainingVideoCompress'] = TrainingVideoCompress(self.session_path)
        task_map['TrainingAudio'] = TrainingAudio(self.session_path)
        # level 1
        task_map['TrainingStatus'] = TrainingStatus(self.session_path, parents=[task_map['TrainingTrials']])
        task_map['TrainingDLC'] = TrainingDLC(
            self.session_path, parents=[task_map['TrainingVideoCompress']])
        self.tasks = task_map
|
from itertools import islice, product
def parse_file(file):
    """Yield one parsed test-case dict per case in *file*.

    Layout: a count line T (ignored), then per case a target line, a
    food-count line, and that many food lines.
    """
    lines = iter(file)
    next(lines)  # Discard the number of test cases T
    while True:
        # The old `while lines:` was always true (iterators are truthy) and
        # relied on StopIteration leaking out of the generator, which under
        # PEP 479 (Python 3.7+) becomes a RuntimeError; stop explicitly.
        try:
            target = next(lines)
        except StopIteration:
            return
        food_count = int(next(lines))
        foods = islice(lines, food_count)
        yield format_testcase(target, foods)
def format_testcase(target, foods):
    """Bundle a raw target line and an iterable of food lines into one case."""
    parsed_foods = [format_macronutrients(line) for line in foods]
    return {'target': format_macronutrients(target), 'foods': parsed_foods}
def format_macronutrients(macronutrients):
    """Parse a whitespace-separated line of integers into a tuple.

    Uses split() instead of split(' '): repeated spaces or leading blanks
    previously produced int('') errors.
    """
    return tuple(int(nutrient) for nutrient in macronutrients.split())
def evaluate_testcase(testcase):
    """Return 'yes' when some subset of foods sums component-wise to the target."""
    target = testcase['target']
    foods = testcase['foods']
    for mask in product((True, False), repeat=len(foods)):
        chosen = [food for food, keep in zip(foods, mask) if keep]
        totals = tuple(map(sum, zip(*chosen)))
        if totals == target:
            return 'yes'
    return 'no'
if __name__ == '__main__':
    with open('input.txt', 'r') as file:
        for index, testcase in enumerate(parse_file(file)):
            # Parenthesised print works on both Python 2 and 3; the bare
            # print statement was a SyntaxError under Python 3.
            print('Case #{n}: {result}'.format(
                n=index + 1,
                result=evaluate_testcase(testcase),
            ))
|
#! /bin/python
import json
import os
import subprocess
# Module-level accumulators shared by the functions below.
all_traces = []  # one decoded trace dict per line across all trace dump files
startup_bbs = []  # unique basic blocks seen in the startup dump files
merged_bbtrace = {}  # call site -> de-duplicated list of basic blocks
# Statistics reported by rm_startup_bbs() at the end of the run.
NumKeysAltered = 0
NumEmptyKeys = 0
def get_trace_files():
    """Find bbtrace dump files in /tmp and split them into (trace, startup) lists."""
    # NOTE(review): on Python 3 check_output returns bytes, so the old
    # s.split("\n") crashed; decode() fixes that and is a no-op ascii decode
    # on Python 2.
    s = subprocess.check_output(
        ['find', '/tmp/', '-maxdepth', '1', '-name', "rcvry_bbtrace_dump.json*"]).decode()
    j_files = [f for f in s.split("\n") if f]
    j_files_startup = [f for f in j_files if ".startup" in f]
    j_files_trace = [f for f in j_files if ".startup" not in f]
    # print() calls are valid on both Python 2 and 3.
    print("Num startup trace files: %d" % (len(j_files_startup)))
    print("Num trace files: %d" % (len(j_files_trace)))
    return j_files_trace, j_files_startup
def load_all_traces():
    """Populate the module-level all_traces and startup_bbs from the dump files.

    Each dump file contains one JSON document per line.
    """
    jd = json.JSONDecoder()
    trace_files, startup_files = get_trace_files()
    for jfile in trace_files:
        print("Opening json file: %s" % (jfile))
        with open(jfile) as jf:
            # Iterate lines directly instead of the readline() loop; the
            # unused initial `jfile = "/tmp/..."` assignment was removed.
            for trace in jf:
                all_traces.append(jd.decode(trace))
    for jfile in startup_files:
        print("Opening startup json file: %s" % (jfile))
        with open(jfile) as jf:
            for startup in jf:
                bbs = jd.decode(startup)
                # Keep only the first occurrence of each basic block.
                for b in [bb for bb in bbs if bb not in startup_bbs]:
                    startup_bbs.append(b)
    return
def merge_bbtraces():
    """Union all per-run traces into merged_bbtrace, de-duplicating per site."""
    for bbt in all_traces:  # iterate directly instead of range(len(...))
        for site, bbs in bbt.items():
            if site in merged_bbtrace:
                for bb in bbs:
                    if bb not in merged_bbtrace[site]:
                        merged_bbtrace[site].append(bb)
            else:
                # BUG FIX: copy instead of aliasing bbt[site]; the alias meant
                # later appends mutated the source lists inside all_traces.
                merged_bbtrace[site] = list(bbs)
    return
def rm_startup_bbs():
    """Remove startup-phase basic blocks from merged_bbtrace in place.

    Updates the module counters NumKeysAltered / NumEmptyKeys and deletes
    sites whose remaining trace is just the single block 0 (treated as a
    sentinel that is never removed).
    """
    global NumKeysAltered, NumEmptyKeys
    trace_bbs = []
    for k in merged_bbtrace.keys():
        # print "k: %s" % (k)
        # De-duplicate this site's blocks before comparing against startup.
        trace_bbs = list(set(merged_bbtrace[k]))
        altered = False
        rm_bbs = []
        for t in trace_bbs:
            for s in startup_bbs:
                # Block id 0 is never removed.
                if (s != 0) and (t == s):
                    if not s in rm_bbs:
                        rm_bbs.append(s)
                        altered = True
        if altered:
            NumKeysAltered = NumKeysAltered + 1
            for b in rm_bbs:
                merged_bbtrace[k].remove(b)
                # print "\tremoved: %d from key: %s" % (b, k)
    # Drop sites left with only the sentinel block 0.
    empty_keys = []
    for k in merged_bbtrace.keys():
        t = merged_bbtrace[k]
        if (len(t) == 1) and (t[0] == 0):
            empty_keys.append(k)
    for e in empty_keys:
        del merged_bbtrace[e]
        NumEmptyKeys = NumEmptyKeys + 1
    return
def dump_to_file(what, dest_json):
    """Serialise *what* as JSON into dest_json."""
    with open(dest_json, "w+") as out:
        json.dump(what, out)
    return
def dump_merged_to_file():
    """Write the merged trace map to its canonical location."""
    # Reuse dump_to_file instead of duplicating the open/json.dump logic.
    dump_to_file(merged_bbtrace, "./rcvry_bbtrace_dump.merged.json")
    return
if __name__ == "__main__":
load_all_traces()
print "Num trace-lists in all_traces: %d" % (len(all_traces))
merge_bbtraces()
print "Num sites after the merge: %d" % (len(merged_bbtrace.keys()))
print "Num trace-lists in merged_bbtrace: %d" % (len(merged_bbtrace.keys()))
print "Num bbs in startup: %d" % (len(startup_bbs))
rm_startup_bbs()
print "Num trace-lists altered: %d" % (NumKeysAltered)
print "Num trace-lists in altered merged_bbtrace: %d" % (len(merged_bbtrace.keys()))
print "Num empty keys removed: %d" % (NumEmptyKeys)
dump_to_file(merged_bbtrace, "./rcvry_bbtrace_dump.merged.json")
dump_to_file(startup_bbs, "./rcvry_startup_dump.merged.json")
|
# -*- coding: utf-8 -*-
import time
import sys
import RPi.GPIO as GPIO
from sklearn.cluster import KMeans
import pickle
repeat = 150
sleep_sec = 1
exist_list = []
not_exist_list = []
def reading():
    """Trigger the ultrasonic sensor once and return the measured distance in cm."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BOARD)
    TRIG = 12
    ECHO = 16
    GPIO.setup(TRIG, GPIO.OUT)
    GPIO.setup(ECHO, GPIO.IN)
    GPIO.output(TRIG, GPIO.LOW)
    time.sleep(0.3)
    # 10 microsecond trigger pulse
    GPIO.output(TRIG, True)
    time.sleep(0.00001)
    GPIO.output(TRIG, False)
    # BUG FIX: initialise both timestamps; if ECHO is already high the first
    # loop body never runs and signaloff was previously unbound.
    signaloff = signalon = time.time()
    while GPIO.input(ECHO) == 0:
        signaloff = time.time()
    while GPIO.input(ECHO) == 1:
        signalon = time.time()
    timepassed = signalon - signaloff
    # 17000 = 34000 cm/s speed of sound halved for the round trip —
    # TODO confirm the calibration constant.
    distance = timepassed * 17000
    return distance
    # The GPIO.cleanup() that followed the return was unreachable dead code
    # and has been removed; cleanup is left to the caller / process exit.
start = input("機械学習を始めます。よろしいですか?(yes or no) : ")
if start != "yes":
    exit(0)
print("まず、人がいないデータを2分間収集します。人がいないシチュエーションを作ってください。")
input("準備ができたら、enterキーを押してください。enterキー押下後5秒後に開始します。")
for i in [5, 4, 3, 2, 1]:
    print(str(i) + "秒前")
    time.sleep(1)
print("スタート")
for i in range(repeat):
    value = reading()
    # BUG FIX: this first phase collects the *no person* condition (see the
    # prompt above), so store it in not_exist_list — the two lists were
    # previously swapped. Also print the stored sample instead of calling
    # reading() a second time, which reported a different measurement.
    not_exist_list.append(value)
    msg = "超音波センサー: {0} cm".format(value)
    print(msg)
    time.sleep(sleep_sec)
print("つぎに、人がいるデータを2分間収集します。人がいるシチュエーションを作ってください。")
input("準備ができたら、enterキーを押してください。enterキー押下後5秒後に開始します。")
for i in [5, 4, 3, 2, 1]:
    print(str(i) + "秒前")
    time.sleep(1)
print("スタート")
for i in range(repeat):
    value = reading()
    # Second phase: person present.
    exist_list.append(value)
    msg = "超音波センサー: {0} cm".format(value)
    print(msg)
    time.sleep(sleep_sec)
print("学習処理をします。少々時間がかかるかもしれません。")
learning_list_of_exist = [[value] for value in exist_list]
learning_list_of_not_exist = [[value] for value in not_exist_list]
learning_list = learning_list_of_exist + learning_list_of_not_exist
# Two clusters: person present / absent.
model = KMeans(n_clusters=2)
model.fit(learning_list)
print(model.predict([[exist_list[0]]]))
print(model.predict([[not_exist_list[0]]]))
# Close the file handle instead of leaking it via open() inside the call.
with open("usonic_model.sav", "wb") as model_file:
    pickle.dump(model, model_file)
from rest_framework.permissions import BasePermission
class TempPermission(BasePermission):
    """Grants access only when the request user equals the admin marker string."""

    def has_permission(self, request, view):
        """View-level check: True only for the admin user.

        Returns an explicit bool; previously non-admin requests fell through
        and returned None (falsy, but not an explicit denial).
        """
        return request.user == "管理员"

    # Invoked by GenericAPIView.get_object() for object-level checks.
    def has_object_permission(self, request, view, obj):
        """Object-level check triggered when a view fetches a single object.

        Return `True` if permission is granted, `False` otherwise.
        """
        return request.user == "管理员"
|
import csv
import numpy as np
def loadParamter(paramterfile):
    """Read the first CSV row of integer weights and normalise them to sum 1.

    Fixes: uses open()/next() in place of the Python-2-only file() builtin
    and reader.next(), closes the file via a context manager, and drops the
    off-by-one extra zero element the old len+1 preallocation appended.
    """
    with open(paramterfile) as parafile:
        raw = next(csv.reader(parafile))
    weights = [int(v) for v in raw]
    total = float(sum(weights))
    return [w / total for w in weights]
def calScore(w, paramter):
    """Weighted score of one row: sum of float(w[i]) * paramter[i]."""
    total = 0
    for idx, feature in enumerate(w):
        total = total + float(feature) * paramter[idx]
    return total
def getScore(paramter):
    """Score every row of extration.csv with *paramter* weights and print the
    top 50000 (user_id,item_id) label rows by descending score.

    Fixes: open() replaces the Python-2-only file() builtin, the files are
    closed via context managers, prints are Python-2/3 compatible, and dead
    counters / commented-out experiments were removed.
    """
    with open('extration.csv') as extration_file, open('label.csv') as lable_file:
        e_reader = csv.reader(extration_file)
        l_reader = csv.reader(lable_file)
        scores = [calScore(w, paramter) for w in e_reader]
        buy = [row for row in l_reader]
    npscores = np.array(scores)
    argscores = np.argsort(-npscores)  # row indices, best score first
    for i in range(50000):
        print(buy[argscores[i]][0] + ',' + buy[argscores[i]][1])
if __name__ == '__main__':
    # Load the time-based weights, emit the CSV header, then print the
    # ranked user/item pairs. print() with a single argument behaves
    # identically on Python 2 and 3, unlike the original print statement.
    paramter = loadParamter('time_weight.csv')
    print('user_id,item_id')
    getScore(paramter)
|
from ..node_common.queryfunc import *
from models import *
def setupResults(sql):
    """Build the QuerySets for one VAMDC query and return them together
    with header statistics.

    Converts the incoming SQL-like query to Django Q objects, fetches
    matching transitions (truncated to TRANSLIM by wavelength when the
    radiative branch is requested), their reference sources, and the
    species/states, then assembles the XSAMS header counts.
    """
    q = sql2Q(sql)
    log.debug('Just ran sql2Q(sql); setting up QuerySets now.')
    transs = Transition.objects.filter(q)
    ntranss=transs.count()
    # Truncate only when transitions were actually requested and the
    # raw count exceeds the configured limit.
    if TRANSLIM < ntranss and (not sql.requestables or 'radiative' in sql.requestables):
        percentage = '%.1f'%(float(TRANSLIM)/ntranss *100)
        # Cut at the wavelength of the TRANSLIM-th transition so the
        # truncated set is a clean wavelength interval.
        newmax = transs[TRANSLIM].wave
        transs = Transition.objects.filter(q,Q(wave__lt=newmax))
        log.debug('Truncated results to %s, i.e %s A.'%(TRANSLIM,newmax))
    else:
        percentage=None
    log.debug('Transitions QuerySet set up. References next.')
    # All reference ids attached to the (possibly truncated) transitions.
    refIDs = TransRef.objects.filter(trans_id__in=transs).values_list('ref_id', flat=True)
    print "TransRef refIDs:", refIDs.count()
    #sources = Reference.objects.all()
    ## about 100 times slower than objects.all() objects
    #refIDs = set(tuple(transs.values_list('wavevac_ref_id', flat=True)) +
    #             tuple(transs.values_list('loggf_ref_id', flat=True)) +
    #             tuple(transs.values_list('gammarad_ref_id', flat=True)) +
    #             tuple(transs.values_list('gammastark_ref_id', flat=True)) +
    #             tuple(transs.values_list('waals_ref', flat=True)))
    sources = Reference.objects.filter(pk__in=refIDs)
    log.debug('Sources QuerySet set up. References next.')
    # States are attached only when requested (or when nothing specific
    # was requested at all).
    addStates = (not sql.requestables or 'atomstates' in sql.requestables)
    atoms,nspecies,nstates = getSpeciesWithStates(transs,Species,State,addStates)
    # Rough response-size estimate in MB, linear in the transition count.
    if ntranss:
        size_estimate='%.2f'%(ntranss*0.0014 + 0.01)
    else:
        size_estimate='0.00'
    # NOTE(review): COUNT-ATOMS and COUNT-SPECIES both report nspecies;
    # presumably intentional for an atoms-only node -- confirm.
    headerinfo={\
            'TRUNCATED':percentage,
            'COUNT-ATOMS':nspecies,
            'COUNT-SPECIES':nspecies,
            'COUNT-STATES':nstates,
            'COUNT-RADIATIVE':ntranss,
            'APPROX-SIZE':size_estimate,
            }
    log.debug('Returning from setupResults()')
    return {'RadTrans':transs,
            'Atoms':atoms,
            'Sources':sources,
            'HeaderInfo':headerinfo,
            'Environments':Environments, #set up statically in node_common.models
            'Methods':getMethods(), #defined in node_common.queryfuncs
            'Functions':Functions #set up statically in node_common.models
           }
|
import os
# Solidity sources to flatten, in dependency order: OpenZeppelin 3.2.0
# contracts from the local Brownie package cache, then the project token.
PATHS = [
    "~/.brownie/packages/OpenZeppelin/openzeppelin-contracts@3.2.0/contracts/GSN/Context.sol",
    "~/.brownie/packages/OpenZeppelin/openzeppelin-contracts@3.2.0/contracts/math/SafeMath.sol",
    "~/.brownie/packages/OpenZeppelin/openzeppelin-contracts@3.2.0/contracts/token/ERC20/IERC20.sol",
    "~/.brownie/packages/OpenZeppelin/openzeppelin-contracts@3.2.0/contracts/token/ERC20/ERC20.sol",
    "~/.brownie/packages/OpenZeppelin/openzeppelin-contracts@3.2.0/contracts/token/ERC20/SafeERC20.sol",
    "~/.brownie/packages/OpenZeppelin/openzeppelin-contracts@3.2.0/contracts/utils/Address.sol",
    "contracts/TestnetToken.sol",
]
# Header emitted once before the concatenated sources (the per-file
# license/pragma lines are stripped via IGNORE below).
PREFIX = """
// SPDX-License-Identifier: MIT
pragma solidity ^0.6.12;
"""
# Any line starting with one of these prefixes is dropped from the output.
IGNORE = [
    "// SPDX-License-Identifier:",
    "import ",
    "pragma ",
]
def main():
    """Print a single flattened Solidity file: PREFIX followed by every
    file in PATHS with license/import/pragma lines stripped."""
    kept = []
    for raw_path in PATHS:
        with open(os.path.expanduser(raw_path), "r") as src:
            # De Morgan of the original all(not ...) filter.
            kept.extend(
                line for line in src
                if not any(line.strip().startswith(prefix) for prefix in IGNORE)
            )
    print(PREFIX + "".join(kept))


if __name__ == "__main__":
    main()
|
from tornado import ioloop, web, httpserver
from tornado.options import options
import os, sys
# Make the project root importable before the Django/terminal imports below.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE_DIR))
print(sys.path)
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'MiracleOps.settings'  # select the project's settings module
django.setup()  # bootstrap Django so ORM/models work inside this Tornado process
from terminal.handlers import *
from terminal.config import init_config
from terminal.ioloop import IOLoop
def welcome(port):
    """Print the start-up ASCII banner including the listening port."""
    print('''
    Welcome to the webssh!
    __ __
    _ _____ / /_ __________/ /_
    | | /| / / _ \/ __ \/ ___/ ___/ __ \\
    | |/ |/ / __/ /_/ (__ |__ ) / / /
    |__/|__/\___/_.___/____/____/_/ /_/
    Now start~
    Please visit the localhost:%s from the explorer~
    ''' % port)
# Tornado application settings: template/static folders live next to this file.
settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static"),
)
# URL routes: index/login pages plus the websocket endpoint for the terminal.
handlers = [
    (r"/", IndexHandler),
    (r"/login", IndexHandler),
    (r"/ws", WSHandler)
]
class Application(web.Application):
    """Tornado application bound to the module-level routes and settings."""

    def __init__(self):
        # Direct base-class call: equivalent to super() for this
        # single-inheritance class.
        web.Application.__init__(self, handlers, **settings)
def main():
    """Configure, bind and run the webssh HTTP server."""
    init_config()
    options.parse_config_file(os.path.join(BASE_DIR, "webssh.conf"))
    http_server = httpserver.HTTPServer(Application())
    http_server.listen(options.port, address="0.0.0.0")
    # Bug fix: print the banner *before* starting the blocking IO loop.
    # In the original, welcome() came after IOLoop.instance().start()
    # and was never reached.
    welcome(options.port)
    # Single blocking event loop. The original additionally called
    # ioloop.IOLoop.instance().start() afterwards, which was
    # unreachable dead code and has been removed.
    IOLoop.instance().start()


if __name__ == "__main__":
    main()
|
# Generated by Django 3.1.7 on 2021-05-26 11:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the Size model and link ShoppingCart to it via a nullable FK."""

    dependencies = [
        ('sampleapp', '0016_auto_20210526_0548'),
    ]

    operations = [
        # New lookup table holding the available product sizes.
        migrations.CreateModel(
            name='Size',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.CharField(max_length=100, null=True)),
            ],
        ),
        # Nullable so existing cart rows remain valid; CASCADE removes
        # cart entries when their Size is deleted.
        migrations.AddField(
            model_name='shoppingcart',
            name='size',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='sampleapp.size'),
        ),
    ]
|
import xlsxwriter
import requests
from bs4 import BeautifulSoup
# Scrape the digimon table from digidb.io and export it to Excel with
# an extra "Link" column holding each row's sprite image URL.
url = 'http://digidb.io/digimon-list'
html = requests.get(url)
soup = BeautifulSoup(html.content, "html.parser")

# --- table scraping -------------------------------------------------
table_header = soup.find_all("th")
table_tbody = soup.find("tbody")
table_tr = table_tbody.find_all("tr")

# Header row: replace the site's first column header with "No" and add
# a "Link" column for the image URL.
list_header = ["No", "Link"]
for th in table_header[1:]:
    list_header.append(th.text)

# Cell data: one list per <tr>, with non-breaking spaces stripped.
list_data = []
for tr in table_tr:
    row = []
    for td in tr.find_all("td"):
        row.append(td.text.replace(u'\xa0', u''))
    list_data.append(row)

# Sprite image URLs, one per table row.
imagesrc = []
for img in table_tbody.find_all("img"):
    imagesrc.append(img['src'])

# Insert the image link as the second column of every row.
# Bug fix: the original inserted the running row number (j + 1) here,
# so the collected image URLs were never used and the "Link" column
# contained numbers instead of links.
for j, row in enumerate(list_data):
    row.insert(1, imagesrc[j])

# --- Excel output ---------------------------------------------------
list_data_excel = list_data
list_data_excel.insert(0, list_header)
book = xlsxwriter.Workbook("ExcelDigimon.xlsx")
sheet = book.add_worksheet("DatabaseDigimon")
for row_idx in range(len(list_data_excel)):
    for col_idx in range(len(list_data_excel[row_idx])):
        sheet.write(row_idx, col_idx, list_data_excel[row_idx][col_idx])
book.close()
from flask import json
from nose.tools import eq_
from server import app
client = app.test_client()
def test_hello_world():
    """GET / returns 200, a JSON content type, and a Hello greeting."""
    # When: I access root path
    resp = client.get('/')
    # Then: Expected response is returned.
    # Plain asserts replace nose's eq_(): nose is unmaintained, and
    # assert a == b is equivalent (and renders nicer under pytest).
    assert resp.status_code == 200
    assert resp.headers['Content-Type'] == 'application/json'
    data = json.loads(resp.data.decode())
    assert data['message'].startswith('Hello')
|
import math
class Ship:
    """A ship with a position, a velocity, a heading and a fixed
    collision radius.

    The playing field wraps around: moving past an edge re-enters on
    the opposite side (see move_ship).
    """

    def __init__(self, x_location, y_location, x_spd, y_spd, angle):
        self.__angle = angle
        self.__x_params = [x_location, x_spd]  # [location, speed] along x
        self.__y_params = [y_location, y_spd]  # [location, speed] along y
        self.__radius = 1

    def get_drawing_param(self):
        """Return [x, y, heading] for the drawing layer."""
        return [self.__x_params[0], self.__y_params[0], self.__angle]

    def move_ship(self, min_x, max_x, min_y, max_y):
        """Advance one step, wrapping each coordinate into its bounds."""
        x, dx = self.__x_params
        y, dy = self.__y_params
        wrapped_x = min_x + (x + dx - min_x) % (max_x - min_x)
        wrapped_y = min_y + (y + dy - min_y) % (max_y - min_y)
        self.set_location(wrapped_x, wrapped_y)

    def set_location(self, new_x, new_y):
        """Place the ship at (new_x, new_y) without touching its speed."""
        self.__x_params[0] = new_x
        self.__y_params[0] = new_y

    def rotate(self, direction):
        """Turn by 7 degrees: 'l' counter-clockwise, 'r' clockwise."""
        if direction == 'l':
            self.__angle = (self.__angle + 7) % 360
        elif direction == 'r':
            self.__angle = (self.__angle - 7) % 360

    def get_angle(self):
        """Current heading in degrees, in [0, 360)."""
        return self.__angle

    def update_spd(self):
        """Accelerate by one unit along the current heading."""
        heading = math.radians(self.__angle)
        self.__x_params[1] += math.cos(heading)
        self.__y_params[1] += math.sin(heading)

    def get_radius(self):
        """Collision radius."""
        return self.__radius

    def get_location(self):
        """Return the (x, y) position."""
        return self.__x_params[0], self.__y_params[0]

    def get_speed(self):
        """Return the (x_speed, y_speed) pair."""
        return self.__x_params[1], self.__y_params[1]
|
import sys
import os
import subprocess
import docker
import shutil
import countconvert
# Host folder holding the datasets copied into each algorithm container.
datasetpath = "./dataset"
# Path inside the containers where result files are written.
saveFilePath = "/cve/saveresult"
# Host folder that results are copied back to and printed from.
saveHostPath = "./result"
def Select_Algo(list_algo,list_dataset):
    """Interactively choose algorithm containers by number.

    Entering 0 first creates a new container (docker run), copies all
    datasets into it, trains it on each dataset, then re-prompts
    recursively with the refreshed container list.

    Returns (list_algo, select_algo): container names and the 1-based
    index strings the user picked (comma separated input).

    NOTE(review): the docker command line is built from raw user input
    and run with shell=True -- shell metacharacters in the name/image
    would be interpreted; confirm whether input is trusted.
    """
    #Select Algorithm
    print("#Select Machinelearning Algorithms(Select 0 if you want to add an algorithm)")
    print("0 : Add Algorithm")
    for index,value in enumerate(list_algo,start=1):
        print(index,":",value)
    select_algo=input('Select Numbers:').split(',')
    if select_algo[0] =='0':
        print("#ADD NEW ALGORITHM")
        name = input('#Name:')
        image = input('#Image(ex.wnsghks30/softmax, mhiunn09/randomforest, jihyeon/cnn):')
        # Start a detached container from the chosen image.
        argument = 'docker run -i -t -d --name ' + name + ' ' + image + ' /bin/bash'
        subprocess.call(argument,shell=True)
        argument = 'docker ps'
        subprocess.call(argument,shell=True)
        # Seed the new container with every dataset and train on them all.
        Copy_Dataset_algo(name)
        Machine_Learn(name,list_dataset)
        # Re-prompt with the refreshed container list (recursion).
        list_algo=List_Algo()
        list_algo,select_algo=Select_Algo(list_algo,list_dataset)
    return list_algo,select_algo
def Select_Dataset(list_algo,list_dataset,datasetpath):
    """Interactively choose datasets by number.

    Entering 0 first imports a new dataset (copied from a user-supplied
    path into `datasetpath`), pushes it into every container, then
    re-prompts recursively with the refreshed dataset list.

    Returns (list_dataset, select_dataset): dataset names and the
    1-based index strings the user picked (comma separated input).
    """
    #Select Dataset
    print("#Select Datasets(Select 0 if you want to add an dataset)")
    print("0 : Add dataset")
    for index,value in enumerate(list_dataset,start=1):
        print(index,":",value)
    select_dataset=input('Select Numbers:').split(',')
    if select_dataset[0] == '0':
        print("#ADD NEW DATASET")
        name = input('#Name:')
        path = input('#Path:')
        # Import the dataset folder into the host dataset directory.
        destination = datasetpath + '/' + name
        shutil.copytree(path,destination)
        # Push the new dataset into every running container.
        Copy_Dataset_data(name,list_algo)
        # Re-prompt with the refreshed dataset list (recursion).
        list_dataset=List_Dataset(datasetpath)
        list_dataset,select_dataset=Select_Dataset(list_algo,list_dataset,datasetpath)
    return list_dataset,select_dataset
#1 - make algo -> all dataset to one container
#Copy Host dataset to new_container
def Copy_Dataset_algo(container_name):
    """Copy the whole host ./dataset folder into the container's /cve/."""
    cp_command = 'docker cp ./dataset {0}:/cve/'.format(container_name)
    print(cp_command)
    subprocess.call(cp_command, shell=True)
#2 - make dataset -> one dataset to all container
def Copy_Dataset_data(dataset_name, list_algo):
    """Copy one dataset into every container, then retrain each
    container on it via its save.py."""
    print("INSERT COPY_DATASET_DATA")
    print(list_algo)
    for algo in list_algo:
        copy_cmd = 'docker cp ./dataset/{0} {1}:/cve/dataset/{0}'.format(dataset_name, algo)
        print(copy_cmd)
        subprocess.call(copy_cmd, shell=True)
        train_cmd = 'docker exec {0} python3 save.py {1}'.format(algo, dataset_name)
        print(train_cmd)
        subprocess.call(train_cmd, shell=True)
#Learn new algorithm with all dataset
def Machine_Learn(name, list_dataset):
    """Train container `name` on every known dataset by running its
    save.py once per dataset."""
    for dataset in list_dataset:
        exec_cmd = 'docker exec {0} python3 save.py {1}'.format(name, dataset)
        print(exec_cmd)
        subprocess.call(exec_cmd, shell=True)
#Save each container name to list_algo[]
def List_Algo():
    """Return the names of all currently running docker containers."""
    client = docker.from_env()
    return [container.name for container in client.containers.list()]
def List_Dataset(datasetpath):
    """Return the entries of the dataset folder (one name per dataset)."""
    return os.listdir(datasetpath)
#Copy Result value container to host
def Copy_Result(list_algo,list_dataset,select_algo,select_dataset,saveFilePath,saveHostPath):
    """Copy each selected container's result file (named
    <container>_<dataset>) back into the host result folder."""
    for algo_idx in select_algo:
        algo_name = list_algo[int(algo_idx) - 1]
        for data_idx in select_dataset:
            result_file = algo_name + '_' + list_dataset[int(data_idx) - 1]
            cp_cmd = 'docker cp {0}:{1}/{2} {3}'.format(algo_name, saveFilePath, result_file, saveHostPath)
            subprocess.call(cp_cmd, shell=True)
#Print result in host directory
def Print_Result(saveHostPath):
    """Print the first line of every result file in `saveHostPath`,
    deleting each file after printing it."""
    file_lists = os.listdir(saveHostPath)
    print("RESULT")
    for file in file_lists:
        path = saveHostPath + '/' + file
        # `with` guarantees the handle is closed before the file is
        # removed; the original deleted the file while its handle was
        # still open and only closed it afterwards (fails on Windows).
        with open(path, "r") as f:
            print(f.readline())
        os.remove(path)
#select binaryfile to explore and transform like dataset
def Select_File(list_algo, list_dataset, select_algo, select_dataset):
    """Run flawfinder on a user-supplied source path, count hits for a
    fixed set of CWE ids into ./cnt/NEWBINARY (one count per line),
    render the counts as an image, and copy both artifacts into every
    selected algorithm container.
    """
    import shlex  # local import: only needed here for safe quoting

    source_path = input('Insert File Path To Explore Vulnerabilities : ')
    save_path = './cnt'
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    outputfilename = save_path + '/NEWBINARY'
    # Remove stale artifacts from a previous run; guarded os.remove
    # replaces the original `os.system('rm ...')`, which printed an
    # error when the files did not exist.
    for stale in (outputfilename, outputfilename + '.txt'):
        if os.path.exists(stale):
            os.remove(stale)
    # flawfinder --dataonly prints one finding per line. Quote the user
    # path so spaces or shell metacharacters cannot break (or inject
    # into) the command line.
    os.system('flawfinder --dataonly --quiet ' + shlex.quote(source_path) + " >>" + outputfilename + ".txt")
    # Count occurrences of each tracked CWE id -- same nine greps as
    # the original, deduplicated into one loop (order preserved).
    for cwe in ("78", "120", "126", "134", "190", "327", "377", "676", "785"):
        os.system('grep -c "' + cwe + '" ' + outputfilename + '.txt >>' + outputfilename)
    matrix = countconvert.getarray(outputfilename)
    countconvert.createImage(matrix, outputfilename)
    print("Copy NEWBINARY to select algorithm")
    for algo in select_algo:
        container = list_algo[int(algo) - 1]
        subprocess.call('docker cp ' + outputfilename + ' ' + container + ':/cve/newbinary/', shell=True)
        subprocess.call('docker cp ' + outputfilename + '.png ' + container + ':/cve/newbinary/', shell=True)
def Make_Result(list_algo, list_dataset, select_algo, select_dataset):
    """Run load.py (inference) in each selected container for each
    selected dataset."""
    for algo_idx in select_algo:
        container = list_algo[int(algo_idx) - 1]
        for data_idx in select_dataset:
            load_cmd = 'docker exec {0} python3 load.py {1}'.format(container, list_dataset[int(data_idx) - 1])
            print(load_cmd)
            subprocess.call(load_cmd, shell=True)
def main():
    """Interactive driver: pick algorithm containers and datasets,
    analyse a user-supplied file, run inference in each container, then
    copy back and print the collected results."""
    #Select Path
    #saveFilePath = "/cve/saveResult"
    #saveHostPath = "./result"
    #datasetPath
    list_algo=List_Algo()
    list_dataset=List_Dataset(datasetpath)
    # Both selectors may recurse internally when the user adds entries.
    list_algo,select_algo=Select_Algo(list_algo,list_dataset)
    list_dataset,select_dataset=Select_Dataset(list_algo,list_dataset,datasetpath)
    print("#select_Algo=",select_algo,"Dataset=",select_dataset)
    Select_File(list_algo,list_dataset,select_algo,select_dataset)
    Make_Result(list_algo,list_dataset,select_algo,select_dataset)
    Copy_Result(list_algo,list_dataset,select_algo,select_dataset,saveFilePath,saveHostPath)
    Print_Result(saveHostPath)
if __name__ == '__main__':
    main()
|
import random
from ability import Ability
class Weapon(Ability):
    """An Ability whose attack deals between half and full max damage."""

    def attack(self):
        """Return a uniform random damage roll in [max_damage // 2, max_damage]."""
        ceiling = int(self.max_damage)
        return random.randint(ceiling // 2, ceiling)
|
# -*- coding: utf-8 -*-
# Ecoation RawProcessor Configuration
#
# Created by: Farzad Khandan (farzadkhandan@ecoation.com)
#
from base.cloud_provider import CloudProviderFactory
from base.proxy import RecordProcessorProxy
from providers.aws import aws
# Name of the cloud provider implementation used by this deployment.
CLOUD_PROVIDER_NAME = 'aws'
# Cloud Providers
# register new cloud providers here
CloudProviderFactory.register('aws', aws)
# Resolve the concrete provider selected above.
SYS_CLOUD_PROVIDER = CloudProviderFactory.get_provider(CLOUD_PROVIDER_NAME)
# System proxy: the record-processor proxy bound to the active provider.
SYS_PROXY = RecordProcessorProxy
SYS_PROXY.register_cloud_provider(SYS_CLOUD_PROVIDER)
|
# -*- coding: utf-8 -*-
from PyQt5.QtCore import QObject
from .model import Action
class Win32PowerActionManager(QObject):
    """Stub power-action manager for Win32: advertises only the null
    action and cannot perform any real one."""

    def __init__(self, parent):
        super().__init__(parent)
        self.actions = [Action.Null]  # the only "supported" action

    def act(self, action):
        """Always fails: no power action is implemented on this platform."""
        message = "Cannot do {}".format(action)
        raise NotImplementedError(message)
|
"""
ObservationInfoモジュール
ObservationInfoクラスの基本定義
"""
import dataclasses
from datetime import datetime
@dataclasses.dataclass(frozen=True)
class ObservationInfo:
    """Immutable record describing one scheduled observation."""

    observation_ID: str      # observation name
    description: str         # observation target
    start_time: datetime     # observation start time
    end_time: datetime       # observation end time
    PI_name: str             # principal investigator's name
    contact_name: str        # contact person
    band: str                # observing band
    timestamp: datetime      # last-modified time of the schedule file
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Devi editare il file di configurazione MyIPCameraBot_config.py
Puoi fare riferimento al file di esempio MyIPCameraBot_config.example
-
You must edit the configuration file MyIPCameraBot_config.py
You may refer to the sample files MyIPCameraBot_config.example
"""
import MyIPCameraBot_config
import sys
import time
import os
import glob
import telepot
import requests
import socket
import logging
import logging.handlers
import io
from datetime import datetime
from PIL import Image
from requests.auth import HTTPBasicAuth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from datetime import datetime
from telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove, ForceReply
from base64 import b64encode
# pip install Pillow
# ------ GESTORE DEI COMANDI DEL BOT
class BotCommandsHandler(telepot.Bot):
    """Telegram bot that exposes IP-camera commands to configured users.

    Supported commands: /help, /start, /jpg, /status, /motion, /night,
    /rep, plus the reply-keyboard answers those commands trigger. Only
    users listed in MyIPCameraBot_config.users receive replies; camera
    access goes over each camera's HTTP CGI interface.
    """
    # Handler that must be invoked from the bot's message loop.
    def handle(self, msg):
        """Authorise the sender, then dispatch on the command text."""
        try:
            my_logger.debug("COMMAND: " + str(msg))
            flavor = telepot.flavor(msg)
            if flavor == 'chat':
                content_type, chat_type, chat_id = telepot.glance(msg)
                my_logger.info("Chat message: " + content_type + " - Type: " + chat_type + " - ID: " + str(chat_id) + " - Command: " + msg['text'])
                # Check whether the sender is one of the configured users.
                user_exist = False
                for u in MyIPCameraBot_config.users:
                    if u is None:
                        break
                    if u['telegram_id'] == str(chat_id):
                        user_exist = True
                        my_logger.debug("Check userID " + u['telegram_id'] + ": user exist...")
                # Abort if the sender is not configured: this prevents
                # replies from being sent to non-authorised requesters.
                if user_exist == False:
                    my_logger.info("User NOT exist!!!")
                    return None
                # Select the command to process.
                if msg['text'] == '/help':
                    self.__comm_help(chat_id)
                elif msg['text'] == '/start':
                    self.__comm_help(chat_id)
                elif msg['text'] == '/jpg':
                    self.__comm_jpg(chat_id)
                elif msg['text'] == '/status':
                    self.__comm_status(chat_id)
                elif msg['text'] == '/motion':
                    self.__comm_motion(chat_id)
                elif msg['text'] == 'Motion Detection OFF':
                    self.__comm_motion_detection(chat_id, msg["from"]["first_name"], 0)
                elif msg['text'] == 'Motion Detection ON':
                    self.__comm_motion_detection(chat_id, msg["from"]["first_name"], 1)
                elif msg['text'] == '/night':
                    self.__comm_night(chat_id)
                elif msg['text'] == 'IR Automatic':
                    self.__comm_night_IR(chat_id, 0)
                elif msg['text'] == 'IR On':
                    self.__comm_night_IR(chat_id, 2)
                elif msg['text'] == 'IR Off':
                    self.__comm_night_IR(chat_id, 3)
                elif msg['text'] == '/rep':
                    self.__comm_rep(chat_id)
                elif msg['text'] == 'Clear repository':
                    self.__comm_rep_clear(chat_id)
                elif msg['text'] == 'Cancel':
                    self.__comm_rep_cancel(chat_id)
                else:
                    self.__comm_nonCapisco(chat_id)
            else:
                raise telepot.BadFlavor(msg)
        except:
            my_logger.exception("Unable to parse command: " + str(sys.exc_info()[0]))
    # ------------------------------------
    # CameraName = DCS - 932LB
    # Model = DCS - 932LB1
    # HardwareVersion = B
    # CGIVersion=2.1.8
    # ------------------------------------
    # /motion.cgi?MotionDetectionEnable=0&ConfigReboot=no
    # /daynight.cgi?DayNightMode=0&ConfigReboot=0
    # ------------------------------------
    def __call_camera(selfself, cam, type_url):
        """GET a CGI url on one camera; return the requests Response,
        or None when the request itself raised.

        NOTE(review): the first parameter is misspelled `selfself`; it
        still receives the instance positionally, so behaviour is
        unaffected.
        """
        try:
            url_complete = 'http://' + cam['ip'] + ":" + cam['port'] + type_url
            my_logger.debug("CALL: " + cam['id'] + ' --> ' + url_complete)
            # NOTE(review): b64encode() requires bytes on Python 3 --
            # this str argument only works on Python 2; confirm the
            # intended runtime.
            headers = {'Referer': 'http://' + cam['ip'] + ":" + cam['port'] + ' HTTP/1.0',
                       'Authorization': 'Basic ' + b64encode("{0}:{1}".format(cam['user'], cam['pwd']))}
            my_logger.debug("Headers: " + str(headers))
            r = requests.get(url_complete, headers=headers, auth=HTTPBasicAuth(cam['user'], cam['pwd']))
            my_logger.info(cam['id'] + ' --> ' + "HTTP Status: {0}".format(r.status_code))
            if r.status_code != 200:
                my_logger.debug("Unable to contact camera!")
            return r
        except:
            my_logger.exception("Unable to call camera! " + str(sys.exc_info()[0]))
    def __comm_help(self, toUser):
        """Send the static help text to `toUser`."""
        try:
            bot.sendMessage(toUser, helpMessage)
            my_logger.info('HELP message sent to user ' + str(toUser))
        except:
            my_logger.exception("Unable to send help message: " + str(sys.exc_info()[0]))
    def __comm_jpg(self, toUser):
        """Grab a snapshot from every configured camera, save it to the
        image repository and send it to the requesting user."""
        try:
            for camera in MyIPCameraBot_config.camere:
                r = self.__call_camera(camera, camera['url_jpg'])
                if r.status_code == 200:
                    try:
                        my_logger.debug("JPG data available")
                        f = io.BytesIO(r.content)
                        img = Image.open(f)
                        now = datetime.now()
                        # Filename: <cameraId><YYYYmmddHHMMSS>.jpg in the repository folder.
                        jpg_filename = MyIPCameraBot_config.IMAGES_PATH + '/{0}{1}.jpg'.format(camera['id'], now.strftime("%Y%m%d%H%M%S"))
                        img.save(jpg_filename, 'JPEG')
                        my_logger.info("Create JPEG: " + jpg_filename)
                    except:
                        my_logger.exception("Unable to create image file.")
                    finally:
                        f.close()
                        img.close()
                    send_bot_image(toUser, jpg_filename)
                else:
                    bot.sendMessage(toUser, 'oops! Unable to contact camera ' + camera['id'])
        except:
            my_logger.exception("Unable to get image: " + str(sys.exc_info()))
        finally:
            # Small pause after handling the cameras.
            time.sleep(3)
    def __comm_status(self, toUser):
        """Report host name, uptime and the user's push-notification state."""
        try:
            hostname=socket.gethostname()
            user = self.__getUser(toUser)
            if user['push'] is True:
                notifiche="ON"
            else:
                notifiche= "OFF"
            # Elapsed time since start in hours (despite the variable name).
            statusMinutes = ((datetime.now()-startTime).total_seconds()) / 60 / 60
            bot.sendMessage(toUser, "Hi {0}. I run on {1} and it's all ok!\n"
                                    "I am alert from {2:0,.1f} hours!\n"
                                    "Your push notifications are {3}!\n\n"
                                    "more info at www.ccworld.it\n".format(user['name'], hostname, statusMinutes, notifiche))
            my_logger.info("STATUS sent to user " + str(toUser))
        except:
            my_logger.exception("Command failed! " + str(sys.exc_info()[0]))
    def __comm_motion(self, toUser):
        """Show the ON/OFF reply keyboard for motion detection."""
        try:
            show_keyboard = ReplyKeyboardMarkup(keyboard=[[KeyboardButton(text='Motion Detection ON'),
                                                           KeyboardButton(text='Motion Detection OFF')],
                                                          ])
            my_logger.debug("Reply keyboard showed.")
            bot.sendMessage(toUser, "Set motion detection: ", reply_markup=show_keyboard)
            my_logger.info("MOTION message sent to user " + str(toUser))
        except:
            my_logger.exception("Command failed! " + str(sys.exc_info()[0]))
    def __comm_motion_detection(self, toUser, first_name, enabled):
        """Toggle motion detection (enabled: 0/1) on every camera and
        notify all push-enabled users.

        NOTE(review): `first_name` is never used; the notification names
        each *recipient* (u['name']) as the actor -- confirm whether the
        sender's name was intended.
        """
        try:
            hide_keyboard = ReplyKeyboardRemove()
            my_logger.debug("Keyboard hided")
            bot.sendMessage(toUser, 'wait...', reply_markup=hide_keyboard)
            for camera in MyIPCameraBot_config.camere:
                try:
                    r = self.__call_camera(camera, camera['url_motion_detection'].format(enabled))
                    if r.status_code == 200:
                        for u in MyIPCameraBot_config.users:
                            if u is None:
                                continue
                            if u['push'] is True:
                                bot.sendMessage(u['telegram_id'], 'Camera: {0} - Motion detection:{1} '
                                                                  'by {2}'.format(camera['id'], enabled, u['name']))
                                my_logger.info("MOTION command sent to user " + u['name'])
                    else:
                        bot.sendMessage(toUser, 'oops! Unable to contact camera ' + camera['id'])
                except:
                    print(str(datetime.now()), 'Command failed! ', sys.exc_info()[0], toUser)
        except:
            my_logger.exception("Command failed! " + str(sys.exc_info()[0]))
    def __comm_night(self, toUser):
        """Show the reply keyboard with the infrared (night) modes."""
        try:
            show_keyboard = ReplyKeyboardMarkup(keyboard=[[KeyboardButton(text='IR Automatic')],
                                                          [KeyboardButton(text='IR On'), KeyboardButton(text='IR Off')],
                                                          ])
            bot.sendMessage(toUser, "Select a night mode:", reply_markup=show_keyboard)
            my_logger.info("NIGHT message sent to user " + str(toUser))
        except:
            my_logger.exception("Command failed! " + str(sys.exc_info()[0]))
    def __comm_night_IR(self, toUser, enabled):
        """Set the infrared mode (0=auto, 2=on, 3=off) on every camera.

        NOTE(review): this calls camera['url_motion_detection'], not a
        day/night URL (the comment block above documents /daynight.cgi)
        -- confirm which CGI endpoint was intended.
        """
        try:
            hide_keyboard = ReplyKeyboardRemove()
            my_logger.debug("Keyboard hided")
            bot.sendMessage(toUser, 'wait...', reply_markup=hide_keyboard)
            for camera in MyIPCameraBot_config.camere:
                try:
                    r = self.__call_camera(camera, camera['url_motion_detection'].format(enabled))
                    if r.status_code == 200:
                        bot.sendMessage(toUser, 'Camera: {0} -- Infrared: {1}'.format(camera['id'], enabled))
                        my_logger.info("IR AUTO message sent to user " + str(toUser))
                    else:
                        bot.sendMessage(toUser, 'oops! Unable to contact camera ' + camera['id'])
                except:
                    print(str(datetime.now()), 'Command failed! ', sys.exc_info()[0], toUser)
        except:
            my_logger.exception("Command __comm_night_IR failed!")
    def __comm_rep(self, toUser):
        """Report the repository size and show the clear/cancel keyboard."""
        try:
            # Count every file under the image repository (recursively).
            cpt = sum([len(files) for r, d, files in os.walk(MyIPCameraBot_config.IMAGES_PATH)])
            show_keyboard = ReplyKeyboardMarkup(keyboard=[[KeyboardButton(text='Clear repository'),
                                                           KeyboardButton(text='Cancel')],
                                                          ])
            my_logger.debug("Reply keyboard showed.")
            bot.sendMessage(toUser, "Repository folder contains {0} JPG.".format(cpt), reply_markup=show_keyboard)
            my_logger.info("REP message sent to user " + str(toUser))
        except:
            my_logger.exception("Command __comm_rep failed!")
    def __comm_rep_clear(self, toUser):
        """Delete every *.jpg in the image repository."""
        try:
            hide_keyboard = ReplyKeyboardRemove()
            my_logger.debug("Keyboard hided")
            bot.sendMessage(toUser, 'wait...', reply_markup=hide_keyboard)
            file_list_to_remove = glob.glob(MyIPCameraBot_config.IMAGES_PATH + "/*.jpg")
            for filePath in file_list_to_remove:
                try:
                    os.remove(filePath)
                except OSError:
                    print("Error while deleting file")
            bot.sendMessage(toUser, "Repository cleared!")
            my_logger.info("REP CLEAR message sent to user " + str(toUser))
        except:
            my_logger.exception("Command __comm_rep_clear failed!")
    def __comm_rep_cancel(self, toUser):
        """Dismiss the repository keyboard without doing anything."""
        try:
            hide_keyboard = ReplyKeyboardRemove()
            my_logger.debug("Keyboard hided")
            bot.sendMessage(toUser, 'cancel...', reply_markup=hide_keyboard)
            my_logger.info("REP CANCEL message sent to user " + str(toUser))
        except:
            my_logger.exception("Command __comm_rep_clear failed!")
    def __comm_nonCapisco(self, toUser):
        """Fallback reply for unrecognised commands."""
        try:
            bot.sendMessage(toUser, "sorry I do not understand...")
            my_logger.info("NOT UNDERSTAND message sent to user " + str(toUser))
        except:
            my_logger.exception("Command failed! " + str(sys.exc_info()[0]))
    def __getUser(self, userID):
        """Return the configured user dict for `userID`, or None."""
        for usr in MyIPCameraBot_config.users:
            if usr['telegram_id'] == str(userID):
                return usr
        return None
def create_logger():
    """Create the global application logger: rotating file handler at
    INFO level plus a console handler at DEBUG level."""
    try:
        global my_logger
        my_logger = logging.getLogger('MyLogger')
        my_logger.setLevel(logging.DEBUG)
        # File handler: rotate around 500 kB, keep 5 backups.
        file_handler = logging.handlers.RotatingFileHandler(
            MyIPCameraBot_config.LOG_FILENAME, maxBytes=500000, backupCount=5)
        file_handler.setLevel(logging.INFO)
        # Console handler mirrors everything to stdout.
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(logging.DEBUG)
        log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(log_format)
        console_handler.setFormatter(log_format)
        my_logger.addHandler(file_handler)
        my_logger.addHandler(console_handler)
        my_logger.debug("Rotating File logger created")
    except:
        print(str(sys.exc_info()))
        my_logger.exception("Unable to create logger")
def send_bot_image(toUser, filename):
    """Send the image at `filename` to the Telegram user `toUser` and
    refresh the module-level last-notification timestamp."""
    # Bug fix: without `global`, the assignment below created a local
    # variable and never updated the module-level throttle timestamp
    # read by WatchdogHandler (the original comment says it should).
    global lastMessage
    f = None
    try:
        my_logger.info("New ondemand JPG: " + filename)
        f = open(filename, 'rb')
        bot.sendPhoto(toUser, f)
        my_logger.debug('Image message sent to ' + str(toUser))
        lastMessage = datetime.now()  # refresh the last-notification time
        my_logger.debug("Last message dateTime set @: " + str(lastMessage))
    except:
        my_logger.exception("Unable to send image message to user")
    finally:
        # Guard: in the original, f.close() raised NameError whenever
        # open() itself failed.
        if f is not None:
            f.close()
# ------ GESTORE DEL WATCHDOG
class WatchdogHandler(FileSystemEventHandler):
    """Pushes newly created JPG files to every push-enabled user,
    rate-limited to one send per SEND_SECONDS."""
    def on_created(self, event):
        my_logger.debug("Auto discover new JPG: " + event.src_path)
        # Only react to new files with a .jpg extension.
        if os.path.splitext(event.src_path)[1] != ".jpg":
            my_logger.debug("The new file is not a .jpg")
            return None # no image .jpg
        # Rate limit: skip when the minimum interval since the last
        # transmission has not elapsed yet.
        if (datetime.now() - lastMessage).seconds < MyIPCameraBot_config.SEND_SECONDS:
            my_logger.info("Too many transmissions. Passed only {0}/{1} seconds.".format((datetime.now() - lastMessage).seconds, MyIPCameraBot_config.SEND_SECONDS))
            return None # rate-limited
        # Loop over the configured users.
        for u in MyIPCameraBot_config.users:
            if u is None:
                continue
            # Send only to users with PUSH notifications enabled; the
            # minimum-interval check already happened above.
            if u['push'] is True: # and (datetime.now()-lastMessage).seconds > MyIPCameraBot_config.SEND_SECONDS:
                send_bot_image(u['telegram_id'], event.src_path)
            else:
                my_logger.info("Message not sent. The user may be configured without sending push. "
                               "They must spend at least {0} seconds"
                               "after the last transmission ({1})".format(MyIPCameraBot_config.SEND_SECONDS, lastMessage))
if __name__ == "__main__":
    create_logger()
    startTime = datetime.now()
    my_logger.info("--------------------------------------")
    my_logger.info("START @: " + str(startTime))
    # Datetime of the last message sent: a minimum number of seconds
    # can be required between one notification and the next.
    global lastMessage
    lastMessage = datetime.now()
    my_logger.debug("Last message dateTime set @: " + str(lastMessage))
    # ------ TELEGRAM --------------
    # Initialise the bot with the secret token from the configuration
    # file, using the command-handler class above.
    try:
        bot = BotCommandsHandler(MyIPCameraBot_config.TELEGRAM_BOT_TOKEN)
        my_logger.info("Bot: " + str(bot.getMe()))
    except:
        my_logger.exception("Unable to init BOT!")
        my_logger.exception("Unable to init BOT: EXIT!! " + str(sys.exc_info()[0]))
        exit()
    # Send a welcome message to every user in the configuration file.
    try:
        helpMessage = 'My commands:\n' \
                      '/help: commands list\n' \
                      '/jpg: I send you all JPG camera snapshot\n' \
                      '/motion: set motion detection\n' \
                      '/night: set night mode (infrared)\n' \
                      '/status: my status\n' \
                      '/rep: manage JPEG repository\n\n' \
                      'more info at www.ccworld.it\n'
        for u in MyIPCameraBot_config.users:
            if u is None:
                break
            my_logger.info('Welcome to Telegram user: ' + str(u))
            welcome = "I'm active now!!\n\n" \
                      "I can send you camera's images when I detect a movement. " \
                      "Or you can ask for them whenever you want.\n\n"
            bot.sendMessage(u['telegram_id'], 'Hi {0}! '.format(u['name']) + welcome + helpMessage)
        bot.message_loop()
        my_logger.info("Listen...")
    except:
        my_logger.exception("Problemi nella configuazione degli utenti: " + str(sys.exc_info()[0]))
    # ------ WATCHDOG --------------
    try:
        # Read the folder to watch from the configuration file
        # (fallback to the script directory was considered and left
        # commented out).
        # watchDogPath = MyIPCameraBot_config.IMAGES_PATH if MyIPCameraBot_config.IMAGES_PATH > 1 else '.'
        watchDogPath = MyIPCameraBot_config.IMAGES_PATH
        # Attach the handler implementing the watchdog logic, pass the
        # local filesystem path and disable folder recursion.
        observer = Observer()
        observer.schedule(WatchdogHandler(), watchDogPath, recursive=False)
        # Start the watchdog.
        observer.start()
        my_logger.debug("Watchdog started")
    except:
        my_logger.exception("Watchdog error")
    # Keep the process alive until interrupted from the keyboard.
    try:
        while 1:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
# -*- coding: utf-8 -*-
class Branch(models.Model):
    """A physical library branch."""
    class Meta(object):
        verbose_name = _('branch')
        verbose_name_plural = _('branches')
        app_label = 'library_branch'
    # Human-readable branch name.
    name = models.CharField(max_length=100)
    def __str__(self):
        return "{}".format(self.name)
|
import cv2
import numpy as np
import os
import time
import argparse
import datetime
import imutils
from PIL import Image
## Init Face detect vars.
# Haar cascade for frontal-face detection plus an LBPH recogniser
# restored from a previously trained model.
faceDetector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml');
recognizer = cv2.face.createLBPHFaceRecognizer();  # NOTE(review): old OpenCV contrib API; newer builds use cv2.face.LBPHFaceRecognizer_create -- confirm installed version
font = cv2.FONT_HERSHEY_SIMPLEX;
recognizer.load("trainer/trainer.yml");  # pre-trained LBPH model weights
def CameraOn(id, DeviceList, event_flags):
    """Stream frames from a camera device until it is switched off.

    Optionally runs motion detection (running-average background model)
    and, on motion, facial recognition plus upload/email notification.

    Parameters:
        id: index of the device in DeviceList; also names the JPEG frame
            written for the web server.
        DeviceList: sequence of device objects exposing .status,
            .room_attandeance and .facial_recognition flags ("ON"/"OFF").
        event_flags: shared flag object (not used in this function).
    """
    camera_device = DeviceList[id]
    # NOTE(review): always opens device 0 regardless of `id` -- the original
    # flagged this with "????"; confirm whether the index should be used.
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)

    # Prime the running-average background model from the first frame.
    (grabbed, frame) = camera.read()
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    avg = np.float32(gray)

    # BUGFIX: the debug windows below referenced these before motion
    # detection ever ran, raising NameError when room_attandeance was OFF.
    thresh = None
    frameDelta = None

    # Loop over the frames of the video stream.
    while camera_device.status == "ON":
        (grabbed, frame) = camera.read()
        text = "Unoccupied"
        if not grabbed:
            print("Error could not grabb image frame in FacialRecog.py/CameraON function !!")
            break

        # MOTION DETECTION OPTION
        if camera_device.room_attandeance == "ON":
            # Resize the frame, convert it to greyscale, and blur it.
            frame = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)
            # Update the running average; alpha=0.05 keeps sudden changes
            # from dominating the background model.
            cv2.accumulateWeighted(gray, avg, 0.05)
            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            # Dilate the thresholded image to fill in holes, then find contours.
            thresh = cv2.dilate(thresh, None, iterations=2)
            (derp, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for c in cnts:
                # Ignore contours that are too small.
                # NOTE(review): `args` is not defined in this module; this
                # relies on a global populated elsewhere -- confirm.
                if cv2.contourArea(c) < args["min_area"]:
                    continue
                # Draw the bounding box of the moving region.
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # BUGFIX: was "occupied" (lower case) which never matched the
                # "Occupied" checks below, so recognition and notification
                # never triggered.
                text = "Occupied"
            # Draw the room status and timestamp on the frame.
            cv2.putText(frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # FACIAL RECOGNITION OPTION. MOTION DETECTION MUST BE ON IN ORDER TO WORK !!
        if camera_device.facial_recognition == "ON":
            if text == "Occupied":
                DetectFaces(frame)

        # Send email notification and upload to Google Drive if motion detected.
        if text == "Occupied":
            print("uploading to google drive")
            print("sending email notification")

        # Show the frames on the server -- for debugging only.
        cv2.imshow("Security Feed", frame)
        if thresh is not None:
            cv2.imshow("Thresh", thresh)
            cv2.imshow("frame Delta", frameDelta)

        # Write the frame where the web server can serve it.
        output_image_path = "/var/www/html/Images/video_frame" + str(id) + ".jpg"
        cv2.imwrite(output_image_path, frame)

        # If the 'q' key is pressed, break from the loop.
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    # Clean up.
    camera.release()
    cv2.destroyAllWindows()
    return
def RememberFace(camera, event_flags):
    """Capture ~20 face samples for a new user id, then retrain the recognizer.

    Parameters:
        camera: incoming camera handle; immediately replaced by a fresh
            capture of server camera 0 (mirrors the original behaviour).
        event_flags: shared flag object; .new_face is cleared and
            .new_face_id supplies the id used to name the saved samples.
    """
    event_flags.new_face = False
    Id = event_flags.new_face_id
    sampleNum = 0
    camera = cv2.VideoCapture(0)  # num of server camera=0.
    time.sleep(0.25)
    while True:
        # BUGFIX: was `cam.read()` -- `cam` is undefined; the handle is `camera`.
        ret, img = camera.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # BUGFIX: was `detector` -- undefined; the module-level cascade is
        # `faceDetector`.
        faces = faceDetector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Incrementing sample number.
            sampleNum = sampleNum + 1
            # Save the captured face in the dataset folder; str(Id) guards
            # against a non-string id (str + int would raise TypeError).
            cv2.imwrite("dataSet/User." + str(Id) + '.' + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])
            cv2.imshow('frame', img)
        # Wait 100 ms for a keypress; stop on 'q'...
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break
        # ...or once more than 20 samples have been collected.
        elif sampleNum > 20:
            break
    # BUGFIX: release the capture device (previously leaked).
    camera.release()
    faces, Ids = getImagesAndLabels('dataSet')
    recognizer.train(faces, np.array(Ids))
    recognizer.save('trainer/trainer.yml')
    return
def getImagesAndLabels(path):
    """Load every image under *path* and return (face crops, ids).

    The numeric id is parsed from the second dot-separated component of
    each file name (e.g. "User.<id>.<n>.jpg"); one crop and one id entry
    are appended per face found in the image.
    """
    faceSamples = []
    Ids = []
    for fname in os.listdir(path):
        imagePath = os.path.join(path, fname)
        # Greyscale PIL image converted to a uint8 numpy array.
        grey = np.array(Image.open(imagePath).convert('L'), 'uint8')
        # Extract the id embedded in the file name.
        sampleId = int(os.path.split(imagePath)[-1].split(".")[1])
        # Detect faces and keep each crop together with its id.
        for (x, y, w, h) in faceDetector.detectMultiScale(grey):
            faceSamples.append(grey[y:y + h, x:x + w])
            Ids.append(sampleId)
    return faceSamples, Ids
def DetectFaces(img):
    """Detect faces in *img* in place, labelling each with the recognised name."""
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detections = faceDetector.detectMultiScale(grey, 1.3, 5)
    id = ""
    for (x, y, w, h) in detections:
        cv2.rectangle(img, (x, y), (w + x, y + h), (0, 0, 255), 2)
        id = recognizer.predict(grey[y:y + h, x:x + w])
        # NOTE(review): CheckForPerson and FacesList are not defined in this
        # module; they must come from elsewhere -- confirm.
        person = CheckForPerson(id, FacesList)
        cv2.putText(img, person, (x, y + h), font, 1, (255, 255, 255), 2)
    return
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import time
# When imported as a module, use the project-wide config; when run as a
# script, build a minimal stand-alone config for the smoke test at the bottom.
if __name__ != '__main__':
    from config import cfg
else:
    from easydict import EasyDict as edict
    cfg = edict()
    cfg.VOXEL_POINT_COUNT = 50
    cfg.POINT_FEATURE_LEN = 6
    cfg.GRID_Z_SIZE, cfg.GRID_Y_SIZE, cfg.GRID_X_SIZE = 1, 100, 200
class FeatureNet_Simple(object):
    """Hand-crafted (non-learned) voxel feature encoder.

    For each voxel it computes five statistics from the raw points --
    min z, max z, max intensity, mean intensity and the fill fraction --
    and scatters them into a dense [batch, Z, Y, X, 5] grid.
    """
    def __init__(self, training, batch_size, name=''):
        super(FeatureNet_Simple, self).__init__()
        self.training = training
        # scalar
        self.batch_size = batch_size
        # [K, T, F]: K voxels, T (zero-padded) points per voxel, F features.
        self.feature_pl = tf.placeholder(tf.float32, [None, cfg.VOXEL_POINT_COUNT, cfg.POINT_FEATURE_LEN], name='feature')
        # [K]: count of real (non-padding) points in each voxel.
        self.number_pl = tf.placeholder(tf.int64, [None], name='number')
        # []
        self.voxcnt_pl = tf.placeholder(tf.int64, [None], name='total_voxel_cnt')
        # [K, T, 1]: True for real points, False for padding.
        self.mask_pl = tf.placeholder(tf.bool, [None, cfg.VOXEL_POINT_COUNT, 1], name='mask')
        # [K, 4], each row stores (batch, d, h, w)
        self.coordinate_pl = tf.placeholder(tf.int64, [None, 4], name='coordinate')
        # Feature column 2 is z and column 3 is intensity -- assumed layout,
        # TODO confirm against the voxelizer that fills feature_pl.
        min_z = tf.reduce_min(self.feature_pl[:,:,2], axis=-1, keepdims=True)
        max_z = tf.reduce_max(self.feature_pl[:,:,2], axis=-1, keepdims=True)
        max_intensity = tf.reduce_max(self.feature_pl[:,:,3], axis=-1, keepdims=True)
        mean_intensity = tf.reduce_sum(self.feature_pl[:,:,3], axis=-1, keepdims=True)
        # Divide the intensity sum by the number of real points (mask sum).
        mean_intensity = mean_intensity / tf.reduce_sum(tf.cast(self.mask_pl, tf.float32), axis=1, keepdims=False)
        # Fraction of the voxel's point capacity that is filled.
        number_vox = tf.expand_dims(tf.cast(self.number_pl, tf.float32), axis=-1) / cfg.VOXEL_POINT_COUNT
        # [K, 5] per-voxel feature vector.
        self.voxelwise = tf.concat((min_z, max_z, max_intensity, mean_intensity, number_vox), axis=-1)
        Cout = self.voxelwise.get_shape()[-1]
        # Scatter the per-voxel features into the dense output grid.
        self.outputs = tf.scatter_nd(
            self.coordinate_pl, self.voxelwise, [self.batch_size, cfg.GRID_Z_SIZE, cfg.GRID_Y_SIZE, cfg.GRID_X_SIZE, Cout])
if __name__ == '__main__':
    # Smoke test: build the net for batch size 2 and run one forward pass
    # on random voxel data, printing the per-voxel feature shape.
    training = tf.placeholder(tf.bool)
    fns = FeatureNet_Simple(training, 2)
    voxels_total = 32
    feature_in = np.random.rand(voxels_total, cfg.VOXEL_POINT_COUNT, cfg.POINT_FEATURE_LEN)
    number_in = np.ones([voxels_total,], dtype=np.int64)
    voxcnt_in = np.array([12, 20], dtype=np.int64)
    # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement for boolean dtypes.
    mask_in = np.ones([voxels_total, cfg.VOXEL_POINT_COUNT, 1], dtype=bool)
    #
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    ret = sess.run(fns.voxelwise, {fns.feature_pl: feature_in,
                                   fns.number_pl: number_in,
                                   fns.voxcnt_pl: voxcnt_in,
                                   fns.mask_pl: mask_in,
                                   fns.training: False})
    print(ret.shape)
import sys
from PyQt4.QtCore import pyqtSlot
from PyQt4 import QtCore, QtGui, uic,QtTest
from PyQt4.QtGui import *
import subprocess
from time import sleep
# Query the current screen resolution via xrandr (X11 only); output is bytes
# like b"1920x1080", split into [width, height].
output = subprocess.Popen('xrandr | grep "\*" | cut -d" " -f4',shell=True, stdout=subprocess.PIPE).communicate()[0]
resolution = output.split()[0].split(b'x')
a = QApplication(sys.argv)
# NOTE(review): `global` statements at module scope have no effect -- these
# names are only declared here, never assigned at module level (except
# cur_stage below).
global x #width of screen
global y #height of screen
global textbox
global cur_stage
cur_stage = 0
class UI:
    """Six-screen kiosk UI (coin entry, amount display, return, error...).

    Screens live in self.w1..self.w6; amt_screen() switches between them.
    Stage identifiers are matched as strings ('1'..'6'); integer stages
    are accepted and normalised.

    BUGFIX: the original referenced w1..w6, x, y and textbox without
    `self.` inside amt_screen (NameError at runtime) and lost the amount
    label in a throwaway local inside __init__.
    """

    def __init__(self):
        self.x = int(resolution[0])  # width of screen
        self.y = int(resolution[1])  # height of screen
        self.w1 = QWidget()
        self.w1.setWindowTitle("Enter coin")
        self.w1.resize(self.x, self.y)
        self.w2 = QWidget()
        self.w2.setWindowTitle("")
        self.w2.resize(self.x, self.y)
        # BUGFIX: keep the amount label on the instance -- other stages use it.
        self.textbox = QLabel(self.w2)
        self.w3 = QWidget()
        self.w3.setWindowTitle("")
        self.w3.resize(self.x, self.y)
        self.w4 = QWidget()
        self.w4.setWindowTitle("")
        self.w4.resize(self.x, self.y)
        self.w5 = QWidget()
        self.w5.setWindowTitle("")
        self.w5.resize(self.x, self.y)
        self.w6 = QWidget()
        self.w6.setWindowTitle("")
        self.w6.resize(self.x, self.y)

    def amt_screen(self, stage, text=''):
        """Show the screen for `stage`, optionally displaying `text` (Rs. amount)."""
        # BUGFIX: callers pass ints while the branches compare strings.
        stage = str(stage)
        text = str(text)
        if stage == '1':
            # window w1: animated "enter coin" prompt
            self.label_w1 = QLabel(self.w1)
            self.movie = QMovie("g3.gif")
            self.movie.setScaledSize(QtCore.QSize(self.x, self.y))
            self.label_w1.setMovie(self.movie)
            self.movie.start()
            try:
                self.w5.close()
            except:
                print("")
            try:
                self.w4.close()
            except:
                print("")
            self.w1.show()
        elif stage == '2':
            # window w2: display the amount entered so far
            if cur_stage == 0:
                self.textbox.clear()
                font = QtGui.QFont()
                font.setPointSize(100)
                font.setBold(True)
                font.setItalic(True)
                font.setWeight(75)
                # Integer division keeps QWidget.move() happy on Python 3.
                self.textbox.move(self.x // 3, self.y // 3)
                self.textbox.setText("You entered \n\tRs. " + text)
                self.textbox.setFont(font)
            else:
                try:
                    self.w1.close()
                except:
                    print("")
                try:
                    self.w2.close()
                except:
                    print("")
                # window w2
                label_w2 = QLabel(self.w2)
                image = QPixmap("ret_money.jpg")
                image2 = image.scaled(self.x, self.y)
                label_w2.setPixmap(image2)
                font = QtGui.QFont()
                font.setPointSize(100)
                font.setBold(True)
                font.setItalic(True)
                font.setWeight(75)
                self.textbox.move(self.x // 3, self.y // 3)
                self.textbox.setText("You entered \n\tRs. " + text)
                self.textbox.setFont(font)
                self.w2.show()
        elif stage == '3':
            # window w3: confirmation screen with a Continue button
            label_w3 = QLabel(self.w3)
            image_w3 = QPixmap("")
            image2_w3 = image_w3.scaled(self.x, self.y)
            label_w3.setPixmap(image2_w3)
            btn_w3_1 = QPushButton("Continue", self.w3)
            btn_w3_1.move(self.x // 2, self.y // 2)
            try:
                self.w2.close()
            except:
                print("")
            self.w3.show()
        elif stage == '4':
            # window w4: transient "processing" animation, closes after 2 s
            self.label_w4 = QLabel(self.w4)
            # Keep the movie on self so it is not garbage-collected mid-play.
            self.movie_w4 = QMovie("g2.gif")
            self.movie_w4.setScaledSize(QtCore.QSize(self.x, self.y))
            self.label_w4.setMovie(self.movie_w4)
            self.movie_w4.start()
            try:
                self.w3.close()
            except:
                print("")
            self.w4.show()
            print('a')
            QtTest.QTest.qWait(2000)
            print('b')
            self.w4.close()
            #amt_screen(1,text)
        elif stage == '5':
            # window w5: "returning money" image, shown for 3 s
            label_w5 = QLabel(self.w5)
            image_w5 = QPixmap("ret_money.jpg")
            image2_w5 = image_w5.scaled(self.x, self.y)
            label_w5.setPixmap(image2_w5)
            try:
                self.w6.close()
            except:
                self.w2.close()
            self.w5.show()
            QtTest.QTest.qWait(3000)
            self.w5.close()
        elif stage == '6':
            # window w6: error ("wrong") image
            label_w6 = QLabel(self.w6)
            image_w6 = QPixmap("weong.png")
            image2_w6 = image_w6.scaled(self.x, self.y)
            label_w6.setPixmap(image2_w6)
            #btn_w6_1=QPushButton("Cancel",w6)
            #btn_w6_1.move(x/2,y/2)
            try:
                self.w1.close()
            except:
                print("")
            self.w6.show()
# BUGFIX: the event loop was started before the UI object existed, and
# amt_screen was called with integer 1, which matched no string stage and
# therefore showed nothing. Build the UI, show the first screen, then run
# the event loop and propagate its exit code.
obj = UI()
obj.amt_screen('1')
sys.exit(a.exec_())
|
from etk.etk import ETK
from etk.knowledge_graph import KGSchema
from etk.extractors.glossary_extractor import GlossaryExtractor
from etk.etk_module import ETKModule
from etk.wikidata import *
class ExampleETKModule(ETKModule):
    """
    Example extraction module that emits one Wikidata-style property.
    """

    def __init__(self, etk):
        ETKModule.__init__(self, etk)
        self.name_extractor = GlossaryExtractor(
            self.etk.load_glossary("./names.txt"),
            "name_extractor",
            self.etk.default_tokenizer,
            case_sensitive=False,
            ngrams=1)

    def process_document(self, doc):
        """Bind the wikidata namespaces on the document's KG and register
        quantity-valued property C3001 ("violent crime offenses"), marked
        as an instance of D1001 and equivalent to Q1520311.

        Example of the statement shape this models:
            Douglas_Adams educated_at
                value: St_John's_College
                qualifier: start_time 1971 / end_time 1974
                reference: stated_in Encyclopædia_Britannica_Online
                rank: normal
        """
        for prefix, namespace in wiki_namespaces.items():
            doc.kg.bind(prefix, namespace)

        crime_property = WDProperty('C3001', Datatype.QuantityValue)
        crime_property.add_label('violent crime offenses', lang='en')
        crime_property.add_description(
            "number of violent crime offenses reported by the sheriff's office or county police department",
            lang='en')
        crime_property.add_statement('P31', Item('D1001'))
        crime_property.add_statement('P1629', Item('Q1520311'))
        doc.kg.add_subject(crime_property)
        return []
if __name__ == "__main__":
kg_schema = KGSchema()
kg_schema.add_schema('@prefix : <http://isi.edu/> .', 'ttl')
etk = ETK(kg_schema=kg_schema, modules=ExampleETKModule)
doc = etk.create_document({}, doc_id="http://isi.edu/default-ns/projects")
docs = etk.process_ems(doc)
print(docs[0].kg.serialize('ttl'))
with open('p.tsv', 'w') as fp:
serialize_change_record(fp)
|
#!/usr/bin/python
# ***************************************************************************
# Author: Christian Wolf
# christian.wolf@insa-lyon.fr
#
# Begin: 22.9.2019
# ***************************************************************************
import glob
import os
import numpy as np
#from skimage import io
from numpy import genfromtxt
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
#from torch.utils.tensorboard import SummaryWriter
from dataset_det import Balls_CF_Detection, COLORS#
STATS_INTERVAL = 10
'''
class MNISTDataset(Dataset):
def __init__(self, dir, transform=None):
self.no_images=0
self.transform = transform
arrarr = [None]*10
for i in range(10):
print (i)
regex="%s/%i/*.png"%(dir,i)
entries=glob.glob(regex)
arr=[None]*len(entries)
for j,filename in enumerate(entries):
# arr[j] = torch.tensor(io.imread(filename))
arr[j] = io.imread(filename)
if self.transform:
arr[j] = self.transform(arr[j])
arrarr[i] = arr
self.no_images = self.no_images + len(entries)
# Flatten into a single array
self.images = [None]*self.no_images
self.labels = [None]*self.no_images
g_index=0
for i in range(10):
for t in arrarr[i]:
self.images[g_index] = t
self.labels[g_index] = i
g_index += 1
# The access is _NOT_ shuffled. The Dataloader will need
# to do this.
def __getitem__(self, index):
return self.images[index], self.labels[index]
# Return the dataset size
def __len__(self):
return self.no_images
BATCHSIZE=50
valid_dataset = MNISTDataset ("MNIST-png/testing",
transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])) # mean, std of dataset
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=BATCHSIZE, shuffle=True)
train_dataset = MNISTDataset ("MNIST-png/training",
transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])) # mean, std of dataset)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=BATCHSIZE, shuffle=True)
'''
# Batches of 250. Train and validation sets come from the same directory;
# presumably the (0,16000) / (16000,21000) arguments are sample index ranges
# -- confirm against Balls_CF_Detection's signature.
BATCHSIZE=250
train_dataset = Balls_CF_Detection("./mini_balls/train", 0, 16000) #, transforms.Normalize([128, 128, 128], [3, 3, 3]))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCHSIZE, shuffle=True)
valid_dataset = Balls_CF_Detection("./mini_balls/train", 16000, 21000) #, transforms.Normalize([128, 128, 128], [3, 3, 3]))
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCHSIZE, shuffle=True)
class LeNet(torch.nn.Module):
    """Small LeNet-style CNN for multi-label ball detection.

    Expects 3x100x100 RGB input and emits 9 independent sigmoid scores
    (one per ball class), suitable for BCELoss.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = torch.nn.Conv2d(3, 3, 5, 1)    #(3, 12, 5, 1)
        self.conv2 = torch.nn.Conv2d(3, 5, 3, 1)    #(12, 36, 5, 1)
        # 100 -> conv5 -> 96 -> pool -> 48 -> conv3 -> 46 -> pool -> 23
        self.fc1 = torch.nn.Linear(23 * 23 * 5, 50)  #(174240//10, 400)
        self.fc2 = torch.nn.Linear(50, 9)            #(400, 9)

    def forward(self, x):
        """Run the forward pass; returns per-class probabilities in (0, 1)."""
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 23 * 23 * 5)  #(-1, 174240//10)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        # BUGFIX: F.sigmoid is deprecated (warns, removed in newer torch);
        # torch.sigmoid is the documented replacement.
        return torch.sigmoid(x)
# Instantiate the network and move it to the first GPU.
model = LeNet()
model = model.to("cuda:0")
# Binary cross-entropy over the 9 sigmoid outputs (multi-label targets).
# The commented-out alternative was single-label CrossEntropyLoss.
#crossentropy = torch.nn.CrossEntropyLoss(reduction='mean')
crossentropy = torch.nn.BCELoss()# #(reduction='mean')
# Adam optimizer with default hyper-parameters
# (plain SGD with lr=0.01 was the alternative).
optimizer = torch.optim.Adam(model.parameters()) #SGD( ... , lr=0.01)
# Setting up tensorboard
#writer = SummaryWriter('runs/mnist_lenet')
# ************************************************************************
# Calculate the error of a model on data from a given loader
# This is used to calculate the validation error every couple of
# thousand batches
# ************************************************************************
def calcError(net, dataloader):
    """Compute the average loss and error rates of `net` over `dataloader`.

    Returns (mean_loss, full_error_pct, per_label_error_pct):
      - full_error_pct: percent of samples where not all 9 labels match.
      - per_label_error_pct: percent of individual label mismatches.
    """
    vloss = 0
    vcorrect_any = 0
    vcorrect = 0
    vcount_any = 0
    vcount = 0
    for batch_idx, (data, labels, _) in enumerate(dataloader):
        data = data.to("cuda:0")
        labels = labels.to("cuda:0")
        # BUGFIX: the original evaluated the global `model` instead of the
        # `net` argument (they happened to be the same object at call sites).
        y = net(data)
        loss = crossentropy(y, labels)
        vloss += loss.item()
        # Binarize the sigmoid outputs at 0.5.
        res = torch.round(y.data)
        # Per-label accuracy over all 9 outputs.
        vcorrect_any += (res == labels).sum().item()
        vcount_any += BATCHSIZE * 9
        # A sample counts as correct only when all 9 labels match.
        vcorrect += sum(row.sum().item() == 9 for row in (res == labels))
        vcount += BATCHSIZE
    return vloss/len(dataloader), 100.0*(1.0-vcorrect/vcount), 100.0*(1.0-vcorrect_any/vcount_any)
def main():
    """Run up to 100 training epochs, printing train/validation statistics
    every STATS_INTERVAL batches."""
    # Training
    running_loss = 0.0
    running_correct = 0
    running_count = 0
    running_correct_any = 0
    running_count_any = 0
    '''
    # Add the graph to tensorboard
    dataiter = iter(train_loader)
    data, labels, _ = dataiter.next()
    writer.add_graph (model, data)
    writer.flush()
    '''
    # Cycle through epochs
    for epoch in range(100):
        # Cycle through batches
        for batch_idx, (data, labels, _) in enumerate(train_loader):
            data = data.to("cuda:0")
            labels = labels.to("cuda:0")
            optimizer.zero_grad()
            y = model(data)
            loss = crossentropy(y, labels)
            loss.backward()
            running_loss += loss.cpu().item()
            optimizer.step()
            #_, predicted = torch.max(y.data.cpu(), 1)
            #running_correct += (predicted == (labels == 1.)).sum().item()
            '''
            tvals, tidx = torch.topk(y, 3)
            res = torch.zeros(BATCHSIZE, 9)
            res = res.scatter(1, tidx, tvals)
            '''
            #res = (abs(1 - y) < 0.5)
            # Binarize the sigmoid outputs at 0.5.
            res = torch.round(y.data)
            #'''
            #running_correct_any += (res == (labels == 1.)).sum().item()
            running_correct_any += (res == labels).sum().item()
            running_count_any += BATCHSIZE * 9
            #'''
            # A sample is correct only when all 9 labels match.
            running_correct += sum(row.sum().item() == 9 for row in (res == labels))
            running_count += BATCHSIZE
            # Print statistics
            if ((batch_idx+1) % STATS_INTERVAL) == 0:
                train_err = 100.0*(1.0-running_correct / running_count)
                train_err_any = 100.0*(1.0-running_correct_any / running_count_any)
                valid_loss, valid_err, valid_err_any = calcError(model, valid_loader)
                print ('Epoch: %d batch: %5d' % (epoch + 1, batch_idx + 1), end=" ")
                print (' train-loss: %.3f train-err: %.3f %.3f' % (running_loss / STATS_INTERVAL, train_err, train_err_any), end=" ")
                print (' valid-loss: %.3f valid-err: %.3f %.3f' % (valid_loss, valid_err, valid_err_any))
                '''
                # Write statistics to the log file
                writer.add_scalars ('Loss', {
                    'training:': running_loss / STATS_INTERVAL,
                    'validation:': valid_loss },
                    epoch * len(train_loader) + batch_idx)
                writer.add_scalars ('Error', {
                    'training:': train_err,
                    'validation:': valid_err },
                    epoch * len(train_loader) + batch_idx)
                '''
                # NOTE(review): running_correct_any / running_count_any are
                # never reset here, so the per-label error is cumulative over
                # the whole run while the full-sample error is per-interval --
                # confirm this asymmetry is intended.
                running_loss = 0.0
                running_correct = 0.0
                running_count=0.0
if __name__ == "__main__":
main()
|
import pathlib
from os import getenv
from logging import INFO
# Logging settings
LOG_LEVEL = INFO
LOGGER_FORMAT = "%(asctime)s %(message)s"
# Path refs: repo root is one level above this file's directory.
ROOT = pathlib.Path(__file__).parents[1]
DATA_FOLDER = ROOT.joinpath("data")
LOG_FOLDER = ROOT.joinpath("log")
# Crawler tuning.
QUERY_RETRY_LIMIT = 3   # max retries per failed query
SEMAPHORE_LIMIT = 10    # concurrency cap (presumably simultaneous requests)
SEMAPHORE_WAIT = 5      # wait when the semaphore is full (units not shown here -- confirm)
# Desktop-browser user agent sent with every request.
HEADERS = {
    'user-agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/45.0.2454.101 Safari/537.36'),
}
# Verbosity level, overridable via the VERBOSE environment variable.
VERBOSE = int(getenv("VERBOSE", "2"))
|
# fromkeys(seq, value):
# creates a new dict whose keys come from seq, all mapped to value
dic_1 = {
    'Name': 'Pepe',
    'Age': 200
}
dic_2 = {
    'ID': 73783287328732,
    'Tel': 1532233
}
sequ_1 = ('Name', 'Age', 'ID', 'Tel')
# update(): merge dic_2's entries into dic_1 in place
dic_1.update(dic_2)
print(f'new dic: {str(dic_1)}')
# fromkeys(seq, VALUE)
dict_fromkeys = dict.fromkeys(sequ_1, 'pepe')
print(str(dict_fromkeys))
# has_key() existed in Python 2 -- use `in`, which is True if the key exists
# get(key, val): if the key does not exist, returns val instead of raising
if not 'pepe' in dic_1:
    print('No tiene')
if 'ID' in dic_1:
    print('tiene')
print(dic_1.get('pepe', "no existe"))
# setdefault inserts the key with the given value if missing, then returns it
print(dic_1.setdefault('pepe', "nuevo valor creado"))
print(f'new dic: {str(dic_1)}')
import json
import urllib.request
import sqlite3
#pull latest data into file
def pullData():
    """Fetch the latest geojson posts from the API and cache them in data.json."""
    raw = urllib.request.urlopen("https://frontlinehelp.api.ushahidi.io/api/v3/posts/geojson").read()
    parsed = json.loads(raw)
    with open('data.json', 'w', encoding='utf-8') as handle:
        json.dump(parsed, handle)
#read data from file
def readGeoData():
    """Load and return the cached geojson data from data.json."""
    with open('data.json', 'r') as handle:
        return json.loads(handle.read())
"""
read record data from file
depracated since no longer needed
"""
#def readRecordData():
# with open('record.json','r') as file:
# data = file.read()
# return json.loads(data)
#find number of records
def findRange(data):
    """Return the number of records in the geojson 'features' array.

    BUGFIX: the original probed indices until IndexError and returned
    x-1, i.e. len-1, so the caller's range() silently skipped the last
    record (and an empty feature list yielded -1). len() gives the true
    count directly.
    """
    return len(data['features'])
#returns lat,long for a specified id
def findCoords(geoData, recordID):
    """Return (latitude, longitude) of the record at index recordID.

    GeoJSON stores coordinates as [longitude, latitude]; this swaps them
    into the conventional (lat, lon) order.
    """
    point = geoData['features'][recordID]['geometry']['geometries'][0]
    lon, lat = point['coordinates'][0], point['coordinates'][1]
    return lat, lon
#grab url links
def findURL(data, recordID):
    """Return the detail-endpoint URL of the record at index recordID."""
    record = data['features'][recordID]
    return record['properties']['url']
"""
follow url and dump data
same method as pullData
depracated since inefficient read/writing
"""
#def followUrlDumpData(url):
# data = urllib.request.urlopen(url).read()
# serialData = json.loads(data) #serialise data
# with open('record.json','w',encoding='utf-8') as file:
# json.dump(serialData,file)
#get specific record url (in memory)
def followUrlRetData(url):
    """Fetch a record URL and return its parsed JSON body (kept in memory)."""
    body = urllib.request.urlopen(url).read()
    return json.loads(body)
#insert to database
def insertToDB(recordID,formID,lat,long,postcode,url):
    # Insert one row into `records` via the module-level cursor `c` and
    # connection `conn`; the parameterized query guards against injection.
    c.execute('INSERT INTO records VALUES(?,?,?,?,?,?)',(recordID,formID,lat,long,postcode,url))
    conn.commit()
#get record id
def findID(recordData):
    """Return the record's top-level 'id' value."""
    record_id = recordData['id']
    return record_id
#get form id
def findFormID(recordData):
    """Return the id of the form the record was submitted through."""
    form = recordData['form']
    return form['id']
"""
try find postcode based on key (hash)
if key doesnt work return null
"""
def findPostcode(recordData):
    """Return the record's postcode, or None when it is absent.

    The postcode lives under a fixed field-uuid key inside 'values'.
    BUGFIX: the bare `except:` swallowed every exception (including
    KeyboardInterrupt); only the expected lookup failures are caught now.
    """
    try:
        return recordData['values']['ecd7d7fd-da36-4ace-a78a-571c5e296ad4'][0]
    except (KeyError, IndexError, TypeError):
        return None
#dump all info into db
def dumpRecordsToDB(geoData,recordRange):
    """Fetch each record's detail endpoint and insert it into the records table.

    Iterates indices [0, recordRange); records missing expected keys are
    logged and skipped so one bad record does not abort the run.
    """
    for i in range(0,recordRange):
        try:
            recordData = followUrlRetData(findURL(geoData,i))
            recordID = findID(recordData)
            formID = findFormID(recordData)
            latitude,longitude = findCoords(geoData,i)
            postcode = findPostcode(recordData)
            insertToDB(recordID,formID,latitude,longitude,postcode,findURL(geoData,i))
        except KeyError as e:
            # Malformed record: report the missing key and continue.
            print(e)
            continue
def geoJson():
    """Full pipeline: fetch the latest data, store every record, close the DB."""
    pullData()
    geoData = readGeoData()
    # NOTE(review): re-reads the cache file instead of reusing geoData --
    # harmless but redundant.
    recordRange = findRange(readGeoData())
    dumpRecordsToDB(geoData,recordRange)
    conn.close() #disconnect from db
# Module bootstrap: open the SQLite database and run the whole
# pull-and-store pipeline immediately on import/run.
conn = sqlite3.connect('insights.db') #connect to db
c = conn.cursor() #create cursor obj
geoJson()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CarRentalMerchantInfo(object):
    """Car-rental merchant details for the Alipay OpenAPI.

    Holds the brand name, merchant contact, partner id (pid) and
    sub-merchant id (smid), and converts to/from the wire dict format.
    """

    # Attribute names serialized to / parsed from the alipay dict.
    _FIELDS = ('brand_name', 'merchant_contact', 'pid', 'smid')

    def __init__(self):
        self._brand_name = None
        self._merchant_contact = None
        self._pid = None
        self._smid = None

    @property
    def brand_name(self):
        return self._brand_name

    @brand_name.setter
    def brand_name(self, value):
        self._brand_name = value

    @property
    def merchant_contact(self):
        return self._merchant_contact

    @merchant_contact.setter
    def merchant_contact(self, value):
        self._merchant_contact = value

    @property
    def pid(self):
        return self._pid

    @pid.setter
    def pid(self, value):
        self._pid = value

    @property
    def smid(self):
        return self._smid

    @smid.setter
    def smid(self, value):
        self._smid = value

    def to_alipay_dict(self):
        """Serialize the truthy fields, delegating to nested to_alipay_dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a CarRentalMerchantInfo from a response dict (None if empty)."""
        if not d:
            return None
        o = CarRentalMerchantInfo()
        for field in CarRentalMerchantInfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
import numpy as np
import tensorflow as tf
import argparse
import time
import os
import cPickle
from utils import TextLoader
from model import Model
def main():
    """Parse command-line hyper-parameters and launch training.

    Note: this file is Python 2 (print statements, xrange, cPickle).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='data/scotus',
                       help='data directory containing input.txt')
    parser.add_argument('--save_dir', type=str, default='models/new_save',
                       help='directory for checkpointed models (load from here if one is already present)')
    parser.add_argument('--rnn_size', type=int, default=1500,
                       help='size of RNN hidden state')
    parser.add_argument('--num_layers', type=int, default=4,
                       help='number of layers in the RNN')
    parser.add_argument('--model', type=str, default='gru',
                       help='rnn, gru, or lstm')
    parser.add_argument('--batch_size', type=int, default=40,
                       help='minibatch size')
    parser.add_argument('--seq_length', type=int, default=50,
                       help='RNN sequence length')
    parser.add_argument('--num_epochs', type=int, default=50,
                       help='number of epochs')
    parser.add_argument('--save_every', type=int, default=1000,
                       help='save frequency')
    parser.add_argument('--grad_clip', type=float, default=5.,
                       help='clip gradients at this value')
    parser.add_argument('--learning_rate', type=float, default=6e-5,
                       help='learning rate')
    parser.add_argument('--decay_rate', type=float, default=0.95,
                       help='how much to decay the learning rate')
    parser.add_argument('--decay_steps', type=int, default=100000,
                       help='how often to decay the learning rate')
    args = parser.parse_args()
    train(args)
def train(args):
    """Build (or restore) the char-RNN described by `args` and run training.

    Resumes from an existing checkpoint in args.save_dir when one exists,
    overriding the model-shape arguments with the saved ones.
    """
    # Create the data_loader object, which loads up all of our batches, vocab dictionary, etc.
    # from utils.py (and creates them if they don't already exist).
    # These files go in the data directory.
    data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
    args.vocab_size = data_loader.vocab_size
    load_model = False
    if not os.path.exists(args.save_dir):
        print("Creating directory %s" % args.save_dir)
        os.mkdir(args.save_dir)
    elif (os.path.exists(os.path.join(args.save_dir, 'config.pkl'))):
        # Trained model already exists
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            with open(os.path.join(args.save_dir, 'config.pkl')) as f:
                saved_args = cPickle.load(f)
                # Model-shape arguments must match the checkpoint, so the
                # saved values win over the command line.
                args.rnn_size = saved_args.rnn_size
                args.num_layers = saved_args.num_layers
                args.model = saved_args.model
                print("Found a previous checkpoint. Overwriting model description arguments to:")
                print(" model: {}, rnn_size: {}, num_layers: {}".format(
                    saved_args.model, saved_args.rnn_size, saved_args.num_layers))
                load_model = True
    # Save all arguments to config.pkl in the save directory -- NOT the data directory.
    with open(os.path.join(args.save_dir, 'config.pkl'), 'w') as f:
        cPickle.dump(args, f)
    # Save a tuple of the characters list and the vocab dictionary to chars_vocab.pkl in
    # the save directory -- NOT the data directory.
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'w') as f:
        cPickle.dump((data_loader.chars, data_loader.vocab), f)
    # Create the model!
    print("Building the model")
    model = Model(args)
    config = tf.ConfigProto(log_device_placement=False)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(model.save_variables_list())
        if (load_model):
            print("Loading saved parameters")
            saver.restore(sess, ckpt.model_checkpoint_path)
        # Progress counters are stored inside the graph so resume is exact.
        global_epoch_fraction = sess.run(model.global_epoch_fraction)
        global_seconds_elapsed = sess.run(model.global_seconds_elapsed)
        if load_model: print("Resuming from global epoch fraction {:.3f},"
                " total trained time: {}, learning rate: {}".format(
                global_epoch_fraction, global_seconds_elapsed, sess.run(model.lr)))
        data_loader.cue_batch_pointer_to_epoch_fraction(global_epoch_fraction)
        initial_batch_step = int((global_epoch_fraction
                - int(global_epoch_fraction)) * data_loader.total_batch_count)
        epoch_range = (int(global_epoch_fraction),
                args.num_epochs + int(global_epoch_fraction))
        writer = tf.summary.FileWriter(args.save_dir, graph=tf.get_default_graph())
        outputs = [model.cost, model.final_state, model.train_op, model.summary_op]
        is_lstm = args.model == 'lstm'
        global_step = epoch_range[0] * data_loader.total_batch_count + initial_batch_step
        try:
            for e in xrange(*epoch_range):
                # e iterates through the training epochs.
                # Reset the model state, so it does not carry over from the end of the previous epoch.
                state = sess.run(model.initial_state)
                batch_range = (initial_batch_step, data_loader.total_batch_count)
                initial_batch_step = 0
                for b in xrange(*batch_range):
                    global_step += 1
                    if global_step % args.decay_steps == 0:
                        # Set the model.lr element of the model to track
                        # the appropriately decayed learning rate.
                        current_learning_rate = sess.run(model.lr)
                        current_learning_rate *= args.decay_rate
                        sess.run(tf.assign(model.lr, current_learning_rate))
                        print("Decayed learning rate to {}".format(current_learning_rate))
                    start = time.time()
                    # Pull the next batch inputs (x) and targets (y) from the data loader.
                    x, y = data_loader.next_batch()
                    # feed is a dictionary of variable references and respective values for initialization.
                    # Initialize the model's input data and target data from the batch,
                    # and initialize the model state to the final state from the previous batch, so that
                    # model state is accumulated and carried over between batches.
                    feed = {model.input_data: x, model.targets: y}
                    if is_lstm:
                        # LSTM state is a (c, h) tuple per layer.
                        for i, (c, h) in enumerate(model.initial_state):
                            feed[c] = state[i].c
                            feed[h] = state[i].h
                    else:
                        for i, c in enumerate(model.initial_state):
                            feed[c] = state[i]
                    # Run the session! Specifically, tell TensorFlow to compute the graph to calculate
                    # the values of cost, final state, and the training op.
                    # Cost is used to monitor progress.
                    # Final state is used to carry over the state into the next batch.
                    # Training op is not used, but we want it to be calculated, since that calculation
                    # is what updates parameter states (i.e. that is where the training happens).
                    train_loss, state, _, summary = sess.run(outputs, feed)
                    elapsed = time.time() - start
                    global_seconds_elapsed += elapsed
                    writer.add_summary(summary, e * batch_range[1] + b + 1)
                    print "{}/{} (epoch {}/{}), loss = {:.3f}, time/batch = {:.3f}s" \
                        .format(b, batch_range[1], e, epoch_range[1], train_loss, elapsed)
                    # Every save_every batches, save the model to disk.
                    # By default, only the five most recent checkpoint files are kept.
                    if (e * batch_range[1] + b + 1) % args.save_every == 0 \
                            or (e == epoch_range[1] - 1 and b == batch_range[1] - 1):
                        save_model(sess, saver, model, args.save_dir, global_step,
                                data_loader.total_batch_count, global_seconds_elapsed)
        except KeyboardInterrupt:
            # Introduce a line break after ^C is displayed so save message
            # is on its own line.
            print()
        finally:
            writer.flush()
            # NOTE(review): if interrupted before the first batch runs, `e`
            # and `b` are undefined here and this raises NameError -- confirm
            # whether that is acceptable.
            global_step = e * data_loader.total_batch_count + b
            save_model(sess, saver, model, args.save_dir, global_step,
                    data_loader.total_batch_count, global_seconds_elapsed)
def save_model(sess, saver, model, save_dir, global_step, steps_per_epoch, global_seconds_elapsed):
    """Persist a checkpoint, recording epoch fraction and elapsed time in the graph."""
    global_epoch_fraction = float(global_step) / float(steps_per_epoch)
    checkpoint_path = os.path.join(save_dir, 'model.ckpt')
    print "Saving model to {} (epoch fraction {:.3f})".format(checkpoint_path, global_epoch_fraction)
    # Store the progress counters inside the graph so a later restore
    # can resume exactly where training stopped.
    sess.run(tf.assign(model.global_epoch_fraction, global_epoch_fraction))
    sess.run(tf.assign(model.global_seconds_elapsed, global_seconds_elapsed))
    saver.save(sess, checkpoint_path, global_step = global_step)
    print "Model saved."
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import os
import pytest
from ymir.schema import validators as v
from ymir import schema
from ymir import api as yapi
import tests.common as test_common
Invalid = v.Invalid
@test_common.mock_aws
def test_derived_schema(**extra_json_fields):
    """A service JSON that `extends` another inherits the base fields it
    does not override (org_name) while overriding the ones it declares (name).

    Any keyword arguments are merged into the extension JSON, which lets
    other tests (e.g. test_illegal_derived_schema) inject illegal fields.
    """
    import json  # hoisted to the top of the function instead of mid-logic
    with test_common.demo_service() as ctx:
        # Give the base service distinctive values we can assert against.
        ctx.rewrite_json(
            name="original-service",
            org_name='original-org')
        service = ctx.get_service()
        efile = os.path.join(service._ymir_service_root, "extension.json")
        extension_data = dict(
            extends=service._ymir_service_json_file,
            name="extension-service")
        extension_data.update(extra_json_fields)
        with open(efile, 'w') as fhandle:
            # json.dump writes directly to the file handle (same bytes as
            # fhandle.write(json.dumps(...))).
            json.dump(extension_data, fhandle)
        ex_service = yapi.load_service_from_json(efile, die=False)
        # `name` comes from the extension; `org_name` is inherited.
        assert ex_service.template_data()['name'] == extension_data['name']
        assert ex_service.template_data()['org_name'] == 'original-org'
def test_illegal_derived_schema():
    """An extension JSON carrying a field the schema does not allow must
    make service loading raise."""
    illegal_fields = dict(
        name="bad-extension-service",
        very_bad_field="field not allowed")
    with pytest.raises(Exception):
        test_derived_schema(**illegal_fields)
|
import pytest
from binary_search_tree.tree import Tree
@pytest.fixture()
def empty_tree() -> Tree:
    """Return a fresh, node-less Tree for each test.

    Fix: the annotation was `-> Tree()`, which *calls* the constructor when
    the function is defined; the annotation should be the class itself.
    """
    return Tree()
@pytest.fixture()
def tree_with_nodes(empty_tree) -> Tree:
    """Return a Tree pre-populated with six key/value pairs.

    Insertion order is significant: it determines the BST shape that the
    traversal and height tests below depend on.

    Fix: the annotation was `-> Tree()`, which *calls* the constructor when
    the function is defined; the annotation should be the class itself.
    """
    for key, value in [(5, "Peter"), (3, "Paul"), (1, "Mary"),
                       (10, "Karla"), (15, "Ada"), (25, "Kari")]:
        empty_tree.add(key, value)
    return empty_tree
def test_add_and_find(tree_with_nodes):
    """find() returns the value stored under each queried key."""
    for key, value in [(5, "Peter"), (15, "Ada"), (3, "Paul")]:
        assert tree_with_nodes.find(key) == value
def test_find_returns_none_for_empty_tree(empty_tree):
    """Looking up any key in an empty tree yields None."""
    # `is None` is the idiomatic identity check (was `== None`).
    assert empty_tree.find(5) is None
def test_find_returns_value_in_tree(tree_with_nodes):
    """A key present in the tree maps to its stored value."""
    result = tree_with_nodes.find(25)
    assert result == "Kari"
def test_find_returns_none_for_values_not_in_tree(tree_with_nodes):
    """A key absent from a populated tree yields None."""
    # `is None` is the idiomatic identity check (was `== None`).
    assert tree_with_nodes.find(6) is None
def test_inorder_with_empty_tree(empty_tree):
    """In-order traversal of an empty tree yields an empty list."""
    # Fix: the original bound `answer = empty_tree.inorder()` and never used
    # it, then recomputed the traversal in the assert; call it once.
    assert empty_tree.inorder() == []
def test_inorder_with_nodes(tree_with_nodes):
    """In-order traversal returns node dicts sorted by ascending key."""
    ordered_pairs = [(1, "Mary"), (3, "Paul"), (5, "Peter"),
                     (10, "Karla"), (15, "Ada"), (25, "Kari")]
    expected_answer = [{"key": k, "value": v} for k, v in ordered_pairs]
    assert tree_with_nodes.inorder() == expected_answer
def test_preorder_on_empty_tree(empty_tree):
    """Pre-order traversal of an empty tree yields an empty list."""
    result = empty_tree.preorder()
    assert result == []
def test_preorder_on_tree_with_nodes(tree_with_nodes):
    """Pre-order traversal visits each node before its subtrees."""
    visit_order = [(5, "Peter"), (3, "Paul"), (1, "Mary"),
                   (10, "Karla"), (15, "Ada"), (25, "Kari")]
    expected_answer = [{"key": k, "value": v} for k, v in visit_order]
    assert tree_with_nodes.preorder() == expected_answer
def test_postorder_on_empty_tree(empty_tree):
    """Post-order traversal of an empty tree yields an empty list."""
    result = empty_tree.postorder()
    assert result == []
def test_postorder_on_tree_with_nodes(tree_with_nodes):
    """Post-order traversal visits both subtrees before each node."""
    visit_order = [(1, "Mary"), (3, "Paul"), (25, "Kari"),
                   (15, "Ada"), (10, "Karla"), (5, "Peter")]
    expected_answer = [{"key": k, "value": v} for k, v in visit_order]
    assert tree_with_nodes.postorder() == expected_answer
def test_height_of_empty_tree_is_zero(empty_tree):
    """An empty tree reports height 0."""
    result = empty_tree.height()
    assert result == 0
def test_height_of_one_node_tree(empty_tree):
    """A single node gives the tree height 1."""
    empty_tree.add(5, "pasta")
    result = empty_tree.height()
    assert result == 1
def test_height_of_many_node_tree(tree_with_nodes):
    """Height reflects the longest root-to-leaf path and grows as
    additional inserts lengthen it."""
    assert tree_with_nodes.height() == 4
    # Two more inserts extend the deepest path by one level.
    for key, value in [(2, "pasta"), (2.5, "bread")]:
        tree_with_nodes.add(key, value)
    assert tree_with_nodes.height() == 5
def test_bfs_with_empty_tree(empty_tree):
    """Breadth-first traversal of an empty tree yields an empty list."""
    result = empty_tree.bfs()
    assert result == []
def test_bfs_with_tree_with_nodes(tree_with_nodes):
    """Breadth-first traversal visits nodes level by level from the root."""
    level_order = [(5, "Peter"), (3, "Paul"), (10, "Karla"),
                   (1, "Mary"), (15, "Ada"), (25, "Kari")]
    expected_answer = [{"key": k, "value": v} for k, v in level_order]
    assert tree_with_nodes.bfs() == expected_answer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.