repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
marina-kantar/Python-for-Everybody | refs/heads/master | d = {'a' : 2 , 'c' : 1 , 'd' : 4, 'b' : 3}
# Demo: inspecting and sorting a dictionary by key and by value.
# NOTE(review): `d` is defined on the line above this block (mangled by the
# dataset export); it maps single-letter keys to small integers.
t = d.items()
print(t)
# Sorted list of (key, value) pairs, ordered by key.
s = sorted(d.items())
print(s)
for key, value in sorted(d.items()):
    print(key, value)
# Build (value, key) pairs so the dictionary can be ordered by value.
o = list()
for key, value in d.items():
    o.append((value, key))
print(o)
# Largest value first.  NOTE(review): this sorted result is never used.
o = sorted(o, reverse=True)
| Python | 12 | 18.5 | 42 | /sorttuples.py | 0.508547 | 0.491453 |
marina-kantar/Python-for-Everybody | refs/heads/master | import urllib.request, urllib.parse, urllib.error
import json
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Fetch a JSON document of comment counts (py4e exercise) and sum the counts.
url = input('Enter url: ')
if len(url) < 1: url = 'http://py4e-data.dr-chuck.net/comments_468304.json'
fhand = urllib.request.urlopen(url, context=ctx)
data = fhand.read()
info = json.loads(data)
#print('User count:', len(info))
# br counts the comment entries; suma accumulates their 'count' fields.
br = 0
suma = 0
for item in info["comments"]:
    br = br + 1
    suma = suma + item["count"]
print('Count:', br)
print('Sum' , suma) | Python | 27 | 20.074074 | 75 | /json_assignment.py | 0.684859 | 0.665493 |
marina-kantar/Python-for-Everybody | refs/heads/master | import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Fetch an XML document of comment counts (py4e exercise) and sum every
# <count> element found anywhere in the tree.
url = input('Enter url: ')
if len(url) < 1: url = 'http://py4e-data.dr-chuck.net/comments_468303.xml'
fhand = urllib.request.urlopen(url, context=ctx)
data = fhand.read()
tree = ET.fromstring(data)
# br counts the <count> elements; suma accumulates their integer text.
br = 0
suma = 0
counts = tree.findall('.//count')
for item in counts :
    br = br + 1
    suma = suma + int(item.text)
print('Count:', br)
print('Sum' , suma)
| Python | 27 | 20.74074 | 74 | /assignment_parsing_xml.py | 0.693356 | 0.674617 |
marina-kantar/Python-for-Everybody | refs/heads/master | import sqlite3
# Count 'From:' email addresses in an mbox file into an SQLite table,
# then print the ten most frequent senders.
conn = sqlite3.connect('emaildb.sqlite')
cur = conn.cursor()
# Rebuild the table from scratch on every run.
cur.execute('DROP TABLE IF EXISTS Counts')
cur.execute('CREATE TABLE Counts (email TEXT, counts INTEGER)')
fname = input('Enter file name: ')
if len(fname)< 1 : fname = 'mbox-short.txt'
handle = open(fname)
for line in handle :
    if not line.startswith('From: ') : continue
    # The address is the second whitespace-separated token of the header line.
    piece = line.split()
    email = piece[1]
    # Insert-or-increment, done with two statements per address.
    cur.execute('SELECT counts FROM Counts WHERE email= ?', (email,))
    row = cur.fetchone()
    if row is None :
        cur.execute('INSERT INTO Counts (email, counts) VALUES (?, 1)', (email,))
    else :
        cur.execute('UPDATE Counts SET counts = counts + 1 WHERE email = ?', (email,))
conn.commit()
# limit to 10 rows
sqlstr = 'SELECT email, counts FROM Counts ORDER BY counts DESC LIMIT 10'
for row in cur.execute(sqlstr):
    print(str(row[0]), row[1])
cur.close() | Python | 30 | 28.566668 | 86 | /sqlite_py.py | 0.655756 | 0.642212 |
ShaneRich5/lab3-ex1 | refs/heads/master | import smtplib
# --- Message details --------------------------------------------------------
# NOTE(review): the original script passed fromname/toname/subject/msg to
# message.format() without ever defining them, raising NameError before any
# mail was sent; they are defined here with placeholder values.  The original
# fromaddr also lacked the '.com' suffix present in `username` below —
# assumed to be a typo and corrected.
fromname = 'Shane Richards'
fromaddr = 'shane.richards212@gmail.com'
toname = 'David'
toaddr = 'david@alteroo.com'
subject = 'Lab 3 exercise'
msg = 'This is a test message sent from sendmail.py.'

# RFC 822-style template.  The blank line after Subject separates the
# headers from the body (missing in the original, which made the body be
# parsed as a malformed header line).
message = """From: {} <{}>
To: {} <{}>
Subject: {}

{}
"""
messagetosend = message.format(
    fromname,
    fromaddr,
    toname,
    toaddr,
    subject,
    msg)
# Credentials
# NOTE(review): plaintext credentials committed to source control — rotate
# the password and load it from the environment instead.
username = 'shane.richards212@gmail.com'
password = 'curryishot'
# The actual message: open an SMTP session, upgrade to TLS, authenticate,
# and hand the formatted message off for delivery.
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username, password)
server.sendmail(fromaddr, toaddr, messagetosend)
server.quit() | Python | 30 | 13.866667 | 48 | /sendmail.py | 0.676404 | 0.669663 |
Basetcan/Rock_Paper_Scissors-AI-Game | refs/heads/main | # importing the libraries that we use
import random
import numpy as np
import math
import itertools
import time
# --- Module-level game state ------------------------------------------------
# Most recent move chosen by player 0 (1=rock, 2=paper, 3=scissors).
choice = 0
# NOTE(review): these six lists are never written to anywhere in the file.
rock_data = []
paper_data = []
scissors_data = []
next_move_rock = []
next_move_paper = []
next_move_scissors = []
## We keep the round numbers of the human/computer (player 0) moves in these lists.
player0_rock = []
player0_paper = []
player0_scissors = []
## Round numbers of the AI (player 1) moves.
player1_rock = []
player1_paper = []
player1_scissors = []
## Current round number (1-based).
counter = 1
## Tallies for the three possible round outcomes.
player0_won = 0
player1_won = 0
draw = 0
## Select mode: 1 = user vs AI, 2 = computer (random) vs AI.
mode = int(input("which mode (1)user vs ai (2)comp vs ai"))
## Total number of rounds to play.
n_times = int(input("how many times do you want to play ?"))
## we use this method for compare the moves and get result.we can learn who is winner with this method.
def compare(counter, player0_rock, player0_paper, player0_scissors, player1_rock, player1_paper, player1_scissors) :
    """Announce the outcome of round `counter` and update the global tallies.

    Each per-move list holds the round numbers at which that move was played;
    membership of `counter` tells us what each side played this round.
    Returns the updated (draw, player0_won, player1_won) globals.
    """
    global draw, player0_won, player1_won
    # Membership tests, evaluated once up front (mirrors the original).
    rock0 = counter in player0_rock
    rock1 = counter in player1_rock
    paper0 = counter in player0_paper
    paper1 = counter in player1_paper
    scissors0 = counter in player0_scissors
    scissors1 = counter in player1_scissors
    # Outcome table, checked in the same order as the original if-chain so
    # the printed announcements come out identically.
    outcomes = (
        (rock0 and rock1, "DRAW", "draw"),
        (paper0 and paper1, "DRAW", "draw"),
        (scissors0 and scissors1, "DRAW", "draw"),
        (rock0 and paper1, "PLAYER AI WON", "ai"),
        (rock0 and scissors1, "PLAYER COMP/USER WON", "user"),
        (paper0 and scissors1, "PLAYER AI WON", "ai"),
        (paper0 and rock1, "PLAYER COMP/USER WON", "user"),
        (scissors0 and rock1, "PLAYER AI WON", "ai"),
        (scissors0 and paper1, "PLAYER COMP/USER WON", "user"),
    )
    for happened, announcement, tally in outcomes:
        if happened:
            print(announcement)
            if tally == "draw":
                draw += 1
            elif tally == "ai":
                player1_won += 1
            else:
                player0_won += 1
    return draw, player0_won, player1_won
def player0(counter) :
    """Record player 0's move for round `counter`.

    In mode 1 the human types R/P/S; in mode 2 a random move is drawn.
    The move (1=rock, 2=paper, 3=scissors) is appended to the matching
    round-number list and also left in the global `choice`.
    """
    global player0_rock
    global player0_paper
    global player0_scissors
    global choice
    ## If the user plays, they must enter R, P or S.
    if mode == 1 :
        # NOTE(review): an unrecognised letter leaves `choice` at its previous
        # global value, silently replaying the last move.
        choice_user = str(input("enter R,P or S "))
        if choice_user == 'R' :
            choice = 1
        if choice_user == "P" :
            choice = 2
        if choice_user == "S" :
            choice = 3
    ## If the computer plays, it moves uniformly at random.
    if mode == 2 :
        choice = random.choice([1, 2, 3])
    if choice == 1 :
        player0_rock.append(counter)
    if choice == 2 :
        player0_paper.append(counter)
    if choice == 3 :
        player0_scissors.append(counter)
    ## Exception case: out-of-range choice — re-prompt recursively.
    # NOTE(review): `choice == " "` compares an int to a string and is always
    # False; also the recursive retry can append a second move for the same
    # round before the outer call returns — confirm intended behaviour.
    if choice < 1 or choice > 3 or choice == " " :
        print("select again")
        player0(counter)
    return choice, player0_rock, player0_paper, player0_scissors
## our ai player . ai player moves with using probability of rock,paper and scissors.
def player1(counter) : ## `counter` is the round number ("time") of the move.
    """Choose and record the AI's move for round `counter`, then print the result.

    For the first 9 rounds the AI plays randomly (not enough history).  From
    round 10 on, it estimates the probability of each opponent move with the
    Gaussian likelihoods p_rock/p_paper/p_scissors and counters the most
    likely one via a rule-based comparison.
    """
    if counter < 10 : ## The AI starts with random moves: not enough data yet.
        choice = random.choice([1, 2, 3])
        if choice == 1 :
            player1_rock.append(counter)
        if choice == 2 :
            player1_paper.append(counter)
        if choice == 3 :
            player1_scissors.append(counter)
        print("draw,player,ai,#ofgame \n",
              compare(counter, player0_rock, player0_paper, player0_scissors, player1_rock, player1_paper, player1_scissors),
              counter) ## show the result
    else :
        ## Estimate the probability of each opponent move, then decide via a
        ## rule-based system: beat the most likely move.
        prock = p_rock(player0_rock, counter)
        ppaper = p_paper(player0_paper, counter)
        pscissors = p_scissors(player0_scissors, counter)
        # NOTE(review): when the two largest probabilities tie exactly, none
        # of these branches fires and no AI move is recorded for the round.
        if prock > ppaper and prock > pscissors:
            if ppaper > pscissors :
                player1_paper.append(counter)
            if ppaper < pscissors :
                player1_rock.append(counter)
        if ppaper > prock and ppaper > pscissors :
            if pscissors > prock :
                player1_scissors.append(counter)
            if pscissors < prock :
                player1_paper.append(counter)
        if pscissors > ppaper and pscissors > prock :
            if prock > ppaper :
                player1_rock.append(counter)
            if prock < ppaper :
                player1_scissors.append(counter)
        print(prock, ppaper, pscissors) ## Debug: the three estimated probabilities.
        ## Uncomment to dump the full move history each round.
        """
        print(player0_rock)
        print(player0_paper)
        print(player0_scissors)
        print(player1_rock)
        print(player1_paper)
        print(player1_scissors)
        """
        print("draw,player,ai,#ofgame \n",
              compare(counter, player0_rock, player0_paper, player0_scissors, player1_rock, player1_paper, player1_scissors),
              counter) ## show the result
    return player1_rock, player1_paper, player1_scissors
# We used Naive Bayes Classification to calculate probability of the moves according to the our previous datas
# https://en.wikipedia.org/wiki/Naive_Bayes_classifier
"""
With this mathematical function,
we are trying to calculate what the next move will be with the previously played data
by making a naive bayes classification.
"""
def p_rock(rock_data, counter) :
    """Gaussian (naive-Bayes) likelihood of a rock move at round `counter`.

    Fits a normal distribution to the round numbers in `rock_data` (sample
    variance, ddof=1) and evaluates its density at `counter`.  abs() guards
    keep the result non-negative even for degenerate variance.
    """
    variance = np.var(rock_data, ddof=1)
    centre = np.mean(rock_data)
    normaliser = 1 / math.sqrt(2 * math.pi * variance)
    exponent = -((counter - centre) ** 2) / abs(2 * variance)
    return abs(normaliser * math.exp(exponent))
def p_paper(paper_data, counter) :
    """Gaussian (naive-Bayes) likelihood of a paper move at round `counter`.

    Normal density over the round numbers in `paper_data` (sample variance,
    ddof=1) evaluated at `counter`; abs() keeps the value non-negative.
    """
    variance = np.var(paper_data, ddof=1)
    centre = np.mean(paper_data)
    normaliser = 1 / math.sqrt(2 * math.pi * variance)
    exponent = -((counter - centre) ** 2) / abs(2 * variance)
    return abs(normaliser * math.exp(exponent))
def p_scissors(scissors_data, counter) :
    """Gaussian (naive-Bayes) likelihood of a scissors move at round `counter`.

    Normal density over the round numbers in `scissors_data` (sample
    variance, ddof=1) evaluated at `counter`; abs() keeps it non-negative.
    """
    variance = np.var(scissors_data, ddof=1)
    centre = np.mean(scissors_data)
    normaliser = 1 / math.sqrt(2 * math.pi * variance)
    exponent = -((counter - centre) ** 2) / abs(2 * variance)
    return abs(normaliser * math.exp(exponent))
## Main game loop: play exactly n_times rounds (counter runs 1..n_times).
while counter > 0 and counter <= n_times :
    player0(counter)
    player1(counter)
    counter += 1
# Announce the overall winner (nothing is printed on an overall tie).
if (player1_won > player0_won):
    print("* AI WON THE GAME *")
if (player0_won > player1_won):
    print("* COMP/USER WON THE GAME *")
| Python | 222 | 31.171171 | 126 | /rps_ai_game.py | 0.604427 | 0.582564 |
beichao1314/TREC2016 | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 7 16:17:49 2016
@author: xiaobei
"""
import nltk
from nltk.corpus import stopwords
import re
def removeStopWords_1(originSegs):
    """Lower-case the tokens, keeping only purely alphabetic ones that are
    not English stop words (NLTK corpus)."""
    stops = set(stopwords.words('english'))
    kept = []
    for token in originSegs:
        lowered = token.lower()
        if lowered not in stops and token.isalpha():
            kept.append(lowered)
    return kept
def filters(content):
    """Strip plain http:// URLs (letters, digits and .?/&=: only) from
    `content` and return the remaining text."""
    url_pattern = re.compile(r'http://[a-zA-Z0-9.?/&=:]*', re.S)
    return url_pattern.sub("", content)
def preprocess_profile(sentence):
    """Turn a profile sentence into stemmed content tokens.

    Pipeline: NLTK word tokenisation -> stop-word/non-alpha removal
    (removeStopWords_1) -> Porter stemming (stemword).
    """
    tokens = nltk.word_tokenize(sentence)
    content_tokens = removeStopWords_1(tokens)
    return stemword(content_tokens)
def stemword(word):
    """Apply the NLTK Porter stemmer to every token in `word` and return
    the stemmed tokens as a list."""
    stem = nltk.PorterStemmer().stem
    return [stem(token) for token in word]
| Python | 36 | 21.75 | 97 | /run1_crawlerA/process_profile.py | 0.678877 | 0.659341 |
beichao1314/TREC2016 | refs/heads/master | import pymysql
from process import preprocess
import time as T
import nltk
import math
import operator
class PushSummary():
    """Streaming tweet summariser for two TREC-2016 RTS runs (A and B).

    Run A pushes up to 10 tweets per topic per day as they arrive; run B
    accumulates up to 100 candidates per topic per day and ranks them at the
    day rollover.  Selection combines a language-model divergence gate
    (lms), a cumulative tf-idf gate, and a Jensen-Shannon novelty gate
    against previously selected summaries.
    """
    def __init__(self, lemda, interest_files, time, rest, fa, topicid):
        """Initialise per-topic state.

        lemda          -- interpolation weight for the language models.
        interest_files -- per-topic lists of stemmed profile terms.
        time           -- Time helper tracking the current 24h window.
        rest           -- REST client for pushing run-A notifications.
        fa             -- open file handle for the run-A output log.
        topicid        -- per-topic identifiers, indexable 0..L-1.
        """
        self.topicid = topicid
        self.L = len(self.topicid)
        # Total number of word tokens seen in the whole stream so far.
        self.SumOfLenthOfStream = 0
        # Stream-wide word frequency (background language model).
        self.wordInStream = {}
        self.lemda = lemda
        self.interest_files = interest_files
        self.time = time
        self.day = 1
        self.rest = rest
        self.fa = fa
        # Per-topic adaptive thresholds for runs A and B.
        self.tfidfthresholdA = []
        self.jsdthresholdA = []
        self.lmsthresholdA = []
        self.tfidfthresholdB = []
        self.jsdthresholdB = []
        self.lmsthresholdB = []
        # Per-topic count of summaries pushed so far today (A and B).
        self.numofdayA = []
        self.numofdayB = []
        # Per-topic corpus statistics for tf-idf over accepted tweets.
        self.queries_numOfTweet = []
        self.queries_numOfWord = []
        self.queries_word = []
        self.queries_occur = []
        # Per-topic selected summaries: lists of [payload, day] entries.
        self.summaryA = []
        self.summaryB = []
        # Per-topic query-term document frequency / total occurrences.
        self.qoccur = []
        self.numofq = []
        for i in range(self.L):
            # self.numofdayA[number] = 0
            # self.numofdayB[number] = 0
            self.numofdayA.append(0)
            self.numofdayB.append(0)
            self.queries_word.append({})
            self.queries_occur.append({})
            self.summaryA.append([])
            self.summaryB.append([])
            # self.word_tweet_query.append({})
            self.qoccur.append({})
            self.numofq.append({})
            self.tfidfthresholdA.append(0.7)
            self.jsdthresholdA.append(0.04)
            self.lmsthresholdA.append(0.02)
            self.tfidfthresholdB.append(0.5)
            self.jsdthresholdB.append(0.04)
            self.lmsthresholdB.append(0.02)
            self.queries_numOfTweet.append(0)
            self.queries_numOfWord.append(0)
    def pushSummarys(self, tweet):
        """Process one streamed tweet: handle the day rollover (rank and emit
        run-B output), update stream statistics, and run the A/B selection
        gates for every topic.  NOTE(review): relies on `preprocess` and an
        initialised `self.numofqinstream` dict being provided elsewhere in
        the package — `numofqinstream` is never created in __init__.
        """
        # Skip deletion notices and non-English tweets.
        if ('delete' not in tweet) and (tweet['lang'] == 'en'):
            # For retweets, score the original status but keep the retweet's
            # timestamps so the day bookkeeping stays consistent.
            if 'retweeted_status' in tweet:
                tem = tweet['retweeted_status']
                tem['timestamp_ms'] = tweet['timestamp_ms']
                tem['created_at'] = tweet['created_at']
                tweet = tem
            delta = self.time.calculatetime(tweet['created_at'])
            # Day rollover: rank today's run-B candidates per topic and write
            # them to B.txt, then reset the daily counters and thresholds.
            if delta >= 1:
                for x in range(self.L):
                    stemwords_interest_profile = self.interest_files[x]
                    self.numofdayA[x] = 0
                    self.numofdayB[x] = 0
                    listofsummaryA = [summary[0] for summary in self.summaryA[x] if summary[1] == self.day]
                    if len(listofsummaryA) > 0:
                        self.tfidfthresholdA[x] = min(summaryA[2] for summaryA in listofsummaryA)
                    listofsummaryB = [summary[0] for summary in self.summaryB[x] if summary[1] == self.day]
                    if len(listofsummaryB) > 0:
                        self.tfidfthresholdB[x] = min(summaryB[2] for summaryB in listofsummaryB)
                        sumoflen = sum(summaryBBBB[5] for summaryBBBB in listofsummaryB)
                        ADL = sumoflen / len(listofsummaryB)
                        lenofq = len(stemwords_interest_profile)
                        result = []
                        # Score each candidate against the profile terms with
                        # a TF (RITF/LRTF blend) x TDF (IDF x AEF) weighting.
                        for summaryBBB in listofsummaryB:
                            score = 0
                            TF = summaryBBB[4]
                            for q in stemwords_interest_profile:
                                tf = TF[q]
                                avgtf = sum(TF[qq] for qq in stemwords_interest_profile) / len(TF)
                                RITF = math.log2(1 + tf) / math.log2(1 + avgtf)
                                LRTF = tf * math.log2(1 + ADL / summaryBBB[5])+0.0001
                                w = 2 / (1 + math.log2(1 + lenofq))
                                TFF = w * RITF / (1 + RITF) + (1 - w) * LRTF / (1 + LRTF)
                                IDF = math.log((len(listofsummaryB) + 1) / (self.qoccur[x][q] + 1)) + 0.0001
                                AEF = self.numofq[x][q] / (self.qoccur[x][q] + 1)
                                TDF = IDF * AEF / (1 + AEF)
                                sim = TFF * TDF
                                score += sim
                                del tf, avgtf, RITF, LRTF, w, TFF, IDF, AEF, TDF, sim
                            result.append([score, summaryBBB[1]])
                        del listofsummaryB
                        result.sort(key=operator.itemgetter(0), reverse=True)
                        j = 1
                        # Emit the ranked run-B list in TREC format.
                        for i in result:
                            if (self.day) > 9:
                                d = '201608' + str(self.day)
                            else:
                                d = '2016080' + str(self.day)
                            with open('B.txt', 'a') as ff:
                                ff.write(
                                    '%s %s Q0 %s %s %s CCNUNLPrun2\n' % (
                                        d, self.topicid[x], i[1], str(j), i[0]))
                            j = j + 1
                self.time.settime()
                self.day = self.day + 1
            content = tweet['text']
            stemwords_tweet = preprocess(content)
            del content
            wordInTweet = {}
            # preprocess() returns False for tweets rejected during cleaning.
            if stemwords_tweet == False:
                pass
            else:
                numOfWordAtweet = len(stemwords_tweet)
                self.SumOfLenthOfStream = numOfWordAtweet + self.SumOfLenthOfStream
                id_str = tweet['id_str']
                # Update stream-wide and per-tweet word frequencies.
                for word in stemwords_tweet:
                    if word in self.wordInStream:
                        self.wordInStream[word] += 1
                    else:
                        self.wordInStream[word] = 1
                    if word in wordInTweet:
                        wordInTweet[word] += 1
                    else:
                        wordInTweet[word] = 1
                # Update stream counts of every topic's profile terms.
                for x in range(self.L):
                    stemwords_interest_profile = self.interest_files[x]
                    for q in stemwords_interest_profile:
                        if q in self.numofqinstream:
                            self.numofqinstream[q] += stemwords_tweet.count(q)
                        else:
                            self.numofqinstream[q] = stemwords_tweet.count(q)
                # Per-topic selection: only tweets containing at least one
                # profile term are considered.
                for x in range(self.L):
                    stemwords_interest_profile = self.interest_files[x]
                    count = sum(stemwords_tweet.count(wordsss) for wordsss in stemwords_interest_profile)
                    lenofq = len(stemwords_interest_profile)
                    if count >= 1:
                        qt = {}
                        for qqq in stemwords_interest_profile:
                            if qqq in qt:
                                qt[qqq] += stemwords_tweet.count(qqq)
                            else:
                                qt[qqq] = stemwords_tweet.count(qqq)
                        # Jensen-Shannon divergence between the smoothed
                        # query and tweet language models (lower = closer).
                        lms = 0
                        samewords = [q for q in stemwords_interest_profile if q in stemwords_tweet]
                        for qq in samewords:
                            Pq = self.lemda * 1.0 / float(lenofq) + (1 - self.lemda) * float(
                                self.numofqinstream[qq]) / float(
                                self.SumOfLenthOfStream)
                            Pt = self.lemda * qt[qq] / float(numOfWordAtweet) + (1 - self.lemda) * float(
                                self.numofqinstream[qq]) / float(self.SumOfLenthOfStream)
                            M = 0.5 * (Pq + Pt)
                            lms += 0.5 * Pq * math.log(Pq / M) + 0.5 * Pt * math.log(Pt / M)
                        # ---- Run A gate: lms -> tf-idf -> JSD novelty. ----
                        if lms <= self.lmsthresholdA[x]:
                            sumoftfidf = 0.0
                            for word in stemwords_tweet:
                                if word in self.queries_word[x]:
                                    self.queries_word[x][word] += 1
                                else:
                                    self.queries_word[x][word] = 1
                            for word in set(stemwords_tweet):
                                if word not in self.queries_occur[x]:
                                    self.queries_occur[x][word] = 1
                                else:
                                    self.queries_occur[x][word] += 1
                            self.queries_numOfWord[x] += numOfWordAtweet
                            self.queries_numOfTweet[x] += 1
                            for word in stemwords_tweet:
                                tf = self.queries_word[x][word] / self.queries_numOfWord[x]
                                idf = math.log2((self.queries_numOfTweet[x] + 1) / self.queries_occur[x][word])
                                sumoftfidf = sumoftfidf + tf * idf
                            # At most 10 run-A pushes per topic per day.
                            if sumoftfidf >= self.tfidfthresholdA[x] and self.numofdayA[x] < 10:
                                listofsummaryA = [summary[0] for summary in self.summaryA[x]]
                                if len(listofsummaryA) > 0:
                                    jsd = []
                                    # Novelty: JSD against every summary
                                    # already accepted for this topic.
                                    for summary in listofsummaryA:
                                        sumofjsd = 0
                                        tf = {}
                                        for wordss in summary[0]:
                                            if wordss in tf:
                                                tf[wordss] += 1
                                            else:
                                                tf[wordss] = 1
                                        sameword = [word for word in stemwords_tweet if
                                                    word in summary[0]]
                                        if len(sameword) > 0:
                                            for word in sameword:
                                                Pti = float(wordInTweet[word]) / float(numOfWordAtweet)
                                                Psi = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTi = self.lemda * Pti + (1 - self.lemda) * Psi
                                                Ptj = float(tf[word]) / float(len(summary[0]))
                                                Psj = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTj = self.lemda * Ptj + (1 - self.lemda) * Psj
                                                # sumofjsd += thetaTi * math.log(thetaTi / thetaTj)
                                                M = float((thetaTi + thetaTj) / 2)
                                                sumofjsd += 0.5 * (thetaTi * math.log(thetaTi / M)) + 0.5 * (
                                                    thetaTj * math.log(thetaTj / M))
                                            jsd.append(sumofjsd)
                                        else:
                                            jsd.append(0.06)
                                    JSD = min(jsd)
                                else:
                                    JSD = 0.04
                                # print('kld:' + str(JSD))
                                if JSD >= self.jsdthresholdA[x]:
                                    #self.rest.Post(self.topicid[x], id_str)
                                    self.lmsthresholdA[x] = lms
                                    self.jsdthresholdA[x] = JSD
                                    self.numofdayA[x] += 1
                                    a = [stemwords_tweet, id_str, sumoftfidf, JSD]
                                    self.summaryA[x].append([a, self.day])
                                    self.fa.write('%s %s tfidf:%s jsd:%s lms:%s\n' % (self.day, self.topicid[x], sumoftfidf, JSD,lms))
                        # ---- Run B gate: same pipeline, looser limits. ----
                        # NOTE(review): when both gates fire, the tf-idf corpus
                        # statistics are updated twice for the same tweet —
                        # confirm this double-counting is intended.
                        if lms <= self.lmsthresholdB[x]:
                            sumoftfidf = 0.0
                            for word in stemwords_tweet:
                                if word in self.queries_word[x]:
                                    self.queries_word[x][word] += 1
                                else:
                                    self.queries_word[x][word] = 1
                            for word in set(stemwords_tweet):
                                if word not in self.queries_occur[x]:
                                    self.queries_occur[x][word] = 1
                                else:
                                    self.queries_occur[x][word] += 1
                            self.queries_numOfWord[x] += numOfWordAtweet
                            self.queries_numOfTweet[x] += 1
                            for word in stemwords_tweet:
                                tf = self.queries_word[x][word] / self.queries_numOfWord[x]
                                idf = math.log2((self.queries_numOfTweet[x] + 1) / self.queries_occur[x][word])
                                sumoftfidf = sumoftfidf + tf * idf
                            # At most 100 run-B candidates per topic per day.
                            if sumoftfidf >= self.tfidfthresholdB[x] and self.numofdayB[x] < 100:
                                listofsummaryB = [summary[0] for summary in self.summaryB[x]]
                                if len(listofsummaryB) > 0:
                                    jsd = []
                                    for summary in listofsummaryB:
                                        sumofjsd = 0
                                        sameword = [word for word in stemwords_tweet if word in summary[0]]
                                        tf = {}
                                        for wordss in summary[0]:
                                            if wordss in tf:
                                                tf[wordss] += 1
                                            else:
                                                tf[wordss] = 1
                                        if len(sameword) > 0:
                                            for word in sameword:
                                                Pti = float(wordInTweet[word]) / float(numOfWordAtweet)
                                                Psi = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTi = self.lemda * Pti + (1 - self.lemda) * Psi
                                                Ptj = float(tf[word]) / float(len(summary[0]))
                                                Psj = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTj = self.lemda * Ptj + (1 - self.lemda) * Psj
                                                # sumofjsd += thetaTi * math.log(thetaTi / thetaTj)
                                                M = float((thetaTi + thetaTj) / 2)
                                                sumofjsd += 0.5 * (thetaTi * math.log(thetaTi / M)) + 0.5 * (
                                                    thetaTj * math.log(thetaTj / M))
                                            jsd.append(sumofjsd)
                                        else:
                                            jsd.append(0.06)
                                    JSD = min(jsd)
                                else:
                                    JSD = 0.04
                                if JSD >= self.jsdthresholdB[x]:
                                    self.numofdayB[x] += 1
                                    # Run B thresholds drift towards the running
                                    # mean of accepted JSD values.
                                    lenoflistB=len(listofsummaryB)
                                    self.jsdthresholdB[x]=(self.jsdthresholdB[x]*lenoflistB+JSD)/(lenoflistB+1)
                                    self.lmsthresholdB[x]=(self.lmsthresholdB[x]*lenoflistB+JSD)/(lenoflistB+1)
                                    # Record per-query-term stats used by the
                                    # day-rollover ranking (TF/qoccur/numofq).
                                    TF = {}
                                    for q in stemwords_interest_profile:
                                        TF[q] = stemwords_tweet.count(q)
                                        if q in stemwords_tweet:
                                            if q in self.qoccur[x]:
                                                self.qoccur[x][q] += 1
                                            else:
                                                self.qoccur[x][q] = 1
                                        else:
                                            self.qoccur[x][q] = 0
                                        if q in self.numofq[x]:
                                            self.numofq[x][q] += stemwords_tweet.count(q)
                                        else:
                                            self.numofq[x][q] = stemwords_tweet.count(q)
                                    b = [stemwords_tweet, id_str, sumoftfidf, JSD, TF, numOfWordAtweet]
                                    self.summaryB[x].append([b, self.day])
        pass
| Python | 296 | 56.270271 | 134 | /run2_crawlerA/rewritesummary.py | 0.382138 | 0.371579 |
beichao1314/TREC2016 | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 8 19:37:33 2016
@author: xiaobei
"""
import pycurl
import requests
import json
import logging
logging.basicConfig(level=logging.INFO)
class REST(object):
    """Minimal client for the TREC RTS broker: fetch topics and push tweets."""
    def __init__(self,clientid):
        """Store the assigned client id and prepare a reusable pycurl handle
        configured for JSON POST requests."""
        self.clientid=clientid
        self.c = pycurl.Curl()
        self.c.setopt(pycurl.CUSTOMREQUEST, 'POST')
        self.c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json'])
    def GetTopic(self):
        """Fetch this client's topic list from the broker and return it as
        parsed JSON."""
        # curl -H 'Content-Type: application/json' hostname.com/topics/abcdefghijk
        url = "http://54.164.151.19:80/topics/" + self.clientid
        header = {'content-type': 'application/json'}
        r = requests.get(url, headers=header)
        return json.loads(r.text)
    def Post(self, topicid, tweetid):
        """Push one tweet id for a topic via the reusable pycurl handle.

        Always returns True — the HTTP status is not checked.
        """
        url = "http://54.164.151.19:80/tweet/" + topicid + "/" + tweetid + "/" + self.clientid
        self.c.setopt(pycurl.URL, url)
        self.c.perform()
        # r = self.c.getinfo(pycurl.HTTP_CODE)
        return True
| Python | 32 | 30.1875 | 93 | /run2_crawlerA/Rest.py | 0.625626 | 0.590591 |
beichao1314/TREC2016 | refs/heads/master | from py_bing_search import PyBingWebSearch
# s1= 9uCkTYlAG9x4iPdxAeDuQipYvc2vEn6oUbPKZJnFlVY
# s2=3L8LwEROeBFVSA1FwUVKLfIO+Ue979rarr+Y4mBZwaE
s3 = 'E+ok1GP7qpi6xgtE0yfsbrQFZSElgMBK2ZD1kwf/WXA'
s4 = 'AKvk0/D9XzJuCQA9n/a+TFbqwOFder9xd9Yj/22ivA8'
s5='r8OUqrE+DW/W4qs8ShfN2ljAU8214AkuksvYy7iMPGk'
def search(search_term):
    """Query Bing web search (API key `s5`, defined above) for `search_term`
    and return the first ten results as JSON-formatted result objects."""
    engine = PyBingWebSearch(s5, search_term, web_only=False)
    return engine.search(limit=10, format='json')
| Python | 12 | 26.333334 | 65 | /run2_crawlerA/extension.py | 0.698171 | 0.652439 |
beichao1314/TREC2016 | refs/heads/master | # from datetime import datetime
import datetime
import time as T
from email.utils import parsedate
class Time(object):
    """Tracks a rolling 24-hour window anchored at an RFC-2822-ish timestamp
    (Twitter's created_at format, e.g. 'Tue Aug 02 00:00:00 +0000 2016')."""

    @staticmethod
    def _parse(stamp):
        # parsedate() tolerates Twitter's field order; mktime() interprets the
        # struct in local time, consistently for anchor and query stamps.
        return datetime.datetime.fromtimestamp(T.mktime(parsedate(stamp)))

    def __init__(self, firsttime):
        """Anchor the window at `firsttime`."""
        self.firsttime = self._parse(firsttime)

    def calculatetime(self, time):
        """Return the whole number of days between `time` and the anchor."""
        return (self._parse(time) - self.firsttime).days

    def settime(self):
        """Advance the anchor by exactly 24 hours (start the next day)."""
        self.firsttime = self.firsttime + datetime.timedelta(hours=24)
| Python | 19 | 28.894737 | 82 | /run2_crawlerA/estimate_time.py | 0.683099 | 0.679577 |
beichao1314/TREC2016 | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 7 17:10:58 2016
@author: xiaobei
"""
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import logging.handlers
from rewritesummary import PushSummary
from Rest import REST
import json
from estimate_time import Time
from extension import search as sch
import logging
from process_profile import preprocess_profile
import time
consumer_key = "bEyQ2mZRAABCIdZajeaYhpnUe"
consumer_secret = "kJUa3IHjUFm1znHCoAnaDQY7RUPGzcMqveFcgvsh3i7v4Jta3b"
access_token = "2910563640-Z77URQhoPhDsg393yazywkd0WHjjqWrn1tlV8aH"
access_token_secret = "gPRcz33gphQL2VTDEQ40Uu8yTqVNoOwXZ1TAMQYSV4MHm"
logging.basicConfig(level=logging.INFO)
class TweetListener(StreamListener):
    """Tweepy stream listener that feeds every status into the module-level
    `pushSummary` and logs progress/errors to rotating files."""
    def __init__(self, api=None):
        """Set up hourly-rotating INFO and WARN log files plus a console
        handler, and reset the processed-status counter."""
        super(TweetListener, self).__init__(api)
        self.logger = logging.getLogger('tweetlogger')
        # print('a')
        statusHandler = logging.handlers.TimedRotatingFileHandler('status.log', when='H', encoding='utf-8', utc=True)
        statusHandler.setLevel(logging.INFO)
        self.logger.addHandler(statusHandler)
        warningHandler = logging.handlers.TimedRotatingFileHandler('warning.log', when='H', encoding='utf-8', utc=True)
        warningHandler.setLevel(logging.WARN)
        self.logger.addHandler(warningHandler)
        logging.captureWarnings(True)
        consoleHandler = logging.StreamHandler()
        consoleHandler.setLevel(logging.WARN)
        self.logger.addHandler(consoleHandler)
        # self.logger.setLevel(logging.INFO)
        self.count = 0
    def on_data(self, data):
        """Decode one raw stream payload, hand it to the summariser, and
        print a progress line every 1000 statuses.  Returning True keeps the
        stream alive."""
        data=json.loads(data,encoding='utf-8')
        # print(data)
        pushSummary.pushSummarys(data)
        self.count += 1
        # self.logger.info(data)
        # with open('test.txt','a') as f:
        #     f.write(data+'\n')
        # print(data)
        # tweet=json.load(data)
        # print(type(tweet))
        # pushSummary.pushSummarys(json.loads(data))
        # print(self.count)
        if self.count % 1000 == 0:
            print("%d statuses processed %s" % (self.count, time.strftime('%X', time.localtime(time.time()))))
        return True
    def on_error(self, exception):
        """Log stream errors; not returning False lets tweepy decide whether
        to reconnect."""
        self.logger.warning(str(exception))
if __name__ == '__main__':
    # Wire up the tweepy sample stream with the credentials defined above.
    listener = TweetListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, listener)
    # clientid = rest.Getclientid()
    # The broker-assigned client id is persisted between runs.
    with open('clientidrun2.txt', 'r') as f:
        clientid = f.read()
    rest = REST(clientid)
    # The commented block below was the original online profile builder: it
    # fetched topics from the broker, took the 5 most frequent stemmed terms
    # from title/description/narrative, and expanded them with 5 more terms
    # mined from Bing search results.  Its output is now loaded from files.
    # topics = rest.GetTopic()
    # interest_files = {}
    # count = 0
    # x = 0
    # topicid = {}
    # for i in topics:
    # number = i['topid']
    # title = i['title']
    # Desc = i['description']
    # Narr = i['narrative']
    # title = preprocess_profile(title)
    # Desc = preprocess_profile(Desc)
    # Narr = preprocess_profile(Narr)
    # tf = {}
    # for word in title:
    # if word in tf:
    # tf[word] += 1
    # else:
    # tf[word] = 1
    # for word in Desc:
    # if word in tf:
    # tf[word] += 1
    # else:
    # tf[word] = 1
    # for word in Narr:
    # if word in tf:
    # tf[word] += 1
    # else:
    # tf[word] = 1
    # a = sorted(tf.items(), key=lambda d: d[1], reverse=True)
    # b = [d[0] for d in a[0:5]]
    # stemwords_interest_profile = b
    # b = ' '.join(b)
    # s = sch(b)
    # count += 1
    # logging.info(count)
    # search = []
    # stf = {}
    # for i in s:
    # j = preprocess_profile(i.title)
    # for k in j:
    # f = []
    # for l in k:
    # if ord(l) < 127:
    # f.append(l)
    # search.append(''.join(f))
    # for word in search:
    # if word in stf:
    # stf[word] += 1
    # else:
    # stf[word] = 1
    # d = sorted(stf.items(), key=lambda d: d[1], reverse=True)
    # e = 0
    # for n in range(len(d)):
    # if d[n][0] not in stemwords_interest_profile:
    # stemwords_interest_profile.append(d[n][0])
    # e += 1
    # if e >= 5:
    # break
    # interest_files[x] = stemwords_interest_profile
    # topicid[x] = number
    # x += 1
    # Load the precomputed expanded profiles and topic-id mapping.
    # NOTE(review): eval() on file contents executes arbitrary code — use
    # json or ast.literal_eval if these files are not fully trusted.
    with open('q_e.txt', 'r') as f:
        c = f.read()
        interest_files = eval(c)
    with open('q_x.txt', 'r') as ff:
        d = ff.read()
        topicid = eval(d)
    # with open('q_e.txt', 'w') as f:
    # f.write(str(interest_files))
    # with open('q_x.txt', 'w') as ff:
    # ff.write(str(topicid))
    # Anchor the evaluation clock at the start of the TREC 2016 period and
    # start consuming the sample stream, restarting on any failure.
    times = Time('Tue Aug 02 00:00:00 +0000 2016')
    fa = open('A.txt', 'a', encoding='utf-8')
    pushSummary = PushSummary(0.9, interest_files, times, rest, fa, topicid)
    while True:
        try:
            stream.sample()
        except Exception as ex:
            print(str(ex))
            pass
| Python | 160 | 30.518749 | 119 | /run2_crawlerA/crawler.py | 0.537775 | 0.524291 |
frontier96/Covid-data-analysis-and-prediction | refs/heads/main | # mortality_rate = data_states1['Mortality_Rate']
# #operate_data_counties1['mortality_rate'] = operate_data_counties1[operate_data_counties1['State'].isin(mortality_rate.index)]
# plt.figure(figsize = (5,5))
# sns.distplot(data_states1['Mortality_Rate'])
# plt.figure(figsize = (5,5))
# sns.scatterplot(x=data_states1['Province_State'],y=data_states1['Mortality_Rate'])
#added 5/10 sunday by Jack
# Distribution of state-level mortality rate, then a labelled scatter of
# mortality rate vs. number of people tested (one point per state).
# NOTE(review): assumes data_states1 is defined earlier in this notebook/file.
mortality_rate = data_states1['Mortality_Rate']
#operate_data_counties1['mortality_rate'] = operate_data_counties1[operate_data_counties1['State'].isin(mortality_rate.index)]
plt.figure(figsize = (10,3))
sns.distplot(data_states1['Mortality_Rate'])
plt.figure(figsize = (10,7))
p1 = sns.scatterplot(x=data_states1['People_Tested'],y=data_states1['Mortality_Rate'])
# Annotate each point that has positive testing and mortality values.
for line in range(0,data_states1.shape[0]):
    if data_states1['People_Tested'][line] >0 and data_states1['Mortality_Rate'][line] >0:
        p1.text(data_states1['People_Tested'][line]+0.2, data_states1['Mortality_Rate'][line], data_states1['Province_State'][line],
                #data_states1['People_Tested'],data_states1['Mortality_Rate'],data_states1['Province_State'],
                horizontalalignment='left', size='small', color='black') #, weight='semibold')
#mortality_rate.value_counts()
#data_states1[data_states1['Mortality_Rate']>8]
# Build an abbreviation -> full state name mapping from the county table,
# extend it with territories missing from that table, and use it to attach
# full state names and state-level mortality rates to each county row.
state_abbreviation = data_counties1.groupby(['StateName', 'State']).agg(sum)#['StateName']
state_dict_original = state_abbreviation.reset_index()[['StateName', 'State']]
new_state_df = pd.DataFrame({'StateName': ['AK','VI', 'PR', "HI", 'GU',"AS",'MP'],
                             'State': ['Alaska', 'Virgin Islands','Puerto Rico','Hawaii', "Guam",'American Samoa'
                            ,'Northern Marianas']})
state_dict_combined = pd.concat([state_dict_original, new_state_df])
state_dict = state_dict_combined.set_index('StateName')['State']
mapped_state = data_counties1['StateName'].map(state_dict)
# Sanity check: how many abbreviations failed to map (expected 0).
mapped_state.isna().sum()
#state_dict
data_counties1['State updated'] = mapped_state
# Map each county's (full) state name to the state-level mortality rate.
mortality_dict = data_states1[data_states1['Country_Region'] == 'US'][['Province_State', 'Mortality_Rate']].set_index('Province_State')['Mortality_Rate']
mapped_mortality = data_counties1['State updated'].map(mortality_dict)
mapped_mortality.isna().sum() #should be American Samoa
data_counties1['Mortality Rate'] = mapped_mortality
data_counties1.head()
# Bar charts of how many counties share each first-case / first-death date.
first_case_count = data_conf1['First_Case'].value_counts()
plt.figure(figsize=(12, 9))
plt.bar(first_case_count.index, first_case_count.values)
plt.title("Distribution of First discovered case by county")
first_death_count = data_death1['First_Death'].value_counts()
plt.figure(figsize=(12, 9))
plt.bar(first_death_count.index, first_death_count.values)
plt.show()
# Select the intervention-date features plus mortality rate, dropping the two
# territories with no mortality data, then impute missing values.
operate_data_counties1 = data_counties1[['CountyName', 'State updated', 'stay at home', 'public schools', '>500 gatherings', 'entertainment/gym', 'restaurant dine-in',
                            'Mortality Rate']][data_counties1['State updated'] != 'American Samoa'][data_counties1['State updated'] != 'Northern Marianas']
#operate_data_counties1[operate_data_counties1['CountyName']=='Washington']
# 3.413353 — presumably the overall mean mortality rate; TODO confirm source.
operate_data_counties1['Mortality Rate'] = operate_data_counties1['Mortality Rate'].fillna(3.413353)
operate_data_counties1_with_states = operate_data_counties1
operate_data_counties1_with_states
operate_data_counties1.isnull().sum()
# Mean-impute the three intervention-date columns.
operate_data_counties1_with_states['stay at home'] = operate_data_counties1_with_states['stay at home'].fillna(np.mean(operate_data_counties1_with_states['stay at home']))
operate_data_counties1_with_states['public schools'] = operate_data_counties1_with_states['public schools'].fillna(np.mean(operate_data_counties1_with_states['public schools']))
operate_data_counties1_with_states['>500 gatherings'] = operate_data_counties1_with_states['>500 gatherings'].fillna(np.mean(operate_data_counties1_with_states['>500 gatherings']))
operate_data_counties1_with_states.isnull().sum()
#operate_data_counties1_with_states[operate_data_counties1_with_states['stay at home'].isnull()]
#operate_data_counties1_with_states['stay at home'].value_counts()
#operate_data_counties1_with_states['public schools'].value_counts()
#operate_data_counties1_with_states['>500 gatherings'].value_counts()
# NOTE(review): the next line is a SYNTAX ERROR — `on = )` has no value, and
# the "" column name looks unintended.  It appears to be an abandoned draft of
# the merge done properly further below with explicit left_on/right_on keys;
# it should probably be deleted or completed with the correct join key.
operate_data_counties1_with_states = operate_data_counties1_with_states.merge(data_conf1[["", "First_Case", "First_Hundred_Case"]], on = )
# PCA on the numeric county features (excluding the FIPS code columns).
data_counties1_PCA = data_counties1.select_dtypes(['number']).drop(columns=['STATEFP','COUNTYFP'])
# center our data and normalize the variance
df_mean = np.mean(data_counties1_PCA)
df_centered = data_counties1_PCA - df_mean
df_centered_scaled = df_centered / (np.var(df_centered))**0.5
data_counties1_PCA = df_centered_scaled
data_counties1_PCA_fillna =data_counties1_PCA.fillna(method = 'ffill') #use the previous valid data to fill NaN,
#good here since closeby county likely to be in the same State
data_counties1_PCA_fillna2 = data_counties1_PCA_fillna.fillna(0) #fill NaN with no previous valid data (whole column is NaN)
#sum(data_counties1_PCA_fillna2.isna().sum())
data_counties1_PCA_fillna2
#PCA
# SVD of the standardised matrix; P holds the principal-component scores.
u, s, vt = np.linalg.svd(data_counties1_PCA_fillna2, full_matrices=False)
P = u @ np.diag(s)
df_1st_2_pcs =pd.DataFrame(P[:,0:2], columns=['pc1', 'pc2'])
first_2_pcs = df_1st_2_pcs
#jittered scatter plot (added noise)
first_2_pcs_jittered = first_2_pcs + np.random.normal(0, 0.1, size = (len(first_2_pcs), 2))
sns.scatterplot(data = first_2_pcs_jittered, x = "pc1", y = "pc2");
#a better looking scatter plot with labels
#import plotly.express as px
#px.scatter(data_frame = first_2_pcs_jittered, x = "pc1", y = "pc2", text = list(df_1972_to_2016.index)).update_traces(textposition = 'top center')
#scree plot
# Fraction of total variance captured by each principal component.
plt.figure(figsize = (10,10))
x = list(range(1, s.shape[0]+1))
plt.plot(x, s**2 / sum(s**2));
plt.xticks(x, x);
plt.xlabel('PC #');
plt.ylabel('Fraction of Variance Explained');
from sklearn.model_selection import train_test_split
# 90/10 train-test split of the county-level features (fixed seed for reproducibility).
train, test = train_test_split(operate_data_counties1_with_states, test_size=0.1, random_state=42)
plt.figure(figsize = (5,5))
#sns.regplot(operate_data_counties1_with_states['Mortality_Rate'])
# Attach per-county first-case / first-death dates.
# NOTE(review): these merges run AFTER train/test were created above, so the
# split frames do not contain the merged columns — confirm the intended order.
operate_data_counties1_with_states = operate_data_counties1_with_states.merge(data_first_case, how = "inner", left_on = ['CountyName', 'State updated'], right_on = ['County_Name', 'Province_State'])
operate_data_counties1_with_states = operate_data_counties1_with_states.merge(data_first_death, how = "left", left_on = ['CountyName', 'State updated'], right_on = ['County_Name', 'Province_State'])
operate_data_counties1_with_states.head()
# Drop the duplicated join-key columns produced by the two merges.
operate_data_counties1_with_states.drop(['Province_State_x', 'Province_State_y', 'County_Name_y', 'County_Name_x'], axis = 1, inplace = True)
operate_data_counties1_with_states.head()
# Re-express each policy date as time elapsed since the county's first case.
time_since_first_case = operate_data_counties1_with_states.copy()
time_since_first_case['stay at home'] = time_since_first_case['stay at home'] - time_since_first_case['First_Case']
time_since_first_case['public schools'] = time_since_first_case['public schools'] - time_since_first_case['First_Case']
time_since_first_case['entertainment/gym'] = time_since_first_case['entertainment/gym'] - time_since_first_case['First_Case']
time_since_first_case['>500 gatherings'] = time_since_first_case['>500 gatherings'] - time_since_first_case['First_Case']
time_since_first_case['restaurant dine-in'] = time_since_first_case['restaurant dine-in'] - time_since_first_case['First_Case']
time_since_first_case.head()
# Features/target for the regression; 'Mortality Rate' is the label column.
X_train = train.drop(['CountyName', 'State updated','Mortality Rate'], axis=1)
Y_train = train['Mortality Rate']
X_train[:5], Y_train[:5]
from sklearn.linear_model import LinearRegression
from sklearn import metrics
model = LinearRegression(fit_intercept=True) # should fit intercept be true?
model.fit(X_train, Y_train)
Y_prediction = model.predict(X_train)
training_loss = metrics.mean_squared_error(Y_prediction, Y_train)
print("Training loss: ", training_loss)
plt.figure(figsize = (5,5))
# NOTE(review): positional (x, y) arguments to regplot are deprecated in
# newer seaborn — prefer sns.regplot(x=..., y=...).
sns.regplot(Y_prediction, Y_train)
plt.figure(figsize = (5,5))
# Residual plot: prediction vs (actual - prediction).
sns.regplot(Y_prediction, Y_train-Y_prediction)
# perform cross validation
from sklearn import model_selection as ms
# finding which features to use using Cross Validation
errors = []
range_of_num_features = range(1, X_train.shape[1] + 1)
for N in range_of_num_features:
    print(f"Trying first {N} features")
    model = LinearRegression()
    # compute the cross validation error
    # NOTE(review): cross_val_score returns R^2 scores (higher is better), so
    # argmax below is consistent, but `best_err = min(errors)` then picks the
    # WORST score — confirm which statistic was intended.
    error = ms.cross_val_score(model, X_train.iloc[:, 0:N], Y_train).mean()
    print("\tScore:", error)
    errors.append(error)
best_num_features = np.argmax(errors) + 1
print (best_num_features)
best_err = min(errors)
print(f"Best choice, use the first {best_num_features} features")
#===================================================================================================================
| Python | 292 | 29.910959 | 198 | /old/temp.py | 0.674352 | 0.652151 |
nurtai00/WebDevProjectBack | refs/heads/main | from rest_framework import serializers
from api.models import Category, Product, Cart, User
class CategoryModelSerializer(serializers.ModelSerializer):
    """Serializes Category instances (id, name, description) from the model fields."""
    class Meta:
        model = Category
        fields = ('id', 'name', 'description')
class ProductSerializer(serializers.ModelSerializer):
    """Serializes Product instances for the product endpoints.

    BUG FIX: this was declared as a plain `serializers.Serializer`, whose
    Meta model/fields are ignored (a plain Serializer declares no fields),
    so `serializer.data` produced empty dicts. ModelSerializer builds the
    fields from the Product model automatically.
    """
    class Meta:
        model = Product
        fields = ('name', 'description', 'price', 'category')
class CartSerializer(serializers.ModelSerializer):
    """Serializes Cart entries (username, address, book).

    BUG FIX: declared as a plain `serializers.Serializer`, which ignores the
    Meta model/fields and serializes nothing; ModelSerializer derives the
    fields from the Cart model.
    """
    class Meta:
        model = Cart
        fields = ('username', 'address', 'book')
class UserModelSerializer(serializers.ModelSerializer):
    """Serializes the custom User model (id, username, password).

    NOTE(review): including 'password' in API output exposes credentials —
    consider making it write-only or dropping it from `fields`.
    """
    class Meta:
        model = User
        fields = ('id', 'username', 'password')
| Python | 26 | 24.846153 | 61 | /projectback/api/serializers.py | 0.641834 | 0.641834 |
nurtai00/WebDevProjectBack | refs/heads/main | from django.urls import path
from api import views
from api.views import product_list, product_detail, category_list, product2_list, category2_list
# Route table for the API app: function-based product/category endpoints plus
# the class-based ProductViewSet list/detail views.
# NOTE(review): trailing slashes are inconsistent (some routes end in '/',
# some do not) — with Django's APPEND_SLASH this matters for POST requests.
urlpatterns = [
    path('api/product', product_list),
    path('api/product/<int:product_id>/', product_detail),
    path('api/category', category_list),
    path('api/product2', product2_list),
    path('api/category2', category2_list),
    path('api/product-list', views.ProductViewSet.as_view(), name='product-list'),
    path('product-list/<str:pk>/', views.ProductDetailViewSet.as_view(), name='product-detail')
]
| Python | 15 | 37.066666 | 96 | /projectback/api/urls.py | 0.691126 | 0.680887 |
nurtai00/WebDevProjectBack | refs/heads/main | from api.models import Product, Category
from django.http.response import JsonResponse
from api.serializers import CategoryModelSerializer, ProductSerializer, CartSerializer, UserModelSerializer
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK
)
from rest_framework.response import Response
def product_list(request):
    """Return every product as a JSON array of Product.to_json() dicts."""
    serialized = []
    for item in Product.objects.all():
        serialized.append(item.to_json())
    return JsonResponse(serialized, safe=False)
def product_detail(request, product_id):
    """Return one product as JSON; 400 with the error text when the id is unknown."""
    try:
        found = Product.objects.get(id=product_id)
    except Product.DoesNotExist as exc:
        return JsonResponse({'message': str(exc)}, status=400)
    return JsonResponse(found.to_json())
def category_list(request):
    """Return every category as a JSON array of Category.to_json() dicts."""
    serialized = [item.to_json() for item in Category.objects.all()]
    return JsonResponse(serialized, safe=False)
@api_view(['GET', 'POST'])
def product2_list(request):
    """List all products (GET) or create one under an existing category (POST).

    POST expects 'category' (category name), 'name', 'description', 'image'
    and 'price' in the request body.
    """
    if request.method == 'GET':
        try:
            products = Product.objects.all()
            serializer = ProductSerializer(products, many=True)
            return JsonResponse(serializer.data, safe=False)
        except Exception:
            # Best-effort contract preserved, but no longer a bare `except:`
            # that would also swallow KeyboardInterrupt/SystemExit.
            return JsonResponse({"status": "500"}, safe=False)
    if request.method == 'POST':
        try:
            category = Category.objects.get(name=request.data['category'])
        except (Category.DoesNotExist, KeyError):
            # BUG FIX: the original reported "200" (success) when the target
            # category was missing and the product was silently dropped.
            return JsonResponse({"status": "404"}, safe=False)
        # NOTE(review): the Product model in models.py defines no 'image'
        # field — confirm this create call against the current schema.
        Product.objects.create(
            category=category,
            name=request.data['name'],
            description=request.data['description'],
            image=request.data['image'],
            price=request.data['price']
        )
        return JsonResponse({"status": "200"}, safe=False)
@api_view(['GET', 'POST'])
def category2_list(request):
    """List all categories (GET) or look a single one up by name (POST)."""
    if request.method == 'GET':
        try:
            categories = Category.objects.all()
            serializer = CategoryModelSerializer(categories, many=True)
            return JsonResponse(serializer.data, safe=False)
        except Exception:
            return JsonResponse({"status": "505"}, safe=False)
    if request.method == 'POST':
        try:
            category = Category.objects.get(name=request.data['category'])
        except (Category.DoesNotExist, KeyError):
            return JsonResponse({'status': '200'}, safe=False)
        # BUG FIX: `many=True` on a single model instance made the serializer
        # iterate the instance and raise, so every successful lookup fell into
        # the old bare `except:` and returned {'status': '200'} instead of the
        # serialized category.
        serializer = CategoryModelSerializer(category)
        return JsonResponse(serializer.data, safe=False)
class ProductViewSet(APIView):
    """Read-only endpoint returning the full product list as JSON."""

    @staticmethod
    def get(request):
        serialized = ProductSerializer(Product.objects.all(), many=True).data
        return JsonResponse(serialized, safe=False)
class ProductDetailViewSet(APIView):
    """Read-only endpoint returning one product looked up by primary key."""

    @staticmethod
    def get(request, pk):
        try:
            product = Product.objects.get(id=pk)
        except Product.DoesNotExist:
            # BUG FIX: an unknown pk previously raised DoesNotExist out of the
            # view and surfaced as a 500; report a clean 404 instead.
            return Response({'error': 'Product not found'}, status=HTTP_404_NOT_FOUND)
        serializer = ProductSerializer(product, many=False)
        return Response(serializer.data)
@csrf_exempt
@api_view(["POST"])
@permission_classes((AllowAny,))
def login(request):
    """Exchange a username/password pair for a DRF auth token.

    Returns 400 if either credential is missing, 404 on bad credentials,
    otherwise 200 with {'token': <key>}.
    """
    username = request.data.get("username")
    password = request.data.get("password")
    if username is None or password is None:
        return Response({'error': 'Please provide both username and password'},
                        status=HTTP_400_BAD_REQUEST)
    user = authenticate(username=username, password=password)
    if not user:
        return Response({'error': 'Invalid Credentials'},
                        status=HTTP_404_NOT_FOUND)
    # get_or_create keeps a single persistent token per user across logins.
    token, _ = Token.objects.get_or_create(user=user)
    return Response({'token': token.key},
                    status=HTTP_200_OK)
@csrf_exempt
@api_view(["GET"])
def sample_api(request):
    """Tiny smoke-test endpoint returning a fixed JSON payload."""
    payload = {'sample_data': 123}
    return Response(payload, status=HTTP_200_OK)
# def get(request):
# queryset = Product.objects.all()
# serializer = ProductSerializer(queryset, many=True)
# return Response(serializer.data)
#
#
# class ProductViewSet(APIView):
# pass
#
#
# def get(request, pk):
# queryset = Product.objects.get(id=pk)
# serializer = ProductSerializer(queryset, many=False)
# return Response(serializer.data)
#
#
# class ProductDetailViewSet(APIView):
# pass
| Python | 139 | 32.27338 | 107 | /projectback/api/views.py | 0.650294 | 0.641058 |
nurtai00/WebDevProjectBack | refs/heads/main | from django.contrib import admin
from api.models import Product, Category, Cart, User
# Register your models here.
# Expose the shop models in the Django admin site.
# BUG FIX: the original statements ended with stray commas, turning each line
# into a discarded 1-tuple — harmless at runtime but misleading to readers.
admin.site.register(Product)
admin.site.register(Category)
admin.site.register(Cart)
admin.site.register(User)
| Python | 8 | 27.75 | 52 | /projectback/api/admin.py | 0.768908 | 0.768908 |
nurtai00/WebDevProjectBack | refs/heads/main | from django.db import models
class Category(models.Model):
    """Product grouping with a human-readable name and free-form description."""
    name = models.CharField(max_length=200)
    description = models.TextField(max_length=500, default='')

    def to_json(self):
        """Serialize this category to a plain dict suitable for JsonResponse."""
        payload = {
            'id': self.id,
            'name': self.name,
            'description': self.description
        }
        return payload
class Product(models.Model):
    """Catalog item, optionally linked to a Category.

    Deleting the linked Category cascades and deletes its products.
    """
    name = models.CharField(max_length=200)
    description = models.TextField(max_length=500, default='')
    price = models.IntegerField(default=0)
    category = models.ForeignKey(Category, null=True, on_delete=models.CASCADE, blank=True)

    def to_json(self):
        # NOTE(review): 'category' is not included in the serialized dict —
        # confirm whether API consumers expect it.
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'price': self.price
        }
class Cart(models.Model):
    """A single cart entry: who ordered, where to ship, and the product.

    NOTE(review): the FK is named 'book' but points at Product — likely a
    leftover from a bookstore example; confirm before renaming.
    """
    username = models.CharField(max_length=50)
    address = models.TextField()
    book = models.ForeignKey(Product, null=True, on_delete=models.CASCADE, blank=True)
class User(models.Model):
    """Minimal credentials record.

    NOTE(review): this shadows django.contrib.auth's User and stores the
    password as plain text — prefer the built-in auth user model (or at
    least hashed passwords) before production use.
    """
    username = models.CharField(max_length=20)
    password = models.CharField(max_length=2222)
| Python | 39 | 27.179487 | 91 | /projectback/api/models.py | 0.614236 | 0.595782 |
nurtai00/WebDevProjectBack | refs/heads/main | # Generated by Django 3.2.2 on 2021-05-07 19:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (0002).

    Adds the Category, User and Cart tables, removes Product.address and
    Product.city, and adds Product.price plus the Product->Category FK.
    Do not hand-edit migrations that have already been applied.
    """

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=99)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=20)),
                ('password', models.CharField(max_length=2222)),
            ],
        ),
        migrations.RemoveField(
            model_name='product',
            name='address',
        ),
        migrations.RemoveField(
            model_name='product',
            name='city',
        ),
        migrations.AddField(
            model_name='product',
            name='price',
            field=models.IntegerField(default=0),
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=50)),
                ('address', models.TextField()),
                ('book', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.product')),
            ],
        ),
        migrations.AddField(
            model_name='product',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.category'),
        ),
    ]
| Python | 56 | 33.55357 | 130 | /projectback/api/migrations/0002_auto_20210508_0140.py | 0.524862 | 0.509794 |
DonJayamanne/vscode-python-samples | refs/heads/master | import ptvsd
import time
import os
print(os.curdir)
print("Waiting to attach")
# Listen on all interfaces, port 3000, and block until a remote debugger attaches.
address = ('0.0.0.0', 3000)
ptvsd.enable_attach('my_secret', address)
ptvsd.wait_for_attach()
# Brief pause so the attached debugger can settle before execution continues.
time.sleep(2)
print("attached")
print("end")
| Python | 15 | 13.8 | 41 | /remote-debugging-locally/sample.py | 0.711712 | 0.671171 |
DonJayamanne/vscode-python-samples | refs/heads/master | from django.shortcuts import render
from django.template import loader
def index(request):
    """Render the home page, injecting two demo values into the template context."""
    template_context = {
        'value_from_server': 'one',
        'another_value_from_server': 'two',
    }
    return render(request, 'index.html', template_context)
# from django.shortcuts import render
# from django.shortcuts import render_to_response
# # Create your views here.
# from django.http import HttpResponse
# def index(request):
# return render_to_response('index.html', context={'value_from_server':'one', 'another_value_from_server':'two'})
# #return HttpResponse("Hello, world. You're at the home index.")
| Python | 20 | 29.549999 | 117 | /sample-django/home/views.py | 0.698854 | 0.698854 |
AEJ-FORMATION-DATA-IA/exercicepython-kmk | refs/heads/main | #!/usr/bin/env python
# coding: utf-8
# In[92]:
A = 15
B = 4
C = A + B
print("le résultat de ", A, "+", B," est ",C)
# In[93]:
# product of the two variables
D = A * B
print("le résultat de la multiplication de ", A,"*",B ," est ",D)
# exponentiation
E = A**B
print("le resultat de la puissance de ", A,"**", B ,"est ",E)
# true division (keeps the decimal part)
F = A / B
print("le resultat de cette opération ", A,"/",B," est ",F)
# floor division (integer part only)
G = A // B
print("le resultat de cette opération ", A,"//",B," est ",G)
# remainder of the division
H = A % B
print("le reste cette opération ", A,"%",B," est ",H)
# In[94]:
# build a dictionary keyed by each operation's result
dico_igs = {
    A + B : C,
    A * B : D,
    A**B : E,
    A/B : F,
    A//B : G,
    A%B : H
}
# NOTE(review): this prints the literal list [19], not a dictionary entry —
# print(dico_igs[A + B]) was probably intended.
print([A+B])
# In[96]:
# add an entry to the dictionary
dico_igs["IA"]="Groupe 1"
print(dico_igs)
# In[98]:
# update an existing entry
dico_igs["IA"]="Data IA Groupe 1"
print(dico_igs)
# In[99]:
# remove an entry from the dictionary
dico_igs.pop(A+B)
print(dico_igs)
# In[101]:
# print the dictionary's keys
for cle in dico_igs.keys():
    print (cle)
# In[102]:
# print the dictionary's values
for valeur in dico_igs.values():
    print(valeur)
# In[103]:
# print the key/value pairs
for cle,valeur in dico_igs.items():
    print(cle,":", valeur)
# In[107]:
# create a tuple
tuple_igs = (A,B,C)
print(tuple_igs)
# In[108]:
# appending to a tuple
# tuples are immutable, so values cannot be added; they suit constant data
# In[133]:
# lists
# create a list
list_igs=["A","B","C","D"]
print(list_igs)
# In[135]:
liste1 = ["A","B","C","D"]
# In[134]:
liste2 = [A, B, C, D]
print(liste2)
# In[136]:
liste3 = [liste1, liste2]
print(liste3)
# In[137]:
# append E and F to liste1
# NOTE(review): this appends the single string "E, F", not two elements; the
# print below shows liste3, which reflects the change because it holds a
# reference to liste1.
liste1.append("E, F")
print(liste3)
# In[138]:
# remove B from liste1
liste1.remove('B')
print(liste1)
# In[140]:
liste1[0]=('G')
print(liste1)
| Python | 162 | 12.339506 | 151 | /exercice IGS.py | 0.625809 | 0.593432 |
AEJ-FORMATION-DATA-IA/exercicepython-kmk | refs/heads/main | A = input("entrez un nombre entier: ")
try:
    A = int(A)
except:
    # Re-prompt once on non-integer input.
    # NOTE(review): a second bad entry still raises ValueError (a loop would
    # be more robust), and the bare `except:` also swallows KeyboardInterrupt.
    A=input("\n Erreur !!! Veuillez entrer un nombre entier: ")
    A=int(A)
B=input("entrez un deuxieme nombre entier: ")
try:
    B = int(B)
except:
    # Same single-retry pattern as above for the second number.
    B = input("\n Erreur !!!, Votre nombre doit etre un nombre entier: ")
    B = int(B)
C = A + B
print("le resultat de ",A," + ",B," = ",C)
torchioalexis/python_basico | refs/heads/main | def run():
    square_root = int(input("Ingrese un número para calcular su raíz cuadrada: "))
    square = 0
    # Exhaustive enumeration: count upward until square**2 reaches the target.
    while square**2 < square_root:
        square += 1
    if square**2 == square_root:
        print ("La raíz cuadrada de", square_root, "es", square)
    else:
        print (square_root, "no tiene raíz cuadrada exacta")
if __name__ == "__main__":
    run()
Sohan-Pramanik/boilermake | refs/heads/main | # Developed by matthew-notaro, nalinahuja22, and ClarkChan1
| Python | 1 | 59 | 59 | /bin/timeline.py | 0.816667 | 0.766667 |
Sohan-Pramanik/boilermake | refs/heads/main | # Developed by matthew-notaro, nalinahuja22, and ClarkChan1
import os
import sys
class Audio:
    """Wraps an audio file path and performs per-second tempo analysis.

    NOTE(review): analyze() calls `librosa`, but this module never imports it
    (only os/sys are imported above), so analyze() raises NameError as
    written — add `import librosa` before use.
    """
    def __init__(self, afile):
        # Audio File Path
        self.afile = afile

        # Audio Analysis
        self.track = []

    def analyze(self):
        # Audio File Duration
        duration = librosa.get_duration(filename = self.afile)

        # Iterate Over Audio in 1-second windows, printing the estimated tempo.
        for i in range(int(duration)):
            data, sr = librosa.load(self.afile, offset = i, duration = 1)
            onset = librosa.onset.onset_strength(data, sr = sr)
            tempo = librosa.beat.tempo(onset_envelope = onset, sr = sr)
            print(tempo)


# NOTE(review): runs at import time (no __main__ guard), and the relative
# path only resolves when launched from this script's directory.
obj = Audio("../media/audio/solstice.mp3")
obj.analyze()
| Python | 30 | 22.766666 | 73 | /bin/audio.py | 0.596073 | 0.58906 |
jj240396/Medium | refs/heads/master |
#df - table containing the historical stock prices for the past 10 years.
#df_weights - dict containing the weightage of each stock in the portfolio
#converting the df_weights dict to dataframe
weightage = pd.DataFrame.from_dict(df_weights)
weightage = weightage.transpose()
weightage.columns = ['weightage']
# NOTE(review): this expression's result is discarded — presumably a sanity
# check that the weights sum to 1; wrap in print()/assert if that's intended.
np.sum(weightage['weightage'])
weightage.reset_index(inplace=True)
weightage.columns = ['stock','weights']
#calculating the annual return
# Mean yearly price per stock, then year-over-year percentage change.
df = df.groupby(['stock','year']).agg(
    {
        'avg':'mean'
    })
df['prev_avg'] = df.groupby(['stock'])['avg'].shift(1)
df.reset_index(inplace=True)
df.dropna(inplace=True)
df['return'] = (df['avg'] - df['prev_avg'])/df['prev_avg']
#calculating the weighted annual return
df = df.merge(weightage,on='stock')
df['weighted_return'] = df['return']*df['weights']
#pivoting the table to get the covariance matrix and calculate the portfolio standard deviation
df_pivot = df.pivot('year', 'stock', 'weighted_return')
df_pivot.reset_index(inplace=True)
cov_matrix = df_pivot.cov()
# Double the off-diagonal covariances so the full-matrix sum matches the
# portfolio variance formula (each pairwise term appears twice).
for i in range(len(cov_matrix)):
    for j in range(len(cov_matrix.columns)):
        if i != j:
            cov_matrix.iloc[i,j] = 2*cov_matrix.iloc[i,j]
portfolio_std_deviation = np.sqrt(np.sum(cov_matrix.sum(axis=0)))
#calculating the expected portfolio return
df_mean = df.groupby(['stock']).agg(
    {
        'return':'mean'
    })
df_mean.columns = ['expected_return']
df_std = df.groupby(['stock']).agg(
    {
        'return':'std'
    })
df_std.columns = ['standard_deviation']
df_stats = df_mean.merge(df_std,on='stock')
df_stats.reset_index(inplace=True)
df_stats = df_stats.merge(weightage,on='stock')
df_stats['expected_return_weighted'] = df_stats['expected_return']*df_stats['weights']
expected_portolio_return = np.sum(df_stats['expected_return_weighted'])
| Python | 54 | 32.129631 | 95 | /stock_portfolio_risk.py | 0.697934 | 0.695142 |
vanya2143/ITEA-tasks | refs/heads/master | """
2. Написать декоратор log, который будет выводить на экран все аргументы,
которые передаются вызываемой функции.
@log
def my_sum(*args):
return sum(*args)
my_sum(1,2,3,1) - выведет "Функция была вызвана с - 1, 2, 3, 1"
my_sum(22, 1) - выведет "Функция была вызвана с - 22, 1"
"""
def log(func):
    """Decorator: print every call's positional arguments, then return the result."""
    def wrapper(*args):
        res = func(*args)
        print("Функция была вызвана с - " + ', '.join(map(str, args)))
        return res
    return wrapper


@log
def my_sum(*args):
    """Return the sum of all positional arguments.

    BUG FIX: the body was a bare `return` (always None); per the module
    docstring, my_sum(1, 2, 3, 1) must yield 7, so it now sums *args*.
    """
    return sum(args)


if __name__ == '__main__':
    # Demo adjusted to the docstring example; the original passed the string
    # 's', which only worked because my_sum previously computed nothing.
    my_sum(1, 2, 3, 1)
| Python | 28 | 19.035715 | 73 | /hw-2/task_2.py | 0.597148 | 0.561497 |
vanya2143/ITEA-tasks | refs/heads/master | """
Реализовать алгоритм бинарного поиска на python.
На вход подается упорядоченный список целых чисел, а так же элемент,
который необходимо найти и указать его индекс,
в противном случае – указать что такого элемента нет в заданном списке.
"""
def search_item(some_list, find_item):
    """Binary search for *find_item*; returns a message with its index, or a
    not-found message.

    Note: sorts *some_list* in place (as the original did), so the reported
    index refers to the sorted order, not the caller's original order.
    """
    some_list.sort()
    start = 0
    end = len(some_list) - 1
    # BUG FIX: the original recomputed mid as (end - start) // 2 after moving
    # the right bound (forgetting the `start +` offset) and bounded the loop
    # by element count instead of the start/end window, so the search could
    # degenerate or miss elements.
    while start <= end:
        mid = start + (end - start) // 2
        if find_item == some_list[mid]:
            return f'Число {some_list[mid]}, найдено по индексу {mid}'
        elif find_item > some_list[mid]:
            start = mid + 1
        else:
            end = mid - 1
    return f'Числа {find_item} нету в списке!'


if __name__ == '__main__':
    # my_list = list(range(0, 100))
    my_list = [1, 23, 33, 54, 42, 77, 234, 99, 2]
    my_item = 42
    print(search_item(my_list, my_item))
| Python | 40 | 24.025 | 71 | /hw-1/task_3.py | 0.567433 | 0.535465 |
vanya2143/ITEA-tasks | refs/heads/master | # 2. Используя модуль unittests написать тесты: сложения двух матриц, умножения матрицы и метод transpose
import unittest
from .task_1 import Matrix, MatrixSizeError
class TestMatrix(unittest.TestCase):
    """Unit tests for Matrix addition, scalar multiplication and transpose."""

    def setUp(self) -> None:
        # Two 3x3 matrices for same-size addition, and two identical 3x2
        # matrices; matrix_3 vs matrix_1 exercises the size-mismatch error.
        self.matrix_1 = Matrix([[1, 2, 9], [3, 4, 0], [5, 6, 4]])
        self.matrix_2 = Matrix([[2, 3, 0], [1, 2, 3], [5, 6, 4]])
        self.matrix_3 = Matrix([[2, 9], [4, 0], [6, 4]])
        self.matrix_4 = Matrix([[2, 9], [4, 0], [6, 4]])

    def test_add_three(self):
        self.assertEqual(self.matrix_1 + self.matrix_2, [[3, 5, 9], [4, 6, 3], [10, 12, 8]])

    def test_add_two_size(self):
        self.assertEqual(self.matrix_3 + self.matrix_4, [[4, 18], [8, 0], [12, 8]])

    def test_add_error(self):
        # Adding matrices of different sizes must raise MatrixSizeError.
        with self.assertRaises(MatrixSizeError):
            self.matrix_1 + self.matrix_3

    def test_mul_integer(self):
        self.assertEqual(self.matrix_1 * 2, [[2, 4, 18], [6, 8, 0], [10, 12, 8]])

    def test_mul_float(self):
        self.assertEqual(self.matrix_1 * 2.5, [[2.5, 5.0, 22.5], [7.5, 10.0, 0.0], [12.5, 15.0, 10.0]])

    def test_transpose_and_transpose_over_transposed_instance(self):
        # transpose() mutates in place, so a second call restores the original.
        self.assertEqual(self.matrix_1.transpose(), [[1, 3, 5], [2, 4, 6], [9, 0, 4]])
        self.assertEqual(self.matrix_1.transpose(), [[1, 2, 9], [3, 4, 0], [5, 6, 4]])


if __name__ == '__main__':
    unittest.main()
| Python | 37 | 36.540539 | 105 | /hw-6/task_2.py | 0.565155 | 0.478042 |
vanya2143/ITEA-tasks | refs/heads/master | """
1. Определить количество четных и нечетных чисел в заданном списке.
Оформить в виде функции, где на вход будет подаваться список с целыми числами.
Результат функции должен быть 2 числа, количество четных и нечетных соответственно.
"""
def list_check(some_list):
    """Count even and odd integers in *some_list*.

    Returns the summary string "even: X, not even: Y".
    """
    evens = sum(1 for number in some_list if number % 2 == 0)
    odds = len(some_list) - evens
    return f"even: {evens}, not even: {odds}"


if __name__ == '__main__':
    my_list = list(range(1, 20))
    print(list_check(my_list))
| Python | 23 | 24.608696 | 83 | /hw-1/task_1.py | 0.62309 | 0.604414 |
vanya2143/ITEA-tasks | refs/heads/master | """
Реализовать некий класс Matrix, у которого:
1. Есть собственный конструктор, который принимает в качестве аргумента - список списков,
копирует его (то есть при изменении списков, значения в экземпляре класса не должны меняться).
Элементы списков гарантированно числа, и не пустые.
2. Метод size без аргументов, который возвращает кортеж вида (число строк, число столбцов).
3. Метод transpose, транспонирующий матрицу и возвращающую результат (данный метод модифицирует
экземпляр класса Matrix)
4. На основе пункта 3 сделать метод класса create_transposed, который будет принимать на вход список списков,
как и в пункте 1, но при этом создавать сразу транспонированную матрицу.
https://ru.wikipedia.org/wiki/%D0%A2%D1%80%D0%B0%D0%BD%D1%81%D0%BF%D0%BE%D0%BD%D0%B8%D1%80%D0%
"""
class Matrix:
    """2-D matrix over a list of row lists."""

    def __init__(self, some_list):
        # BUG FIX: the assignment requires that mutating the caller's lists
        # must not affect the instance; `.copy()` was shallow, so the row
        # lists were still shared with the caller. Copy each row as well.
        self.data_list = [row.copy() for row in some_list]

    def size(self):
        """Return (number of rows, number of columns)."""
        row = len(self.data_list)
        col = len(self.data_list[0])
        return row, col

    def transpose(self):
        """Transpose in place and return the new list of rows."""
        t_matrix = [
            [item[i] for item in self.data_list] for i in range(self.size()[1])
        ]
        self.data_list = t_matrix
        return self.data_list

    @classmethod
    def create_transposed(cls, int_list):
        """Alternate constructor: build a Matrix that is already transposed."""
        obj = cls(int_list)
        obj.transpose()
        return obj


if __name__ == '__main__':
    my_list = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]
    t = Matrix(my_list)
    t.transpose()
    print(t.data_list)
    t2 = Matrix.create_transposed(my_list)
    print(t2.data_list)
| Python | 50 | 29.719999 | 109 | /hw-3/task_1.py | 0.664714 | 0.639323 |
vanya2143/ITEA-tasks | refs/heads/master | """
1. Реализовать подсчёт елементов в классе Matrix с помощью collections.Counter.
Можно реализовать протоколом итератора и тогда будет такой вызов - Counter(maxtrix).
Либо сделать какой-то метод get_counter(), который будет возвращать объект Counter и подсчитывать все элементы
внутри матрицы. Какой метод - ваш выбор.
"""
from collections import Counter
class MatrixSizeError(Exception):
    """Raised when two matrices of different sizes are added."""
    pass


class Matrix:
    """Matrix with addition, scalar multiplication, str rendering and element counting."""

    def __init__(self, some_list):
        # BUG FIX: `.copy()` was shallow — the instance still shared its row
        # lists with the caller, so mutating the caller's nested lists changed
        # the matrix. Copy each row too.
        self.data_list = [row.copy() for row in some_list]
        self.counter = Counter

    def __add__(self, other):
        """Element-wise sum with another same-sized Matrix; raises MatrixSizeError otherwise."""
        if self.size() != other.size():
            raise MatrixSizeError(
                f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}'
            )
        return [
            [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])]
            for row in range(self.size()[0])
        ]

    def __mul__(self, other):
        """Multiply every element by the scalar *other*; returns a list of rows."""
        return [[item * other for item in row] for row in self.data_list]

    def __str__(self):
        """Tab-separated columns, newline-separated rows, no trailing newline."""
        return ''.join('%s\n' % '\t'.join(map(str, x)) for x in self.data_list).rstrip('\n')

    def get_counter(self):
        """Return a collections.Counter over all matrix elements."""
        return self.counter(elem for list_elem in self.data_list for elem in list_elem)

    def size(self):
        """Return (number of rows, number of columns)."""
        row = len(self.data_list)
        col = len(self.data_list[0])
        return row, col

    def transpose(self):
        """Transpose in place and return the new list of rows."""
        t_matrix = [
            [item[i] for item in self.data_list] for i in range(self.size()[1])
        ]
        self.data_list = t_matrix
        return self.data_list

    @classmethod
    def create_transposed(cls, int_list):
        """Alternate constructor: build a Matrix that is already transposed."""
        obj = cls(int_list)
        obj.transpose()
        return obj
if __name__ == '__main__':
    # Demo: build two matrices and print their element-frequency Counters.
    list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]
    list_2 = [[2, 3], [1, 2], [5, 6]]
    matrix1 = Matrix(list_1)
    matrix2 = Matrix(list_2)
    print(matrix1.get_counter())
    print(matrix2.get_counter())
| Python | 67 | 27.985075 | 110 | /hw-6/task_1.py | 0.591658 | 0.57724 |
vanya2143/ITEA-tasks | refs/heads/master | """
К реализованному классу Matrix в Домашнем задании 3 добавить следующее:
1. __add__ принимающий второй экземпляр класса Matrix и возвращающий сумму матриц,
если передалась на вход матрица другого размера - поднимать исключение MatrixSizeError
(по желанию реализовать так, чтобы текст ошибки содержал размерность 1 и 2 матриц - пример:
"Matrixes have different sizes - Matrix(x1, y1) and Matrix(x2, y2)")
2. __mul__ принимающий число типа int или float и возвращающий матрицу, умноженную на скаляр
3. __str__ переводящий матрицу в строку.
Столбцы разделены между собой табуляцией, а строки — переносами строк (символ новой строки).
При этом после каждой строки не должно быть символа табуляции и в конце не должно быть переноса строки.
"""
class MatrixSizeError(Exception):
    """Raised when two matrices of different sizes are added."""
    pass


class Matrix:
    """Matrix with addition, scalar multiplication and str rendering."""

    def __init__(self, some_list):
        # BUG FIX: `.copy()` was shallow — the instance still shared its row
        # lists with the caller, so mutating the caller's nested lists changed
        # the matrix. Copy each row too.
        self.data_list = [row.copy() for row in some_list]

    def __add__(self, other):
        """Element-wise sum with another same-sized Matrix; raises MatrixSizeError otherwise."""
        if self.size() != other.size():
            raise MatrixSizeError(
                f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}'
            )
        return [
            [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])]
            for row in range(self.size()[0])
        ]

    def __mul__(self, other):
        """Multiply every element by the scalar *other* (int or float)."""
        return [[item * other for item in row] for row in self.data_list]

    def __str__(self):
        """Tab-separated columns, newline-separated rows, no trailing newline."""
        return ''.join('%s\n' % '\t'.join(map(str, x)) for x in self.data_list).rstrip('\n')

    def size(self):
        """Return (number of rows, number of columns)."""
        row = len(self.data_list)
        col = len(self.data_list[0])
        return row, col

    def transpose(self):
        """Transpose in place and return the new list of rows."""
        t_matrix = [
            [item[i] for item in self.data_list] for i in range(self.size()[1])
        ]
        self.data_list = t_matrix
        return self.data_list

    @classmethod
    def create_transposed(cls, int_list):
        """Alternate constructor: build a Matrix that is already transposed."""
        obj = cls(int_list)
        obj.transpose()
        return obj
if __name__ == '__main__':
    # Demo of the Matrix dunder methods on three sample matrices.
    list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]
    list_2 = [[2, 3, 0], [1, 2, 3], [5, 6, 4]]
    list_3 = [[2, 3], [1, 2], [5, 6]]

    t1 = Matrix(list_1)
    t1.transpose()
    t2 = Matrix.create_transposed(list_2)
    t3 = Matrix(list_3)

    print("t1: ", t1.data_list)
    print("t2: ", t2.data_list)
    print("t3: ", t3.data_list)

    # __add__
    print("\nt1.__add__(t2) : ", t1 + t2)
    # Adding matrices of mismatched sizes raises MatrixSizeError.
    try:
        print("\nПробую: t1 + t3")
        print(t1 + t3)
    except MatrixSizeError:
        print('Тут было вызвано исключение MatrixSizeError')

    # __mul__
    print("\nt2.__mul__(3): \n", t2 * 3)

    # __str__
    print('\nt1.__str__')
    print(t1)
vanya2143/ITEA-tasks | refs/heads/master | """
Сделать скрипт, который будет делать GET запросы на следующие ресурсы:
"http://docs.python-requests.org/",
"https://httpbin.org/get",
"https://httpbin.org/",
"https://api.github.com/",
"https://example.com/",
"https://www.python.org/",
"https://www.google.com.ua/",
"https://regex101.com/",
"https://docs.python.org/3/this-url-will-404.html",
"https://www.nytimes.com/guides/",
"https://www.mediamatters.org/",
"https://1.1.1.1/",
"https://www.politico.com/tipsheets/morning-money",
"https://www.bloomberg.com/markets/economics",
"https://www.ietf.org/rfc/rfc2616.txt"
Для каждого запроса должен быть вывод по примеру: "Resource 'google.com.ua',
request took 0.23 sec, response status - 200."
В реализации нет ограничений - можно использовать процессы, потоки, асинхронность.
Любые вспомагательные механизмы типа Lock, Semaphore, пулы для тредов и потоков.
"""
import aiohttp
import asyncio
from time import time
async def get_response(session, url):
    """GET *url* on the given aiohttp session and return the HTTP status code."""
    async with session.get(url) as resp:
        return resp.status


async def request(url):
    """Time a single GET and print the resource, duration and response status."""
    # NOTE(review): one ClientSession per request defeats connection pooling —
    # consider sharing a single session across all URLs.
    async with aiohttp.ClientSession() as session:
        time_start = time()
        status_code = await get_response(session, url)
        print(f"Resource '{url}', request took {time() - time_start:.3f}, response status - {status_code}")
if __name__ == '__main__':
    urls = [
        "http://docs.python-requests.org/",
        "https://httpbin.org/get",
        "https://httpbin.org/",
        "https://api.github.com/",
        "https://example.com/",
        "https://www.python.org/",
        "https://www.google.com.ua/",
        "https://regex101.com/",
        "https://docs.python.org/3/this-url-will-404.html",
        "https://www.nytimes.com/guides/",
        "https://www.mediamatters.org/",
        "https://1.1.1.1/",
        "https://www.politico.com/tipsheets/morning-money",
        "https://www.bloomberg.com/markets/economics",
        "https://www.ietf.org/rfc/rfc2616.txt"
    ]

    # BUG FIX: passing bare coroutines to asyncio.wait() is deprecated since
    # Python 3.8 and rejected on 3.11+, and asyncio.get_event_loop() outside
    # a running loop is deprecated too. asyncio.run + gather handles both and
    # still fetches every URL concurrently.
    async def fetch_all():
        await asyncio.gather(*(request(url) for url in urls))

    t_start = time()
    asyncio.run(fetch_all())
    t_end = time()
    print(f"Full fetching got {t_end - t_start:.3f} seconds.")
| Python | 67 | 32.701492 | 107 | /hw-7/task_1.py | 0.627989 | 0.61116 |
vanya2143/ITEA-tasks | refs/heads/master | """
Написать функцию, которая принимает 2 числа.
Функция должна вернуть сумму всех элементов числового ряда между этими двумя числами.
(если подать 1 и 5 на вход, то результат должен считаться как 1+2+3+4+5=15)
"""
def all_numbers_sum(num1, num2):
    """Return the sum of the integer series num1..num2 inclusive (e.g. 1, 5 -> 15)."""
    # Idiom/perf: sum() consumes the range lazily; the intermediate list the
    # original built with a comprehension was unnecessary.
    return sum(range(num1, num2 + 1))


if __name__ == '__main__':
    print(all_numbers_sum(1, 5))
| Python | 13 | 27.23077 | 85 | /hw-1/task_2.py | 0.689373 | 0.643052 |
vanya2143/ITEA-tasks | refs/heads/master | # Реализовать пример использования паттерна Singleton
from random import choice
# Event generator: pushes *count* random events (typed from *data*) onto *instance*.
def gen_events(instance, data, count=2):
    for i in range(count):
        event = choice(data)
        instance.add_event(f'Event-{event}-{i}', event)


# Singleton metaclass: every Events() call returns the same cached instance.
class EventsMeta(type):
    _instance = None

    def __call__(cls):
        if cls._instance is None:
            cls._instance = super().__call__()
        return cls._instance
class Events(metaclass=EventsMeta):
    """Singleton event registry keyed by severity ('ok', 'info', 'warn', 'error').

    _events is a class-level dict, so it is shared by all (identical) instances.
    """
    # __metaclass__ = EventsMeta
    _events = {
        'ok': [],
        'info': [],
        'warn': [],
        'error': []
    }

    def get_all_events(self):
        """
        :return: dict with all events and types
        """
        return self._events

    def get_events_count(self, key: str = None):
        """
        :param key: if need count of specific type
        :return: all events count or specific event count if param key: not None
        :rtype: tuple, int
        """
        if key:
            try:
                return len(self._events[key])
                # return key, len(self._events[key])
            except KeyError:
                # Unknown type: print the valid types and return None.
                print('Тип события должен быть ' + ', '.join(self._events.keys()))
                return
        return tuple((event, len(self._events[event])) for event in self._events.keys())

    def add_event(self, event: str, event_type: str):
        """
        :param event: event message
        :param event_type: ok, info, warn, error
        :return: None
        """
        try:
            self._events[event_type].append(event)
        except KeyError:
            print('Тип события должен быть ' + ', '.join(self._events.keys()))

    def read_event(self, event_type: str):
        """
        Pop (consume) the most recently added event of the given type.

        :param event_type: ok, info, warn, error
        :return: tuple (last event of event_type, remaining count) or None
        """
        try:
            return self._events[event_type].pop(), len(self._events[event_type])
        except IndexError:
            # No events left of this type.
            print('Событий больше нет')
            return
        except KeyError:
            # Unknown event type.
            print('Указан неверный тип события')
            return

    @classmethod
    def get_events_types(cls):
        """Return a view of the known event type names."""
        return cls._events.keys()
if __name__ == '__main__':
event_instance1 = Events()
event_instance2 = Events()
event_instance3 = Events()
print(type(event_instance1), id(event_instance1))
print(type(event_instance2), id(event_instance2))
# Генерируем события
gen_events(event_instance3, list(event_instance3.get_events_types()), 50)
# Получаем все события
print(event_instance2.get_all_events())
# Получаем колличества всех типов событий и обределенного типа
print(event_instance3.get_events_count())
print(f"Error: {event_instance3.get_events_count('error')}")
# Читаем события
while event_instance3.get_events_count('ok'):
print(event_instance3.read_event('ok'))
| Python | 104 | 27.586538 | 88 | /hw-5/task_1.py | 0.57854 | 0.572822 |
vanya2143/ITEA-tasks | refs/heads/master | """
1. Написать функцию, которая будет принимать на вход натуральное число n,
и возращать сумму его цифр. Реализовать используя рекурсию
(без циклов, без строк, без контейнерных типов данных).
Пример: get_sum_of_components(123) -> 6 (1+2+3)
"""
def get_sum_of_components_two(n):
    """Return the sum of the decimal digits of natural number *n*.

    Implemented recursively without loops/strings/containers, per the task.
    Example: get_sum_of_components_two(123) -> 6 (1+2+3).

    :raises ValueError: if *n* is negative -- recursion would never
        terminate because in Python (-1) // 10 == -1.
    """
    if n < 0:
        raise ValueError('n must be a natural (non-negative) number')
    return 0 if not n else n % 10 + get_sum_of_components_two(n // 10)


if __name__ == '__main__':
    print(get_sum_of_components_two(123))
| Python | 14 | 29.214285 | 73 | /hw-2/task_1.py | 0.690307 | 0.652482 |
glorizen/hi10enc | refs/heads/master | import os
from flask import Flask
from flask import request
from flask import jsonify
from flask import render_template
from flask import send_from_directory
from parsers import MediaParser
from parsers import AvsParser
# Flask application instance; all routes below are registered against it.
app = Flask(__name__)
# NOTE(review): hard-coded Windows path -- presumably a local dev setting;
# confirm before deploying anywhere else.
app.config['UPLOAD_FOLDER'] = 'd:/temp'
@app.route('/static/metronic_v5.0.2/metronic_v5.0.2/theme/dist/html/default')
def default():
    """Stub route for the Metronic 'default' demo pages.

    NOTE(review): returning None makes Flask error on this URL; the
    commented send_from_directory call was presumably the intent --
    confirm before enabling.
    """
    # return send_from_directory(app.config['UPLOAD_FOLDER'])
    return None
@app.route('/static/metronic_v5.0.2/metronic_v5.0.2/theme/dist/html/demo2')
def demo2():
    """Stub route for the Metronic 'demo2' pages (same caveat as default())."""
    # return send_from_directory(app.config['UPLOAD_FOLDER'])
    return None
@app.route('/static/styles')
def styles():
    """Stub for serving stylesheets; currently returns None (Flask will error)."""
    return None
@app.route('/static/scripts')
def scripts():
    """Stub for serving scripts; currently returns None (Flask will error)."""
    return None
@app.route('/')
def index():
    """Render the site landing page."""
    return render_template('site_specific/index.html')
@app.route('/encode/video')
def video_command():
    """Placeholder: will return the assembled video-encode command."""
    return 'Video Command Here.'
@app.route('/encode/audio')
def audio_command():
    """Placeholder: will return the assembled audio-encode command."""
    return 'Audio Command Here.'
@app.route('/extract/subtitle')
def sub_extract_command():
    """Placeholder: will return the subtitle-extraction command."""
    return 'Sub-Extraction Command Here.'
@app.route('/extract/video')
def video_extract_command():
    """Placeholder: will return the video-extraction command."""
    return 'Video-Extraction Command Here.'
@app.route('/extract/audio')
def audio_extract_commmand():
    """Placeholder: will return the audio-extraction command.

    NOTE(review): function name has a typo ('commmand'); harmless since
    only the route decorator references it.
    """
    return 'Audio-Extraction Command Here.'
@app.route('/info/ffmpeg')
def ffmpeg_info():
    """Placeholder: will report ffmpeg build/version info."""
    return 'ffmpeg info Here.'
@app.route('/info/x264')
def x264_info():
    """Placeholder: will report x264 encoder info."""
    return 'X264 info here.'
@app.route('/info/x265')
def x265_info():
    """Placeholder: will report x265 encoder info."""
    return 'X265 info here.'
@app.route('/info/libopus')
def libopus_info():
    """Placeholder: will report libopus codec info."""
    return 'libopus info here.'
@app.route('/info/libfdk_aac')
def libfdk_info():
    """Placeholder: will report libfdk_aac codec info."""
    return 'libfdk_aac info here.'
@app.route('/merge/mkvmerge')
def mkvmerge_command():
    """Placeholder: will return the assembled mkvmerge command."""
    return 'mkvmerge command here.'
@app.route('/ajax/metadata', methods=["GET", "POST"])
def ajax_parse_metadata():
    """AJAX endpoint: parse posted MediaInfo XML and an AviSynth script
    and return the per-track metadata as JSON.

    Expects a JSON body with 'mediainfo' (XML string) and 'avscript'.
    """
    xml_string = request.json['mediainfo']
    avs_string = request.json['avscript']
    # NOTE(review): this guard is a no-op ('pass'); an empty payload still
    # falls through to MediaParser -- decide on real error handling.
    if not xml_string:
        pass
    media_parser = MediaParser(xml_string)
    # avs_parser is constructed but not used below (its __init__ only
    # filters and prints the script lines).
    avs_parser = AvsParser(avs_string)
    data = dict()
    data['general_details'] = media_parser.get_general_details(media_parser.mediainfo)
    data['video_details'] = media_parser.get_video_details(media_parser.mediainfo)
    data['audio_details'] = media_parser.get_audio_details(media_parser.mediainfo)
    data['subtitle_details'] = media_parser.get_subtitle_details(media_parser.mediainfo)
    data['menu_details'] = media_parser.get_menu_details(media_parser.mediainfo)
    return jsonify(data)
| Python | 111 | 21.432432 | 86 | /app.py | 0.715375 | 0.702529 |
glorizen/hi10enc | refs/heads/master | from pymediainfo import MediaInfo
class MediaParser(object):
    """Turns a pymediainfo XML dump into plain per-track dictionaries."""

    def __init__(self, xml_string):
        self.mediainfo = MediaInfo(xml_string)
        self.metadata = self.mediainfo.to_data()

    def get_general_details(self, mediainfo):
        """Return a list of dicts, one per 'General' track."""
        collected = []
        for trk in mediainfo.tracks:
            if 'general' not in trk.track_type.lower():
                continue
            collected.append({
                'file_name': trk.file_name,
                'file_extension': trk.file_extension,
                'file_size': trk.file_size,
                'codec': trk.codec,
                'duration': float(trk.duration),
                'stream_size': trk.stream_size,
                'attachments': trk.attachments,
            })
        return collected

    def get_video_details(self, mediainfo):
        """Return a list of dicts, one per 'Video' track."""
        collected = []
        for trk in mediainfo.tracks:
            if 'video' not in trk.track_type.lower():
                continue
            collected.append({
                '_id': trk.track_id,
                'codec': trk.codec,
                'frame_rate_mode': trk.frame_rate_mode,
                'frame_rate': float(trk.frame_rate),
                'resolution': (trk.width, trk.height),
                'duration': float(trk.duration),
                'bit_rate': float(trk.bit_rate),
                'bit_depth': trk.bit_depth,
                'stream_size': trk.stream_size,
                'display_aspect_ratio': float(trk.display_aspect_ratio),
                'title': trk.title,
                'language': trk.language,
                'default': trk.default,
                'forced': trk.forced,
            })
        return collected

    def get_audio_details(self, mediainfo):
        """Return a list of dicts, one per 'Audio' track."""
        collected = []
        for trk in mediainfo.tracks:
            if 'audio' not in trk.track_type.lower():
                continue
            collected.append({
                '_id': trk.track_id,
                'codec': trk.codec,
                'duration': float(trk.duration),
                'bit_rate': trk.bit_rate,
                'channels': trk.channel_s,
                'sampling_rate': trk.sampling_rate,
                'stream_size': trk.stream_size,
                'title': trk.title,
                'language': trk.language,
                'default': trk.default,
                'forced': trk.forced,
            })
        return collected

    def get_subtitle_details(self, mediainfo):
        """Return a list of dicts, one per 'Text' (subtitle) track."""
        collected = []
        for trk in mediainfo.tracks:
            if 'text' not in trk.track_type.lower():
                continue
            collected.append({
                '_id': trk.track_id,
                'codec': trk.codec,
                'duration': float(trk.duration),
                'bit_rate': trk.bit_rate,
                'stream_size': trk.stream_size,
                'title': trk.title,
                'language': trk.language,
                'default': trk.default,
                'forced': trk.forced,
            })
        return collected

    def get_menu_details(self, mediainfo):
        """Return chapter listings from every 'Menu' track.

        Keys made only of digits and underscores (timestamps such as
        '00_05_30_000') become 'HH:MM:SS:mmm' chapter labels.
        """
        collected = []
        for trk in mediainfo.tracks:
            if 'menu' not in trk.track_type.lower():
                continue
            raw = trk.to_data()
            chapters = [(key.replace('_', ':'), raw[key])
                        for key in raw
                        if key.replace('_', '').isdigit()]
            collected.append(chapters)
        return collected
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
class AvsParser(object):
    """Parses an AviSynth script for embedded chapter/encoder directives.

    Directive conventions inside the script:
      ``##>option=value``                 -- custom encoder command
      ``##!!>Name[start : end]<, ...``    -- chapter list with frame ranges
    """

    def __init__(self, avs_string):
        # Keep real script lines plus directive comments (##> / ##!!);
        # ordinary '#' comments and blank lines are dropped.
        self.avs_content = [line for line in avs_string.split('\n')
                            if line and not line.startswith('#')
                            or line.startswith('##>')
                            or line.startswith('##!!')]
        print(self.avs_content)

    def parse_avs_chapters(self, avs_content):
        """Return {'names': [...], 'frames': [(start, end), ...]} or None.

        Chapters are read from ``##!!`` directive lines holding
        comma-separated ``>Name[start : end]<`` entries.
        """
        avs_chap_string = ''.join([x.strip('##!!') for x in avs_content
                                   if x.startswith('##!!') and '>' in x and '<' in x])
        if not avs_chap_string:
            return None
        filtered_chaps = [x.strip('>').strip('<').strip(' ').strip('\n')
                          for x in avs_chap_string.split(',')]
        avs_chapters = {'names': [], 'frames': []}
        for chapter in filtered_chaps:
            name = chapter.split('[')[0]
            start = int(chapter.split('[')[1].split(':')[0].strip(' '))
            end = int(chapter.split('[')[1].split(':')[1].split(']')[0].strip(' '))
            avs_chapters['names'].append(name)
            avs_chapters['frames'].append((start, end))
        return avs_chapters

    def get_custom_commands(self, avs_content):
        """Return {option: value} parsed from ``##>option=value`` lines.

        Fixed: the previous version opened an undefined ``input_file`` and
        called ``parse_avs_chapters`` without ``self`` (both NameErrors,
        and both results were unused); the dead code is removed.
        """
        commands_dict = dict()
        commands = ','.join([x.strip('##>') for x in avs_content
                             if x.startswith('##>')]).split(',')
        for command in commands:
            # Skip empty/too-short fragments produced by the join/split round-trip.
            if not command or len(command) < 3:
                continue
            option, value = command.split('=')
            commands_dict[option] = value.strip('\r').strip('\n')
        return commands_dict
| Python | 186 | 34.446236 | 96 | /parsers.py | 0.505233 | 0.504171 |
adbedada/aia | refs/heads/master | #!/usr/bin/env python
# test yolo
#based on
"""
https://github.com/awslabs/amazon-sagemaker-examples/blob/
master/advanced_functionality/tensorflow_bring_your_own/container/cifar10/train
"""
import os
import sys
import subprocess
import traceback
import tqdm
def _run(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ)
# stdout, stderr = process.communicate()
# line = process.stdout.readline()
# print(line)
return_code = process.poll()
if return_code:
error_msg = 'Return Code: {}, CMD: {}, Err: {}'.format(return_code, cmd, stderr)
raise Exception(error_msg)
if __name__ == '__main__':
    # Launch a darknet/YOLO training run; exit 0 on success, 255 on failure.
    try:
        _run(['./darknet/darknet', 'detector', 'train',
              'cfg/boat.data', 'cfg/boat.cfg', 'cfg/darknet19_448.conv.23'])
        sys.exit(0)
    except Exception as e:
        trc = traceback.format_exc()
        print('Exception during training: ' + str(e) + '\n' + trc, file=sys.stderr)
        sys.exit(255)
# Sample manual invocation: ./darknet/darknet detector test cfg/ships
HeyamBasem/Digit-Recognition- | refs/heads/main | # sort data because using fetch_openml() return unsorted data
from scipy import ndimage
def sort_by_target(mnist):
    """Sort the train (first 60k) and test (remaining) halves of MNIST
    by label, in place.

    fetch_openml returns the digits unsorted; this reorders data/target
    while keeping the train/test split boundary intact.
    """
    def order(labels):
        # Stable ordering: by label value, ties broken by original index.
        return np.array(sorted((lbl, pos) for pos, lbl in enumerate(labels)))[:, 1]

    train_order = order(mnist.target[:60000])
    test_order = order(mnist.target[60000:])
    mnist.data[:60000] = mnist.data[train_order]
    mnist.target[:60000] = mnist.target[train_order]
    mnist.data[60000:] = mnist.data[test_order + 60000]
    mnist.target[60000:] = mnist.target[test_order + 60000]
import numpy as np
from sklearn.datasets import fetch_openml
# Download MNIST (70k flattened 28x28 digit images); cached after first run.
mnist = fetch_openml('mnist_784', version=1, cache=True)
mnist.target = mnist.target.astype(np.int8) # fetch_openml() returns targets as strings
# NOTE(review): sort_by_target() returns None, so this prints 'None'.
print(sort_by_target(mnist)) # fetch_openml() returns an unsorted dataset
X, y = mnist["data"], mnist["target"]
print(X, y)
print(X.shape)
print(y.shape)
import matplotlib
import matplotlib.pyplot as plt
# Display one sample digit (index 36000) as a 28x28 grayscale image.
some_digit = X[36000]
some_digit_image = some_digit.reshape(28, 28)
plt.imshow(some_digit_image, cmap=matplotlib.cm.binary, interpolation="nearest")
plt.axis("off")
# plt.show()
def plot_digit(data):
    """Render one flattened 784-pixel MNIST digit as a 28x28 image."""
    pixels = data.reshape(28, 28)
    plt.imshow(pixels, cmap=matplotlib.cm.binary, interpolation="nearest")
    plt.axis("off")
def plot_digits(instances, images_per_row=10, **options):
    """Draw a grid of flattened 28x28 digit images.

    :param instances: iterable of flattened (784,) images
    :param images_per_row: maximum images per grid row
    :param options: extra keyword arguments forwarded to plt.imshow
    """
    size = 28
    images_per_row = min(len(instances), images_per_row)
    images = [instance.reshape(size,size) for instance in instances]
    # Rows needed to fit all images at images_per_row per row.
    n_rows = (len(instances) - 1) // images_per_row + 1
    row_images = []
    # Zero-pad so the last row concatenates to the same width as the others
    # (n_empty may be 0, producing a zero-width filler array).
    n_empty = n_rows * images_per_row - len(instances)
    images.append(np.zeros((size, size * n_empty)))
    for row in range(n_rows):
        rimages = images[row * images_per_row : (row + 1) * images_per_row]
        row_images.append(np.concatenate(rimages, axis=1))
    image = np.concatenate(row_images, axis=0)
    plt.imshow(image, cmap = matplotlib.cm.binary, **options)
    plt.axis("off")
# Show a grid of sample digits spanning the whole label range.
plt.figure(figsize=(9,9))
example_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]]
plot_digits(example_images, images_per_row=10)
# plt.show()
print(y[36000])
# Standard MNIST split: first 60k train, last 10k test.
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
import numpy as np
# Shuffle training data so cross-validation folds are not label-ordered.
# NOTE(review): np.random is unseeded here, so runs are not reproducible.
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# binary classifier
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
from sklearn.linear_model import SGDClassifier
# Linear classifier trained by SGD on the binary 'is it a 5?' task.
sgd_clf = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)
sgd_clf.fit(X_train, y_train_5)
print(sgd_clf.predict([some_digit]))
from sklearn.model_selection import cross_val_score
print(cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy"))
from sklearn.base import BaseEstimator
class Never5Classifier(BaseEstimator):
    """Baseline classifier that predicts 'not a 5' for every sample."""

    def fit(self, X, y=None):
        # Nothing to learn.
        pass

    def predict(self, X):
        # One False per input row.
        return np.full((len(X), 1), False)
# Baseline: a model that never predicts 5 still scores high accuracy,
# showing plain accuracy is a poor metric on skewed classes.
never_5_clf = Never5Classifier()
print(cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy"), '\n')
from sklearn.model_selection import cross_val_predict
# Out-of-fold predictions for every training sample.
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_train_5, y_train_pred), '\n')
y_train_perfect_predictions = y_train_5
# A perfect classifier would put everything on the diagonal.
print(confusion_matrix(y_train_5, y_train_perfect_predictions), '\n')
from sklearn.metrics import precision_score, recall_score
print(precision_score(y_train_5, y_train_pred))
# print(4344 / (4344 + 1307), '\n') #must be the same result
print(recall_score(y_train_5, y_train_pred))
# print(4344 / (4344 + 1077), '\n') #must be the same result
from sklearn.metrics import f1_score
print(f1_score(y_train_5, y_train_pred))
# print(4344 / (4344 + (1077 + 1307)/2), '\n') #must be the same result
# Inspect the raw decision score for one sample, and the effect of
# raising the classification threshold.
y_scores = sgd_clf.decision_function([some_digit])
print(y_scores)
threshold = 0
y_some_digit_pred = (y_scores > threshold)
print(y_some_digit_pred)
threshold = 200000
y_some_digit_pred = (y_scores > threshold)
print(y_some_digit_pred)
# Decision scores (not labels) for every sample, to sweep thresholds.
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3,
                             method="decision_function")
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
# Precision and Recall versus the decision Threshold
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision_recall_curve returns one more precision/recall value than
    # thresholds, so the final element is dropped to align the arrays.
    for series, fmt, lbl in ((precisions, "b--", "Precision"),
                             (recalls, "g-", "Recall")):
        plt.plot(thresholds, series[:-1], fmt, label=lbl, linewidth=2)
    plt.xlabel("Threshold", fontsize=16)
    plt.legend(loc="upper left", fontsize=16)
    plt.ylim([0, 1])
# Plot precision & recall vs threshold over a limited score range.
plt.figure(figsize=(8, 4))
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.xlim([-700000, 700000])
# plt.show()
# Precision versus Recall
def plot_precision_vs_recall(precisions, recalls):
    """Plot the precision/recall trade-off curve over all thresholds."""
    plt.plot(recalls, precisions, "b-", linewidth=2)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    # Pin both axes to [0, 1] after plotting (plot() would autoscale).
    plt.axis([0, 1, 0, 1])
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
# plt.show()
# Pick a threshold (~70000) that targets higher precision.
y_train_pred_90 = (y_scores > 70000)
# NOTE(review): these two scores are computed but never printed or stored.
precision_score(y_train_5, y_train_pred_90)
recall_score(y_train_5, y_train_pred_90)
# ROC curves
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
# ROC Curve
def plot_roc_curve(fpr, tpr, label=None):
    """Plot a ROC curve together with the random-guess diagonal."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')  # chance baseline
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
# plt.show()
from sklearn.metrics import roc_auc_score
# Area under the ROC curve for the SGD classifier.
print(roc_auc_score(y_train_5, y_scores))
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(n_estimators=10, random_state=42)
# Random forests expose class probabilities rather than decision scores.
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3,
                                    method="predict_proba")
y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5, y_scores_forest)
# Comparing ROC Curves
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, "b:", linewidth=2, label="SGD")
plot_roc_curve(fpr_forest, tpr_forest, "Random Forest")
plt.legend(loc="lower right", fontsize=16)
# plt.show()
# show the plot of compairson between random forest and SGD
print(roc_auc_score(y_train_5, y_scores_forest))
y_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3)
# NOTE(review): these scores are computed but discarded -- missing print()?
precision_score(y_train_5, y_train_pred_forest)
recall_score(y_train_5, y_train_pred_forest)
# Multiclass Classification
sgd_clf.fit(X_train, y_train) # y_train, not y_train_5
print(sgd_clf.predict([some_digit]))
# One decision score per class; the prediction is the argmax.
some_digit_scores = sgd_clf.decision_function([some_digit])
print(some_digit_scores)
print(np.argmax(some_digit_scores))
print(sgd_clf.classes_)
print(sgd_clf.classes_[5])
from sklearn.multiclass import OneVsOneClassifier
# One-vs-one trains a binary classifier for each pair of classes.
ovo_clf = OneVsOneClassifier(SGDClassifier(max_iter=5, tol=-np.infty, random_state=42))
ovo_clf.fit(X_train, y_train)
ovo_clf.predict([some_digit])
print(len(ovo_clf.estimators_))
forest_clf.fit(X_train, y_train)
print(forest_clf.predict([some_digit]))
# NOTE(review): the next line is a stray triple-quoted string literal
# (a no-op expression); kept byte-identical.
"""" Difference between predict method and predict_proba : """
print(forest_clf.predict_proba([some_digit]))
print(cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy"))
from sklearn.preprocessing import StandardScaler
# Feature scaling before SGD; accuracy is re-measured on scaled data.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
print(cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy"))
# error analysis
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
print(conf_mx)
def plot_confusion_matrix(matrix):
    """Alternative confusion-matrix plot with a color scale and colorbar."""
    figure = plt.figure(figsize=(8, 8))
    axes = figure.add_subplot(111)
    image = axes.matshow(matrix)
    figure.colorbar(image)
# Raw confusion matrix as an image (bright diagonal = correct predictions).
plt.matshow(conf_mx, cmap='magma')
# plt.show()
# Normalize each row by its class size and zero the diagonal so only the
# error rates remain visible.
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap='cividis')
plt.show()
# Examine 3-vs-5 confusions: grids of true/predicted combinations.
cl_a, cl_b = 3, 5
X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]
X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]
X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]
X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]
plt.figure(figsize=(8, 8))
plt.subplot(221);
plot_digits(X_aa[:25], images_per_row=5)
plt.subplot(222);
plot_digits(X_ab[:25], images_per_row=5)
plt.subplot(223);
plot_digits(X_ba[:25], images_per_row=5)
plt.subplot(224);
plot_digits(X_bb[:25], images_per_row=5)
plt.show()
# Multilabel classification
from sklearn.neighbors import KNeighborsClassifier
# Two labels per digit: 'is large (>= 7)' and 'is odd'.
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)
print(knn_clf.predict([some_digit]))
# # the below code takes very long running time
# y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1)
# print(f1_score(y_multilabel, y_train_knn_pred, average="macro"))
# # y_train not y_multilables , the book didn't se n_jobs=-1 either
# # the answer should be 0.97709078477525
# Multioutput classification
# add noise to img of 5 and see if it still class it as 5
# Build noisy copies; the targets are the clean images (denoising task).
noise = np.random.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise
noise = np.random.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
y_train_mod = X_train
y_test_mod = X_test
# Image of number 5
some_index = 5500
plt.subplot(121);
plot_digit(X_test_mod[some_index]) # The noisy input image
plt.subplot(122);
plot_digit(y_test_mod[some_index]) # Original image (target)
plt.show()
# KNN learns to map noisy images back to their clean pixel values.
knn_clf.fit(X_train_mod, y_train_mod)
clean_digit = knn_clf.predict([X_test_mod[some_index]])
plot_digit(clean_digit) # Clean image after removing the noises
# Exercises
# Q1: grid-search a KNN classifier for the full 10-class MNIST problem.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
from sklearn.model_selection import GridSearchCV
g = GridSearchCV(knn, {'weights': ["uniform", "distance"], 'n_neighbors': [3, 4, 5]}, cv=5)
# Fix: fit on the full multiclass labels (y_train), not the binary
# 'is it a 5' labels (y_train_5) -- the accuracy below compares the
# predictions against the multiclass y_test, so training on y_train_5
# made that score meaningless.
g.fit(X_train, y_train)
print(g.best_params_)
print(g.best_score_)
from sklearn.metrics import accuracy_score
y_pred = g.predict(X_test)
print(accuracy_score(y_test, y_pred))
# Q2
# Fix: 'scipy.ndimage.interpolation' is a long-deprecated alias that has
# been removed from recent SciPy releases; import shift from
# scipy.ndimage directly.
from scipy.ndimage import shift
def shift_image(image, dx, dy):
    """Return a copy of flattened 28x28 *image* shifted by (dx, dy) pixels.

    Pixels shifted in from the border are filled with 0 (black); the
    result is flattened back to shape (784,).
    """
    image = image.reshape((28, 28))
    shifted_image = shift(image, [dy, dx], cval=0, mode="constant")
    return shifted_image.reshape([-1])
# Demonstrate shift_image() on one training digit in four directions.
image = X_train[2645]
shifted_image_down = shift_image(image, 0, 4)
shifted_image_left = shift_image(image, -5, 0)
shifted_image_up = shift_image(image, 0, -7)
shifted_image_right = shift_image(image, 7, 0)
plt.figure(figsize=(12, 3))
plt.subplot(231)
plt.title("Original", fontsize=14)
plt.imshow(image.reshape(28, 28), interpolation="nearest", cmap="Greys")
plt.subplot(232)
plt.title("Shifted down", fontsize=14)
plt.imshow(shifted_image_down.reshape(28, 28), interpolation="nearest", cmap="Greys")
plt.subplot(233)
plt.title("Shifted left", fontsize=14)
plt.imshow(shifted_image_left.reshape(28, 28), interpolation="nearest", cmap="Greys")
# NOTE(review): plt.show() between subplots closes the current figure, so
# the 'up' and 'right' panels end up rendered in separate figures.
plt.show()
plt.subplot(234)
plt.title("Shifted up", fontsize=14)
plt.imshow(shifted_image_up.reshape(28, 28), interpolation="nearest", cmap="Greys")
plt.show()
plt.subplot(235)
plt.title("Shifted right", fontsize=14)
plt.imshow(shifted_image_right.reshape(28, 28), interpolation="nearest", cmap="Greys")
plt.show()
# Data augmentation (Q2): add four 1-pixel-shifted copies of every
# training image, then retrain KNN with the best grid-search params.
X_train_augmented = [image for image in X_train]
y_train_augmented = [label for label in y_train]
for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
    for image, label in zip(X_train, y_train):
        X_train_augmented.append(shift_image(image, dx, dy))
        y_train_augmented.append(label)
X_train_augmented = np.array(X_train_augmented)
y_train_augmented = np.array(y_train_augmented)
# Shuffle so the four shifted copies of each digit are not adjacent.
shuffle_idx = np.random.permutation(len(X_train_augmented))
X_train_augmented = X_train_augmented[shuffle_idx]
y_train_augmented = y_train_augmented[shuffle_idx]
knn = KNeighborsClassifier(**g.best_params_)
knn.fit(X_train_augmented, y_train_augmented)
# Fix: evaluate the freshly trained 'knn' -- the original predicted with
# 'knn_clf', the earlier multilabel/denoising model, so the reported
# accuracy had nothing to do with the augmented training above.
y_pred = knn.predict(X_test)
print(accuracy_score(y_test, y_pred))
# End of code
| Python | 421 | 28.783848 | 106 | /BookProject.py | 0.677006 | 0.640509 |
issyl0/alexa-house-cleaning-rota | refs/heads/master | def alexa_handler(event, context):
request = event['request']
# called when invoked with no values - early exit
if request['type'] == 'LaunchRequest':
return get_welcome_response()
if request['type'] == 'IntentRequest':
intent = request['intent']
if intent['name'] == 'HouseCleaningRota':
return make_response(
get_cleaning_rota_status(intent),
card_title='Lookup'
)
elif intent['name'] == 'AMAZON.HelpIntent':
return get_welcome_response()
elif intent['name'] in ('AMAZON.StopIntent', 'AMAZON.CancelIntent'):
return make_response(
'Thank you for using House Cleaning Rota',
card_title='Goodbye',
)
# default catch all in case nothing else matches
return make_response("Sorry, I didn't understand that request")
def get_welcome_response():
    """Return the launch/help response describing what the skill can do."""
    message = """
    Welcome to the House Cleaning Rota Alexa skill. You can
    ask me which week of the rota it is, and find out what
    jobs each person has to do.
    """
    return make_response(
        message,
        card_title='Welcome',
        should_end_session=False,
        reprompt_text=message
    )
def _get_cleaning_rota(intent):
    """Build the spoken summary of the current cleaning-rota week and jobs.

    Fixed: the original assigned the helper functions themselves
    (``week = check_week``) instead of calling them, and the multi-line
    string concatenation was a SyntaxError (missing '+' operators and no
    parentheses/continuations).

    :param intent: Alexa intent dict, whose 'HouseCleaningRota' slot
        triggers the lookup.
    :return: speech string, or None when no slots were supplied.
    """
    slots = intent.get('slots')
    speech_output = None
    if slots:
        cleaning_rota = slots['HouseCleaningRota'].get('value')
        if cleaning_rota:
            week = check_week()
            all_jobs = check_all_jobs()
            speech_output = ('It is week ' + week + '. Jack must: ' +
                             all_jobs['jack'] + '. Phil must: ' +
                             all_jobs['phil'] + '. Isabell must: ' +
                             all_jobs['isabell'] + '.')
        else:
            speech_output = 'Ask me to check the house cleaning rota.'
    return speech_output
def check_week():
    """Fetch the current rota week from the rota web service.

    Fixed: ``def check_week:`` (no parentheses) was a SyntaxError; the
    dead ``week = 'Unknown'`` placeholder (always overwritten) is removed.
    """
    import requests
    r = requests.get('https://house-cleaning-rota.eu-west-2.elasticbeanstalk.com/week.json')
    return r.json()['week']
def check_all_jobs():
    """Fetch everyone's jobs for the current week from the rota web service.

    Fixed: ``def check_all_jobs:`` (no parentheses) was a SyntaxError; the
    dead ``jobs = ''`` placeholder (always overwritten) is removed.
    """
    import requests
    r = requests.get('https://house-cleaning-rota.eu-west-2.elasticbeanstalk.com/jobs.json')
    return r.json()
def make_response(text, card_title='Thanks', should_end_session=True,
                  reprompt_text=None):
    """Assemble an Alexa-compatible JSON response envelope.

    :param text: spoken output and card content.
    :param card_title: title shown on the companion-app card.
    :param should_end_session: whether Alexa closes the session afterwards.
    :param reprompt_text: optional reprompt speech; adds a 'reprompt' block.
    """
    response = {
        'version': '1.0',
        'response': {
            'outputSpeech': {
                'type': 'PlainText',
                'text': text,
            },
            'card': {
                'type': 'Simple',
                'title': card_title,
                'content': text
            },
            'shouldEndSession': should_end_session
        }
    }
    if reprompt_text:
        response['reprompt'] = {
            'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}
        }
    return response
| Python | 106 | 26.688679 | 92 | /handler.py | 0.539353 | 0.53799 |
joseruiz1989/teste_python_vsc_github | refs/heads/master | print("teste print file from github")
print("hola desde vs")
print("testesito más 1")
print("testesito más 12") | Python | 6 | 18 | 37 | /teste_code.py | 0.725664 | 0.699115 |
AriniInf/PROGJAR_05111740007003 | refs/heads/master | from client import *
# Upload demo: run from inside ./client so relative paths resolve there.
if __name__=='__main__':
    os.chdir('./client')
    # upload(src, dst) -- presumably (local name, remote name); both are
    # 'progjar.txt' here. Confirm the argument order in client.py.
    upload('progjar.txt', 'progjar.txt')
AriniInf/PROGJAR_05111740007003 | refs/heads/master | import sys
import socket
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('127.0.0.1', 10000)
print(f"starting up on {server_address}")
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
# Serve forever: one client at a time, one file request per connection.
while True:
    # Wait for a connection
    print("waiting for a connection")
    connection, client_address = sock.accept()
    print(f"connection from {client_address}")
    # Receive the data in small chunks and retransmit it
    # The first recv() payload is treated as the requested filename.
    # NOTE(review): the filename comes straight off the network -- any
    # readable path on the server can be requested (path-traversal risk).
    request = connection.recv(1024)
    file = open(request.decode(),"rb")
    print("request received")
    # Stream the file back in 1 KiB chunks until EOF.
    while True:
        data = file.read(1024)
        if not data:
            break
        connection.sendall(data)
        print("sending.....")
    # Clean up the connection
    file.close()
    connection.close()
| Python | 28 | 29.178572 | 56 | /tugas1/tugas1b/server/server.py | 0.668639 | 0.64497 |
AriniInf/PROGJAR_05111740007003 | refs/heads/master | import threading
import logging
import requests
import datetime
import os
def download_gambar(url=None):
    """Download an image from *url* and save it under its basename.

    Only PNG/JPEG content types are accepted.

    :param url: image URL; None short-circuits with False.
    :return: False when *url* is missing or the content type is not an
        accepted image type; None after a successful download (original
        contract kept).
    """
    if url is None:
        return False
    ff = requests.get(url)
    # Accepted content types (extension mapping kept for reference; the
    # file is saved under the URL basename as before).
    tipe = {
        'image/png': 'png',
        'image/jpg': 'jpg',
        'image/jpeg': 'jpg',
    }
    content_type = ff.headers['Content-Type']
    logging.warning(content_type)
    if content_type not in tipe:
        return False
    namafile = os.path.basename(url)
    logging.warning(f"writing {namafile}")
    # 'with' guarantees the handle is closed even if the write fails
    # (the original leaked the handle on exceptions); the unused
    # 'ekstensi' local is removed.
    with open(namafile, "wb") as fp:
        fp.write(ff.content)
if __name__ == '__main__':
    # Download each image on its own thread.
    threads = []
    gambar = [
        'https://myrepro.files.wordpress.com/2015/10/wpid-wallpaper-pemandangan-pantai-jpg.jpeg',
        'https://myrepro.files.wordpress.com/2015/10/wpid-wallpaper-pemandangan-air-terjun-jpg.jpeg',
        'https://upload.wikimedia.org/wikipedia/commons/6/65/Pemandangan_alam.jpg'
    ]
    for url in gambar:
        t = threading.Thread(target=download_gambar, args=(url,))
        threads.append(t)
        t.start()
    # Robustness fix: wait for all workers so the process does not finish
    # while threads are still writing files.
    for t in threads:
        t.join()
| Python | 42 | 25.952381 | 101 | /tugas3/client_3.py | 0.616946 | 0.603707 |
AriniInf/PROGJAR_05111740007003 | refs/heads/master | from client import *
# Download demo: run from inside ./client so the fetched file lands there.
if __name__=='__main__':
    os.chdir('./client')
    # download(remote, local) -- presumably fetches 'opo.txt' and saves it
    # as 'abc.txt'. Confirm the argument order in client.py.
    download('opo.txt', 'abc.txt')
xmlabs-io/xmlabs-python | refs/heads/master | from .aws_lambda import xmlabs_lambda_handler
| Python | 1 | 45 | 45 | /xmlabs/__init__.py | 0.826087 | 0.826087 |
xmlabs-io/xmlabs-python | refs/heads/master | from .config import xmlabs_settings
from .env import get_environment
from functools import wraps
def xmlabs_lambda_handler(fn):
    """Decorator for Lambda handlers: resolves the runtime environment and
    its settings, then invokes the wrapped handler with an extra
    ``config`` keyword argument.
    """
    @wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            env = get_environment(*args, **kwargs)
            if not env:
                raise Exception("No Environment detected")
        except Exception:
            ## TODO: Improve Exception catching here
            ## TODO: Log to cloudwatch that Getting environment failed
            raise

        try:
            config = xmlabs_settings(env)
            if not config:
                raise Exception("No Configuration found")
        except Exception:
            ## TODO: Improve Exception catching
            ## TODO: Log to cloudwatch that Retrieving Settings failed
            raise

        ## Standard Invoke logging for
        #lambda_invoke_logger(*args, **kwargs)
        try:
            return fn(*args, **kwargs, config=config)
        except Exception:
            # Make a standard error log to Cloudwatch for ease of capturing
            raise
    return wrapped
| Python | 37 | 29.972973 | 74 | /xmlabs/aws_lambda/handler.py | 0.582024 | 0.582024 |
xmlabs-io/xmlabs-python | refs/heads/master | import pytest
from xmlabs.aws_lambda.config import settings
def test_xmlabs_aws_lambda_config():
    """Smoke test: the settings object imports successfully and is truthy."""
    assert settings
| Python | 7 | 19.857143 | 45 | /tests/test_aws_lambda_settings.py | 0.732877 | 0.732877 |
xmlabs-io/xmlabs-python | refs/heads/master | from .handler import xmlabs_lambda_handler
| Python | 1 | 42 | 42 | /xmlabs/aws_lambda/__init__.py | 0.818182 | 0.818182 |
xmlabs-io/xmlabs-python | refs/heads/master | import os
import logging
logger = logging.getLogger()
def get_environment(event, context=None):
valid_envs = ["stage", "prod", "dev"]
env = None
# default_env = os.getenv("DEFAULT_ENV", "dev")
default_env = os.getenv("APP_ENV", os.getenv("DEFAULT_ENV", "dev"))
override_env = os.getenv("ENV")
if override_env:
logger.info("Overriding Environment with {}".format(override_env))
return override_env
####################################
### X-Environment ###
### (override) ###
####################################
if event.get('headers'):
if event['headers'].get("X-Environment"):
return event['headers']['X-Environment'].lower()
####################################
### if lambda function arn ###
####################################
split_arn = None
try:
split_arn = context.invoked_function_arn.split(':')
except Exception as ex:
split_arn = None
if split_arn:
####################################
### lambda function arn alias ###
### (preferred) ###
####################################
e = split_arn[len(split_arn) - 1]
if e in valid_envs:
env = e
return env.lower()
#######################################
### Lambda Function Name Evaluation ###
#######################################
split_fn = split_arn[6].split("_")
if split_fn[-1].lower() in valid_envs:
return split_fn[-1].lower()
####################################
### Stage Variable Evaluation ###
####################################
apiStageVariable = None
if event.get("stageVariables"):
apiStageVariable = event["stageVariables"].get("env")
env = apiStageVariable
apiStage = None
if event.get("requestContext"):
apiStage = event["requestContext"].get("stage")
if not env:
env = apiStage
if apiStage and apiStageVariable and apiStage != apiStageVariable:
logger.warning("Tentrr: Using different api GW stagename and api Stage Variable is not recommended")
if env:
return env.lower()
# If invoked without alias
if (not split_arn or len(split_arn) == 7) and default_env:
return default_env
else:
raise Exception("Environment could not be determined")
return None
| Python | 77 | 31.207792 | 108 | /xmlabs/aws_lambda/env.py | 0.478226 | 0.47621 |
xmlabs-io/xmlabs-python | refs/heads/master | import boto3
import logging
import requests
from functools import lru_cache
from dynaconf.utils.parse_conf import parse_conf_data
# Module-level logger (root logger; formatting is the host app's concern).
logger = logging.getLogger()
# Identifier dynaconf uses to tag values contributed by this loader.
IDENTIFIER = 'aws_ssm'
def load(obj, env=None, silent=True, key=None, filename=None):
"""
Reads and loads in to "obj" a single key or all keys from source
:param obj: the settings instance
:param env: settings current env (upper case) default='DEVELOPMENT'
:param silent: if errors should raise
:param key: if defined load a single key, else load all from `env`
:param filename: Custom filename to load (useful for tests)
:return: None
"""
# Load data from your custom data source (file, database, memory etc)
# use `obj.set(key, value)` or `obj.update(dict)` to load data
# use `obj.find_file('filename.ext')` to find the file in search tree
# Return nothing
prefix = ""
if obj.get("AWS_SSM_PREFIX"):
prefix = "/{}".format(obj.AWS_SSM_PREFIX)
path = "{}/{}/".format(prefix, env.lower())
if key:
path = "{}{}/".format(path, key)
data = _read_aws_ssm_parameters(path)
try:
if data and key:
value = parse_conf_data(
data.get(key), tomlfy=True, box_settings=obj)
if value:
obj.set(key, value)
elif data:
obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
except Exception as e:
if silent:
return False
raise
@lru_cache
def _read_aws_ssm_parameters(path):
logger.debug(
"Reading settings AWS SSM Parameter Store (Path = {}).".format(path)
)
print(
"Reading settings AWS SSM Parameter Store (Path = {}).".format(path)
)
result = {}
try:
ssm = boto3.client("ssm")
response = ssm.get_parameters_by_path(
Path=path,
Recursive=True,
WithDecryption=True
)
while True:
params = response["Parameters"]
for param in params:
name = param["Name"].replace(path, "").replace("/", "_")
value = param["Value"]
result[name] = value
if "NextToken" in response:
response = ssm.get_parameters_by_path(
Path=path,
Recursive=True,
WithDecryption=True,
NextToken=response["NextToken"],
)
else:
break
except Exception as ex:
print(
"ERROR: Trying to read aws ssm parameters (for {}): {}!".format(
path, str(ex)
)
)
result = {}
logger.debug("Read {} parameters.".format(len(result)))
return result
| Python | 90 | 29.722221 | 76 | /xmlabs/dynaconf/aws_ssm_loader.py | 0.558047 | 0.557324 |
xmlabs-io/xmlabs-python | refs/heads/master | from .base import ConfigSource
import logging
import requests
logger = logging.getLogger()
class ConfigSourceAwsEc2UserData(ConfigSource):
def load(self):
if self._running_in_ec2():
#TODO: fetch EC2 USERDATA
raise Exception("ConfigSourceEC2UserData Load Unimplemented")
def _running_in_ec2(self):
try:
# Based on https://gist.github.com/dryan/8271687
instance_ip_url = "http://169.254.169.254/latest/meta-data/local-ipv4"
requests.get(instance_ip_url, timeout=0.01)
return True
except requests.exceptions.RequestException:
return False
| Python | 19 | 33.842106 | 82 | /xmlabs/dynaconf/aws_ec2_userdata_loader.py | 0.645015 | 0.602719 |
xmlabs-io/xmlabs-python | refs/heads/master | from xmlabs.aws_lambda import lambda_handler
@lambda_handler
def main(event, context, config):
print(config.STRIPE_API_SECRET_KEY)
pass
if __name__ == "__main__":
main({"headers":{"X-Environment": "dev"}}, {})
main({"headers":{"X-Environment": "prod"}}, {})
main({"headers":{"X-Environment": "dev"}}, {})
main({"headers":{"X-Environment": "dev"}}, {})
main({"headers":{"X-Environment": "prod"}}, {})
| Python | 13 | 32.076923 | 51 | /example/aws_lambda/app.py | 0.581019 | 0.581019 |
xmlabs-io/xmlabs-python | refs/heads/master |
import pytest
from xmlabs import xmlabs_lambda_handler
@xmlabs_lambda_handler
def lambda_handler(event, context, config):
assert(config)
def test_lambda_handler():
lambda_handler({},{})
| Python | 11 | 17 | 43 | /tests/test_aws_lambda_integration.py | 0.728643 | 0.728643 |
xmlabs-io/xmlabs-python | refs/heads/master | from dynaconf import Dynaconf
from dynaconf.constants import DEFAULT_SETTINGS_FILES
LOADERS_FOR_DYNACONF = [
'dynaconf.loaders.env_loader', #Inorder to configure AWS_SSM_PREFIX we need to load it from environment
'xmlabs.dynaconf.aws_ssm_loader',
'dynaconf.loaders.env_loader', #Good to load environment last so that it takes precedenceover other config
]
ENVIRONMENTS= ['prod','dev','stage']
settings = Dynaconf(
#settings_files=['settings.toml', '.secrets.toml'],
warn_dynaconf_global_settings = True,
load_dotenv = True,
default_settings_paths = DEFAULT_SETTINGS_FILES,
loaders = LOADERS_FOR_DYNACONF,
envvar_prefix= "APP",
env_switcher = "APP_ENV",
env='dev',
environments=ENVIRONMENTS,
#environments=True,
)
def xmlabs_settings(env):
return settings.from_env(env)
| Python | 26 | 30.923077 | 110 | /xmlabs/aws_lambda/config.py | 0.715663 | 0.715663 |
xmlabs-io/xmlabs-python | refs/heads/master | from dynaconf import Dynaconf
def test_dynaconf_settingsenv():
settingsenv = Dynaconf(environments=True)
assert settingsenv
def test_dynaconf_settings():
settings = Dynaconf()
assert settings
| Python | 10 | 20.1 | 45 | /tests/test_dynaconf.py | 0.744076 | 0.744076 |
b3b0/allyourbase | refs/heads/master | import os
def wazuh():
os.system('echo "ALLYOURBASE" >> /var/log/auth.log')
print("IT HAS BEEN DONE")
wazuh()
| Python | 7 | 16.142857 | 56 | /allyourbase.py | 0.625 | 0.625 |
rexapex/tealight-files | refs/heads/master | from tealight.art import (color, line, spot, circle, box, image, text, background, rectangle)
from tealight.art import screen_width, screen_height
from math import sin, cos, pi, sqrt
class explosion:
def __init__(self):
self.time = 50
self.x = 0
self.y = 0
def set_pos(self, x, y):
self.x = x
self.y = y
def star(self, x, y, c, size, spines):
color(c)
angle = 0
for i in range(0, spines):
x0 = x + (size * cos(angle))
y0 = y + (size * sin(angle))
line(x, y, x0, y0)
angle = angle + (2 * pi / spines)
def draw(self):
if self.time > 0:
self.star(self.x, self.y, "orange", 50-self.time, 50-self.time)
self.time -= 1
return False
if self.time == 0:
self.star(self.x, self.y, "white", 50-self.time, 50-self.time)
return True
| Python | 39 | 21.205128 | 93 | /art/explosion.py | 0.546286 | 0.521143 |
rexapex/tealight-files | refs/heads/master | from tealight.art import (color, line, spot, circle, box, image, text, background, rectangle)
from tealight.art import screen_width, screen_height
from math import sin, cos, pi, sqrt
from github.Krimzar.art.racecar import car
from github.rexapex.art.explosion import explosion
car1 = None #The player using this computer
car2 = None
outerWallX = 5
outerWallY = 5
outerWallWidth = screen_width-10
outerWallHeight = screen_height-10
innerWallX = 120
innerWallY = 250
innerWallWidth = screen_width-240
innerWallHeight = screen_height-500
wPressed = False
aPressed = False
sPressed = False
dPressed = False
upPressed = False
downPressed = False
rightPressed = False
leftPressed = False
explosions = [None] * 1024
explosionCount = 0
#def init():
#background("track.png")
def start():
global car1, car2
car1 = car()
car2 = car()
car1.set_name("Foo")
car2.set_name("Bar")
car1.change_orientation(1)
car2.change_orientation(1)
def handle_frame():
global car1, car2, leftPressed, rightPressed, upPressed, downPressed, aPressed, sPressed, dPressed, wPressed, explosions, explosionCount
color("white")
box(0, 0, screen_width, screen_height)
color("red")
if leftPressed:
car1.change_orientation(4)
elif rightPressed:
car1.change_orientation(-4)
elif upPressed:
car1.Acceleration += 0.01
if car1.Acceleration > 0.05:
car1.Acceleration = 0.05
elif downPressed:
if car1.Acceleration == 0:
if car1.Acceleration < -0.05:
car1.Acceleration = -0.05
else:
car1.Acceleration -= 0.01
if aPressed:
car2.change_orientation(4)
elif dPressed:
car2.change_orientation(-4)
elif wPressed:
car2.Acceleration += 0.01
if car2.Acceleration > 0.05:
car2.Acceleration = 0.05
elif sPressed:
if car2.Acceleration == 0:
if car2.Acceleration < -0.05:
car2.Acceleration = -0.05
else:
car2.Acceleration -= 0.01
car1.update_speed()
car2.update_speed()
testCollisions()
car1.draw_car("Foo")
car2.draw_car("Bar")
for i in range(0, explosionCount):
if explosions[i] != None:
if explosions[i].draw():
explosions[i] = None
# for i in range (0, len(otherCars)): #Draw connected players cars
# otherCars[i].draw()
#Draw the map
color("green")
rectangle(outerWallX, outerWallY, outerWallWidth, outerWallHeight)
#box(innerWallX, innerWallY, innerWallWidth, innerWallHeight)
#spot(screen_width/2, innerWallY, innerWallWidth/2)
#spot(screen_width/2, innerWallHeight+innerWallY, innerWallWidth/2)
def testCollisions():
global car1, car2
#Outer Wall Collision
if car1.CoordD["x"] <= outerWallX:
car1.CoordD["x"] = outerWallX
car1.Acceleration = 0
#car1.Speed = -car1.Speed
car1.change_orientation(-car1.TotalOrientation*2)
elif car1.CoordD["x"] >= outerWallWidth:
car1.CoordD["x"] = outerWallWidth
car1.Acceleration = 0
# car1.Speed = -car1.Speed
car1.change_orientation(-car1.TotalOrientation*2)
if car1.CoordD["y"] <= outerWallY:
car1.CoordD["y"] = outerWallY
car1.Acceleration = 0
# car1.Speed = -car1.Speed
car1.change_orientation(360+car1.TotalOrientation)
elif car1.CoordD["y"] >= outerWallHeight:
car1.CoordD["y"] = outerWallHeight
car1.Acceleration = 0
# car1.Speed = -car1.Speed
car1.change_orientation(360+car1.TotalOrientation)
if car2.CoordD["x"] <= outerWallX:
car2.CoordD["x"] = outerWallX
car2.Acceleration = 0
#car1.Speed = -car1.Speed
car2.change_orientation(-car2.TotalOrientation*2)
elif car2.CoordD["x"] >= outerWallWidth:
car2.CoordD["x"] = outerWallWidth
car2.Acceleration = 0
# car1.Speed = -car1.Speed
car2.change_orientation(-car2.TotalOrientation*2)
if car2.CoordD["y"] <= outerWallY:
car2.CoordD["y"] = outerWallY
car2.Acceleration = 0
# car1.Speed = -car1.Speed
car2.change_orientation(360+car2.TotalOrientation)
elif car2.CoordD["y"] >= outerWallHeight:
car2.CoordD["y"] = outerWallHeight
car2.Acceleration = 0
# car1.Speed = -car1.Speed
car2.change_orientation(360+car2.TotalOrientation)
#Inner Wall Collision
#if boxCollision(thisCar.x, thisCar.y, innerWallX, innerWallY, innerWallWidth, innerWallHeight):
# print "Collided with centre box"
#Returns True if point is inside the box
#def boxCollision(x, y, boxX, boxY, boxWidth, boxHeight):
# if x >= boxX and x <= boxWidth and y >= boxY and y <= boxHeight:
# return True
# else:
# return False
#Returns True if point is inside the circle
#def circleCollision():
def handle_keydown(key):
global car1, car2, leftPressed, rightPressed, upPressed, downPressed, aPressed, sPressed, dPressed, wPressed, explosions, explosionCount
if key == "left":
leftPressed = True
elif key == "right":
rightPressed = True
elif key == "up":
upPressed = True
elif key == "down":
downPressed = True
elif key == "a":
aPressed = True
elif key == "d":
dPressed = True
elif key == "w":
wPressed = True
elif key == "s":
sPressed = True
elif key == "space":
explosions[explosionCount] = explosion()
explosions[explosionCount].set_pos(car2.CoordD["x"], car2.CoordD["y"])
explosionCount += 1
elif key == "ctrl":
explosions[explosionCount] = explosion()
explosions[explosionCount].set_pos(car1.CoordD["x"], car1.CoordD["y"])
explosionCount += 1
def handle_keyup(key):
global car1, car2, leftPressed, rightPressed, upPressed, downPressed, aPressed, sPressed, dPressed, wPressed
if key == "left":
leftPressed = False
elif key == "right":
rightPressed = False
elif key == "up":
upPressed = False
elif key == "down":
downPressed = False
elif key == "a":
aPressed = False
elif key == "d":
dPressed = False
elif key == "w":
wPressed = False
elif key == "s":
sPressed = False
#init()
start() | Python | 225 | 25.559999 | 140 | /art/prj_racetrack.py | 0.67113 | 0.63749 |
rexapex/tealight-files | refs/heads/master | from tealight.art import (color, line, spot, circle, box, image, text, background)
from tealight.art import screen_width, screen_height
from math import sin, cos, pi
running = False
car1 = None
def handle_keydown(key):
global ax, ay
if key == "left" or key == "right":
car1.ax = 1
elif key == "up" or key == "down":
car1.ay = 1
def start():
global car1
background("track.png")
car1 = car()
car1.init()
car1.draw()
running = True
update()
def update():
global running
while True:
print("running")
car1.update()
draw()
def draw():
car1.draw()
class car:
x = 0
y = 0
vx = 0
vy = 0
ax = 0
ay = 0
def init(self):
x = 0
y = 0
vx = 0
vy = 0
ax = 0
ay = 0
def update(self):
vx = vx + ax
vy = vy + ay
x = x + vx
y = y + vy
def draw(self):
color("red")
spot(self.x, self.y, 25)
start() | Python | 70 | 12.371428 | 82 | /art/racetrack.py | 0.534759 | 0.508021 |
rexapex/tealight-files | refs/heads/master | from tealight.art import (color, line, spot, circle, box, image, text, background)
from tealight.art import screen_width, screen_height
from math import sin, cos, pi
class car:
x = 0
y = 0
orientation = 0
acceleration = 0
power = 0.3
def update(self):
self.x += self.acceleration
def draw(self):
spot(self.x, self.y, 25)
def editOrientation(self, dOri):
self.orientation = self.orientation + dOri
def editAcceleration(self, da):
self.acceleration = self.acceleration + da
| Python | 25 | 20.200001 | 82 | /art/prj_car.py | 0.657944 | 0.642991 |
rexapex/tealight-files | refs/heads/master | from tealight.robot import (move,
turn,
look,
touch,
smell,
left_side,
right_side)
# Add your code here
def moveBy(spaces):
for i in range(0, spaces):
move()
def go():
moveBy(3)
turn(-1)
while True:
if right_side() or left_side():
move()
go() | Python | 25 | 17.280001 | 39 | /robot/mine.py | 0.366228 | 0.359649 |
rexapex/tealight-files | refs/heads/master | from tealight.logo import move, turn
def square(side):
for i in range(0,4):
move(side)
turn(90)
def chessboard():
sqSize = 8
for i in range(0, 8):
for j in range(0, 8):
square(sqSize)
move(sqSize)
turn(180)
move(8 * sqSize)
turn(-90)
move(8)
turn(-90)
turn(-90)
chessboard() | Python | 21 | 14.809524 | 36 | /logo/chess.py | 0.570997 | 0.510574 |
rexapex/tealight-files | refs/heads/master | from tealight.art import (color, line, spot, circle, box, image, text, background)
from tealight.art import screen_width, screen_height
from math import sin, cos, pi
x = screen_width / 2
y = screen_height / 2
vx = 0
vy = 0
ax = 0
ay = 0
gravity = 0.2
drag = 0
power = 0.3
explosionX = 0
explosionY = 0
explosionTime = 0
def star(x, y, c, size, spines):
color(c)
angle = 0
for i in range(0, spines):
x0 = x + (size * cos(angle))
y0 = y + (size * sin(angle))
line(x, y, x0, y0)
angle = angle + (2 * pi / spines)
def handle_keydown(key):
global ax, ay, explosionTime, explosionX, explosionY
if key == "left":
ax = -power
elif key == "right":
ax = power
elif key == "up":
ay = -power
elif key == "down":
ay = power
elif key =="space":
explosionX = x
explosionY = y
explosionTime = 50
def handle_keyup(key):
global ax, ay
if key == "left" or key == "right":
ax = 0
elif key == "up" or key == "down":
ay = 0
def do_explosion():
global explosionTime, explosionX, explosionY
if explosionTime > 0:
star(explosionX, explosionY, "orange", 50-explosionTime, 50-explosionTime)
explosionTime -= 1
if explosionTime == 0:
star(explosionX, explosionY, "white", 50-explosionTime, 50-explosionTime)
def handle_frame():
global x,y,vx,vy,ax,ay
color("white")
spot(x,y,8)
vx = vx + ax
vy = vy + ay + gravity
drag = - vy * 0.005
vy += drag
x = x + vx
y = y + vy
do_explosion()
color("blue")
spot(x,y,8)
| Python | 90 | 16.411112 | 82 | /art/orbits.py | 0.583598 | 0.556898 |
kapilkalra04/face-off-demo-python-flask | refs/heads/master | import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
import keras
import pandas as pd
def rotate(face,left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y):
lx = left_eye_center_x
ly = left_eye_center_y
rx = right_eye_center_x
ry = right_eye_center_y
# carry out angle calculations through arctan
dY = ry - ly
dX = rx - lx
angle = np.degrees(np.arctan2(dY, dX)) # angle should be in degrees
scale = 1
cx = (rx+lx)/2.0
cy = (ry+ly)/2.0
center = (cx,cy) # rotation will take place around the eye center
return center, angle, scale
def detectEyeCenters(face):
cnn = load_model('src/CNN_21_1000.h5')
# find the scaling ratios
faceHeight = np.float32(face.shape[0])
faceWidth = np.float32(face.shape[1])
heightScaling = 96.0/faceHeight
widthScaling = 96.0/faceWidth
face2 = face
# resize the image to the size on which the CNN was trained
faceResized = cv2.resize(face2,(96,96))
# prepare Input for CNN
faceResized = np.expand_dims(faceResized,axis=0)
faceResized = np.expand_dims(faceResized,axis=3)
faceResized = np.float32(faceResized)
faceResized = faceResized/255.0
# obtain output
outputVector = cnn.predict(faceResized)
outputVector = (outputVector*48) + 48
# scale up the eye centers obtained
ref_left_eye_center_x = outputVector[0,2]/widthScaling
ref_left_eye_center_y = outputVector[0,3]/heightScaling
ref_right_eye_center_x = outputVector[0,0]/widthScaling
ref_right_eye_center_y = outputVector [0,1]/heightScaling
print (ref_left_eye_center_x,ref_left_eye_center_y,ref_right_eye_center_x,ref_right_eye_center_y)
keras.backend.clear_session()
# load haar cascade classifiers
eye_cascade = cv2.CascadeClassifier('src/haarcascade_eye.xml')
eye_cascade_2 = cv2.CascadeClassifier('src/haarcascade_eye_2.xml')
eyes = eye_cascade.detectMultiScale(face)
if(len(eyes)<2):
eyes = eye_cascade_2.detectMultiScale(face)
print (eyes)
boundaryX = face.shape[1]/2.0 # separate them into Left and Right
boundaryY = face.shape[0]/2.0 # remove bottom half false candidates
eyeCenterLeftX = []
eyeCenterLeftY = []
eyeCenterLeftArea = []
eyeCenterRightX = []
eyeCenterRightY = []
eyeCenterRightArea = []
# separate out all possible eye centers candidate into LHS and RHS candidates
for i in range(0,len(eyes)):
if(eyes[i][0] + (eyes[i][2]/2.0) <= boundaryX - (boundaryX/16) and eyes[i][1] + (eyes[i][3]/2.0) <= boundaryY):
eyeCenterLeftX.append(eyes[i][0] + (eyes[i][2]/2.0))
eyeCenterLeftY.append(eyes[i][1] + (eyes[i][3]/2.0))
eyeCenterLeftArea.append(eyes[i][2] * eyes[i][3])
if(eyes[i][0] + (eyes[i][2]/2.0) > boundaryX + (boundaryX/16) and eyes[i][1] + (eyes[i][3]/2.0) <= boundaryY):
eyeCenterRightX.append(eyes[i][0] + (eyes[i][2]/2.0))
eyeCenterRightY.append(eyes[i][1] + (eyes[i][3]/2.0))
eyeCenterRightArea.append(eyes[i][2] * eyes[i][3])
indexL = 0
indexR = 0
if(len(eyeCenterLeftX) > 0 ):
# obtain main left-eye-center through the largest eye-box area criteria
minimumL = eyeCenterLeftArea[0]
for i in range(0,len(eyeCenterLeftArea)):
if eyeCenterLeftArea[i] >= minimumL:
indexL = i
minimumL = eyeCenterLeftArea[i]
# compare obtained haar cordinates to CNN coordinates
if(abs(eyeCenterLeftX[indexL] - ref_left_eye_center_x) < 2.5/widthScaling):
left_eye_center_x = eyeCenterLeftX[indexL]
else:
left_eye_center_x = ref_left_eye_center_x
if(abs(eyeCenterLeftY[indexL] - ref_left_eye_center_y) < 2.5/heightScaling):
left_eye_center_y = eyeCenterLeftY[indexL]
else:
left_eye_center_y = ref_left_eye_center_y
else:
left_eye_center_x = ref_left_eye_center_x
left_eye_center_y = ref_right_eye_center_y
if(len(eyeCenterRightX) > 0):
# obtain main right-eye-center through the largest eye-box area criteria
minimumR = eyeCenterRightArea[0]
for i in range(0,len(eyeCenterRightArea)):
if eyeCenterRightArea[i] >= minimumR:
indexR = i
minimumR = eyeCenterRightArea[i]
# compare obtained haar cordinates to CNN coordinates
if(abs(eyeCenterRightX[indexR] - ref_right_eye_center_x) < 2.5/widthScaling):
right_eye_center_x = eyeCenterRightX[indexR]
else:
right_eye_center_x = ref_right_eye_center_x
if(abs(eyeCenterRightY[indexR] - ref_right_eye_center_y) < 2.5/heightScaling):
right_eye_center_y = eyeCenterRightY[indexR]
else:
right_eye_center_y = ref_right_eye_center_y
else:
right_eye_center_x = ref_right_eye_center_x
right_eye_center_y = ref_right_eye_center_y
# print ref_left_eye_center_x,ref_left_eye_center_y,ref_right_eye_center_x,ref_right_eye_center_y
print (left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y)
return left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y
| Python | 148 | 32.020271 | 113 | /src/alignment.py | 0.697565 | 0.672601 |
kapilkalra04/face-off-demo-python-flask | refs/heads/master | from flask import Flask
from flask import request
import base64
import siameseTrain as ST1
import siameseTest as ST2
import siameseRecognizer as SR
import json
app = Flask(__name__)
@app.route("/")
def hello():
return "Connection Successful"
@app.route("/upload", methods=['POST'])
def upload():
base64Data = request.form.get('imageData')
empCount = request.form.get('empCount')
with open("data/library/train2/"+ str(empCount) + ".jpeg", "wb") as fh:
fh.write(base64.b64decode(base64Data))
return "Data Received"
@app.route("/train",methods=['GET'])
def train():
ST1.calculateTrainEmbeddings();
return "Repository Embeddings Generated"
@app.route("/verify", methods=['POST'])
def verify():
base64Data = request.form.get('imageData')
with open("data/library/test2/" + "test.jpeg", "wb") as fh:
fh.write(base64.b64decode(base64Data))
ST2.calculateTestEmbeddings();
response = SR.calculateNorm();
response = json.dumps(response)
return str(response)
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True) | Python | 42 | 23.857143 | 72 | /src/app.py | 0.706616 | 0.67977 |
kapilkalra04/face-off-demo-python-flask | refs/heads/master | import numpy as np
import matplotlib.pyplot as plt
import cv2
def convertToRGB(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def convertToGRAY(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def detect(model,weights,image,isPath):
# specify locations of the model and its weights
args = {}
args["model"] = model # model-definition
args["weights"] = weights # pre-trained weights
args["image"] = image # images are loaded as 3D matrix - (h x w x c)
args["confidence"] = 0.75 # when confidence>value then it is a face
# load the caffe model
print ("[INFO] Loading model")
# net = cnn used to detect faces
net = cv2.dnn.readNetFromCaffe(args["model"], args["weights"])
# load the input image
if(isPath==True):
image = cv2.imread(args["image"])
else:
image = image
# print len(image) # height of the image
# print len(image[0]) # width of the image
# print len(image[0][0]) # no of color-channels
# print image.shape # stores h,w,c values
(h, w) = image.shape[:2]
# construct an input blob for the image
# by resizing to a fixed 300x300 pixels and then normalizing it
# along with doing a mean subtraction
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)),
1.0, (300, 300), (104.0, 177.0, 123.0))
print ("[INFO] Computing face detections...")
net.setInput(blob)
detections = net.forward()
count = 0 # count of no of faces detected
faces = {} # stores the faces rectangles co-ordinates
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for the
# face
faces[i] = []
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int") # extracting integral values
# adding area details along with co-ordinate values
if(startX < 0):
startX = 0
if(startY < 0):
startY = 0
if(endX < 0):
endX = 0
if(endY < 0):
endY = 0
if(startX > w):
startX = w
if(startY > h):
startY = h
if(endX > w):
endX = w
if(endY > h):
endY = h
faces[i].extend([startX,endX,startY,endY,((endX-startX)*(endY-startY))])
# plotting the face rectangle
x = []
y = []
# plot the box
x.extend([startX,endX,endX,startX,startX])
y.extend([startY,startY,endY,endY,startY])
plt.plot(x,y)
count = count + 1
print ("Faces Detected = " + str(count))
largestFaceIndex = -1
largestAreaYet = 0
for i in range(0,len(faces)):
if(faces[i][4]>largestAreaYet):
largestFaceIndex = i
largestAreaYet = faces[i][4]
if isPath == True:
return convertToRGB(image),convertToGRAY(image),faces[largestFaceIndex]
else:
return image,convertToGRAY(image),faces[largestFaceIndex]
if __name__ == '__main__':
model = "src/deploy.prototxt.txt" # model-definition
weights = "src/res10_300x300_ssd_iter_140000.caffemodel" # pre-trained weights
image = "data/library/test2/test.jpeg" # image name reqd. images are loaded as 3D matrix - (h x w x c)
isPath = True
print ("Hello")
plt.subplot(2,1,1)
colorImage, grayImage, mainFaceBox = detect(model,weights,image,isPath)
plt.imshow(colorImage)
plt.subplot(2,1,2)
mainFaceGray = grayImage[mainFaceBox[2]:mainFaceBox[3], mainFaceBox[0]:mainFaceBox[1]]
plt.imshow(mainFaceGray)
plt.show() | Python | 117 | 29.632479 | 111 | /src/detection.py | 0.660061 | 0.631873 |
kapilkalra04/face-off-demo-python-flask | refs/heads/master | import detection
import matplotlib.pyplot as plt
import cv2
import alignment
def detectMainFace(imageName,isPath):
model = "src/deploy.prototxt.txt" # model-definition
weights = "src/res10_300x300_ssd_iter_140000.caffemodel" # pre-trained weights
image = imageName # image name reqd. images are loaded as 3D matrix - (h x w x c)
# send for face detection
colorImage, grayImage, mainFaceBox = detection.detect(model,weights,image,isPath)
# crop the misaligned face from the whole image
mainFaceGray = grayImage[mainFaceBox[2]:mainFaceBox[3], mainFaceBox[0]:mainFaceBox[1]]
mainFaceColor = colorImage[mainFaceBox[2]:mainFaceBox[3], mainFaceBox[0]:mainFaceBox[1]]
return colorImage, mainFaceColor, mainFaceGray, mainFaceBox
def alignImage(colorImage,mainFaceGray,mainFaceBox):
# obtain eye centers
left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y = alignment.detectEyeCenters(mainFaceGray)
# obtain affine transformation values
center, angle, scale = alignment.rotate(mainFaceGray,left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y)
# update co-ordinates according to colorImage the orignal iage
left_eye_center_x = left_eye_center_x + mainFaceBox[0]
right_eye_center_x = right_eye_center_x + mainFaceBox[0]
left_eye_center_y = left_eye_center_y + mainFaceBox[2]
right_eye_center_y = right_eye_center_y + mainFaceBox[2]
center = (center[0]+mainFaceBox[0],center[1]+mainFaceBox[2])
# perform affine transformation
M = cv2.getRotationMatrix2D(center, angle, scale)
alignedImage = cv2.warpAffine(colorImage,M,(colorImage.shape[1],colorImage.shape[0]),flags=cv2.INTER_CUBIC)
return alignedImage, left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y
# return the face in gray scale
def getFaceGray(imagePath):
# detect the misaligned largest face in gray
colorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(imagePath,True)
# straighten the actual image
alignedImage, e1x, e1y, e2x, e2y = alignImage(colorImage,mainFaceGray,mainFaceBox)
# detect the aligned largest face in gray
colorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(alignedImage,False)
# apply denoising
mainFaceGray = cv2.fastNlMeansDenoising(mainFaceGray) # denoising
return mainFaceGray # returns a grayscaled,aligned,(256,256) face
# return the face in RGB
def getFaceColor(imagePath):
# detect the misaligned largest face in gray
colorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(imagePath,True)
# straighten the actual image
alignedImage, e1x, e1y, e2x, e2y = alignImage(colorImage,mainFaceGray,mainFaceBox)
# detect the aligned largest face in gray
colorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(alignedImage,False)
# apply denoising
mainFaceColor = cv2.fastNlMeansDenoisingColored(mainFaceColor) # denoising
return mainFaceColor # returns a grayscaled,aligned,(256,256) face
if __name__ == '__main__':
plt.subplot(2,2,1)
colorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace('data/library/test2/test.jpeg',True)
plt.imshow(colorImage)
plt.subplot(2,2,2)
plt.imshow(mainFaceColor)
alignedImage, e1x, e1y, e2x, e2y = alignImage(colorImage,mainFaceGray,mainFaceBox)
X = [e1x,e2x]
Y = [e1y,e2y]
plt.subplot(2,2,3)
plt.imshow(alignedImage)
plt.plot(X,Y,'-D',markersize=3)
plt.subplot(2,2,4)
# plt.imshow(alignedImage,cmap='gray')
# plt.show()
colorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(alignedImage,False)
plt.imshow(mainFaceColor)
plt.show()
# plt.imshow(getFace('data/library/train/IMG_0007.JPG'),cmap='gray')
# plt.show()
| Python | 98 | 37.2449 | 128 | /src/pre_processing2.py | 0.755336 | 0.732391 |
kapilkalra04/face-off-demo-python-flask | refs/heads/master | # The pre-trained model was provided by https://github.com/iwantooxxoox/Keras-OpenFace #
import tensorflow as tf
import numpy as np
import cv2
import glob
import pre_processing2 as pre
import matplotlib.pyplot as plt
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
producer_op_list=None
)
return graph
def calculateTestEmbeddings():
graph = load_graph('src/20180402-114759/20180402-114759.pb')
faceList = []
for imagePath in glob.glob('data/library/test2/*'):
# loading cropped,RGBscale,aligned (160,160)sized faces as reqd by FaceNet
faceList.append(np.expand_dims(cv2.resize(pre.getFaceColor(imagePath),(160,160)), axis=0))
with tf.Session(graph=graph) as sess:
images_placeholder = graph.get_tensor_by_name("import/input:0")
embeddings = graph.get_tensor_by_name("import/embeddings:0")
phase_train_placeholder = graph.get_tensor_by_name("import/phase_train:0")
faceListInput = np.concatenate(faceList, axis=0)
#normalizing the input
faceListInput = np.float32(faceListInput)/255.0
feedDict = {phase_train_placeholder: False, images_placeholder: faceListInput}
values = sess.run(embeddings,feedDict)
# save embedding values
np.save('src/cstmrEmbeddings',values)
tf.reset_default_graph();
if __name__ == '__main__':
graph = load_graph('src/20180402-114759/20180402-114759.pb')
faceList = []
for imagePath in glob.glob('data/library/test2/*'):
# loading cropped,RGBscale,aligned (160,160)sized faces as reqd by FaceNet
faceList.append(np.expand_dims(cv2.resize(pre.getFaceColor(imagePath),(160,160)), axis=0))
with tf.Session(graph=graph) as sess:
images_placeholder = graph.get_tensor_by_name("import/input:0")
embeddings = graph.get_tensor_by_name("import/embeddings:0")
phase_train_placeholder = graph.get_tensor_by_name("import/phase_train:0")
faceListInput = np.concatenate(faceList, axis=0)
#normalizing the input
faceListInput = np.float32(faceListInput)/255.0
print (faceListInput.shape)
feedDict = {phase_train_placeholder: False, images_placeholder: faceListInput}
values = sess.run(embeddings,feedDict)
# save embedding values
np.save('src/cstmrEmbeddings',values)
| Python | 73 | 35.931507 | 98 | /src/siameseTest.py | 0.657895 | 0.617865 |
kapilkalra04/face-off-demo-python-flask | refs/heads/master | import numpy as np
import glob
import cv2
import pre_processing2 as pre
import matplotlib.pyplot as plt
def calculateNorm():
empEmbeddings = np.load('src/empEmbeddings.npy')
cstmrEmbeddings = np.load('src/cstmrEmbeddings.npy')
faceListTrain = []
faceListTest = []
answer = {}
norm = []
for i in range(0,len(empEmbeddings)):
for j in range(0,len(cstmrEmbeddings)):
norm.append(np.float64(np.linalg.norm(empEmbeddings[i] - cstmrEmbeddings[j])))
flag = "NO"
for e in norm:
if(e<0.9):
flag = 'YES'
break
answer['norm'] = norm
answer['result'] = flag;
return answer
if __name__ == '__main__':
empEmbeddings = np.load('src/empEmbeddings.npy')
print (empEmbeddings.shape)
cstmrEmbeddings = np.load('src/cstmrEmbeddings.npy')
print (cstmrEmbeddings.shape)
faceListTrain = []
faceListTest = []
for imagePath in glob.glob('data/library/train2/*'):
faceListTrain.append(cv2.resize(pre.getFaceColor(imagePath),(160,160)))
for imagePath in glob.glob('data/library/test2/*'):
faceListTest.append(cv2.resize(pre.getFaceColor(imagePath),(160,160)))
plt.subplot2grid((1,4),(0,0))
plt.imshow(faceListTrain[0])
for i in range(0,len(empEmbeddings)):
for j in range(0,len(cstmrEmbeddings)):
plt.subplot2grid((1,4),(0,j+1))
plt.imshow(faceListTest[j])
plt.title(np.linalg.norm(empEmbeddings[i] - cstmrEmbeddings[j]))
plt.tight_layout()
plt.suptitle('ONE SHOT LEARNING TEST')
plt.show() | Python | 60 | 22.933332 | 81 | /src/siameseRecognizer.py | 0.703136 | 0.677352 |
kapilkalra04/face-off-demo-python-flask | refs/heads/master | import glob
import numpy as np
import pre_processing2 as pre
import cv2
import matplotlib.pyplot as plt
# ---- training phase: load the training face(s) and fit an LBPH recognizer ----
images = []
for imagePath in glob.glob('data/library/train2/*'):
    images.append(imagePath)
faceList = []
# labelList = [0,0,0,0,0,0,0,0,0,0]
# NOTE(review): a single label entry assumes train2/ holds exactly one image;
# train() needs one label per sample — confirm against the data folder
labelList = [0]
index = 0
for path in images:
    temp = pre.getFaceGray(path)
    temp = cv2.resize(temp,(369,512))
    faceList.append(temp)
    print ("[INFO] Image Loaded: " + str(index+1))
    print (faceList[-1].shape)
    # plt.subplot2grid((5,3),(index%5,index/5))
    plt.subplot2grid((1,4),(0,0))
    plt.imshow(faceList[-1])
    index = index + 1
print (labelList)
# LBPH parameters: radius=1, neighbors=8, grid 8x8, threshold=123
faceRecognizer = cv2.face.LBPHFaceRecognizer_create(1,8,8,8,123)
faceRecognizer.train(faceList,np.array(labelList))
# ---- testing phase: predict a label + confidence for every test image ----
imagesTest = []
for imagePath in glob.glob('data/library/test2/*'):
    imagesTest.append(imagePath)
print ("[INFO] ========TESTING=======")
faceListTest = []
prediction = {}
index = 0
for path in imagesTest:
    testSample = pre.getFaceGray(path) #np.array.shape = (256,256)
    testSample = cv2.resize(testSample,(369,512))
    print ("[INFO] Test Image Loaded: " + str(index+1))
    prediction[index] = []
    # lower confidence value = closer match in OpenCV's LBPH recognizer
    predictedLabel, confidence = faceRecognizer.predict(testSample)
    # plt.subplot2grid((5,3),(index,2))
    plt.subplot2grid((1,4),(0,index+1))
    plt.imshow(testSample,cmap='gray')
    plt.title(str(predictedLabel) + " : " + str(confidence))
    prediction[index].extend([predictedLabel,confidence])
    index = index + 1
plt.tight_layout()
plt.suptitle('ONE SHOT LEARNING TEST')
plt.show()
print (prediction)
| Python | 60 | 24.333334 | 65 | /src/recognition.py | 0.698026 | 0.651974 |
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtWidgets, QtCore, QtGui
class PADCompleter(QCompleter):
    """A QCompleter that matches the typed text anywhere inside a candidate
    (substring search) instead of only at the start, by swapping in a
    filtering proxy model on every keystroke."""

    def __init__(self):
        super().__init__()
        # the text currently typed into the attached editor
        self.prefix = ''
        # the unfiltered source model holding all candidate strings
        self.model = None

    def _set_model_(self, model):
        """Store the unfiltered source model and install it on the completer."""
        self.model = model
        super().setModel(self.model)

    def _update_model_(self):
        """Wrap the source model in a proxy that only accepts rows whose
        display text contains the current prefix (case-insensitive)."""
        prefix = self.prefix

        class InnerProxyModel(QSortFilterProxyModel):
            def filterAcceptsRow(self, row, parent):
                index = self.sourceModel().index(row, 0, parent)
                search_string = prefix.lower()
                model_string = self.sourceModel().data(index, Qt.DisplayRole).lower()
                #print(search_string, 'in', model_string, search_string in model_string)
                return search_string in model_string

        proxy_model = InnerProxyModel()
        proxy_model.setSourceModel(self.model)
        self.setModel(proxy_model)
        #print('match :', proxy_model.rowCount())

    def splitPath(self, path):
        """Refresh the filter for the new text and disable the completer's
        own prefix matching by returning a single empty string.

        Bug fix: the original returned ``self.sourceModel().data()``, which
        raises a TypeError (``data()`` requires an index) — and splitPath()
        must return a list of strings in any case.
        """
        self.prefix = str(path)
        self._update_model_()
        return ['']
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
# Class Description:
# Create framework for the split screens used in PAD_GUI
# import necessary files
import os
import json
from functools import partial
from PyQt5.QtWidgets import (QLabel, QWidget, QHBoxLayout,
QFrame, QSplitter, QStyleFactory,
QGridLayout, QLineEdit, QPushButton,
QVBoxLayout, QCompleter, QComboBox,
QScrollArea, QToolTip)
from PyQt5.QtGui import QPixmap, QColor, QFont
from PyQt5.QtCore import Qt, QStringListModel
from PAD_Monster import PADMonster
from PAD_Team import PADTeam
class CalculatorScreen(QHBoxLayout):
    """Main calculator layout: a monitor grid showing each team member's
    stats on top, and name/level/plus input controls on the bottom."""

    def __init__(self, gui):
        """Build the team state, load monster metadata, and lay out the UI.

        :param gui: parent widget all child widgets are created under
        """
        super().__init__()
        # 0 = lead1, 1 = sub1,..., 5 = lead2
        self.team = [PADMonster() for x in range(6)]
        self.pad_team = PADTeam(self.team)
        # keeps old team stats before modification from leader multipliers
        self.team_base = [PADMonster() for x in range(6)]
        # open monsters.txt and load it into a python object using json
        # self.json_monsters = requests.get('https://padherder.com/api/monsters')
        # (os.path.join('.', 'monsters.txt') replaces the Windows-only
        # '.\monsters.txt' literal, and the context manager closes the file
        # handle that was previously left open)
        with open(os.path.join('.', 'monsters.txt'), 'r') as json_file:
            self.json_monsters = json.load(json_file)
        # print(self.json_monsters[0]["name"])
        # model feeding the name auto-completion on the line edits
        self.completer_string_list_model = QStringListModel()
        array_of_monster_names = []
        for x in range(len(self.json_monsters)):
            array_of_monster_names.append(self.json_monsters[x]["name"])
        self.completer_string_list_model.setStringList(array_of_monster_names)
        # checks if the modified button has been pressed so other functions can know which stat to display
        self.is_pressed = False
        QToolTip.setFont(QFont('SansSerif', 10))
        self.init_screen(gui)

    def init_screen(self, gui):
        """Construct the monitor (top) and input (bottom) halves of the
        calculator and join them with a vertical splitter."""
        # add things to top of the screen here (Monitor section)!
        # Create an overarching top widget/layout
        supreme_top_box = QWidget()
        supreme_top_box_layout = QVBoxLayout()
        supreme_top_box.setLayout(supreme_top_box_layout)
        # Monitor section will have labels inside of a grid layout
        top_box = QWidget()
        grid = QGridLayout()
        top_box.setLayout(grid)
        supreme_top_box_layout.addWidget(top_box)
        # Creates lists of labels, initially having only static labels and having
        # the tangible labels substituted with ''
        static_labels = ['', '', '', '', '', '', '', '',
                         '', 'Lead 1', 'Sub 1 ', 'Sub 2 ', 'Sub 3 ', 'Sub 4 ', 'Lead 2', 'Team Totals',
                         'Type:', '', '', '', '', '', '', '',
                         'HP:', 0, 0, 0, 0, 0, 0, 0,
                         'Atk:', 0, 0, 0, 0, 0, 0, 0,
                         'Pronged Atk:', 0, 0, 0, 0, 0, 0, 0,
                         'RCV:', 0, 0, 0, 0, 0, 0, 0,
                         'Awakenings:', '', '', '', '', '', '', '']
        self.display_labels = [QLabel(gui) for x in range(len(static_labels))]
        for s_label, d_label in zip(static_labels, self.display_labels):
            if s_label == '':
                continue
            d_label.setText(str(s_label))
        # 8x8 grid positions, row-major, matching the static_labels layout
        positions = [(i, j) for i in range(8) for j in range(8)]
        for position, d_label in zip(positions, self.display_labels):
            # why *position? because the array is [(i,j), (i,j),...,(i,j)]
            grid.addWidget(d_label, *position)
            grid.setAlignment(d_label, Qt.AlignHCenter)
        self.leader_skills_labels = [QLabel(gui) for x in range(2)]
        for x in range(2):
            self.leader_skills_labels[x].setText('Leader Skill '+str(x+1)+': ')
            supreme_top_box_layout.addWidget(self.leader_skills_labels[x])
        # Create another row of labels for Awoken Skills Image Lists
        # Create another row of labels to show the Leader Skill Multipliers
        ########################################################################
        # add things to bottom of the screen here (Input section)!
        # Input section will be split in two: have LineEdits in a grid layout and then PushButtons in a separate grid
        # layout
        bottom_box = QWidget()
        grid2 = QGridLayout()
        bottom_box.setLayout(grid2)
        bottom_labels_text = ['Leader 1', 'Sub 1', 'Sub 2', 'Sub 3', 'Sub 4', 'Leader 2']
        bottom_labels = [QLabel(gui) for x in range(6)]
        instruction_labels_text = ['Please enter the name here:', 'Enter level here:', 'Enter pluses here:']
        instruction_labels = [QLabel(gui) for x in range(3)]
        self.line_edits = [QLineEdit(gui) for x in range(6)]
        # substring, case-insensitive completion over all monster names
        line_edit_completer = QCompleter()
        line_edit_completer.setCaseSensitivity(Qt.CaseInsensitive)
        line_edit_completer.setFilterMode(Qt.MatchContains)
        line_edit_completer.setModel(self.completer_string_list_model)
        # Combo Boxes for Levels and Pluses
        level_boxes = [QComboBox(gui) for x in range(6)]
        self.plus_boxes_types = [QComboBox(gui) for x in range(6)]
        self.plus_boxes_values = [QComboBox(gui) for x in range(6)]
        for x in range(6):
            for n in range(0,100):
                if n != 0 and n <= self.team[x].max_level:
                    level_boxes[x].addItem(str(n))
                    self.plus_boxes_values[x].addItem(str(n))
            self.plus_boxes_types[x].addItem('hp')
            self.plus_boxes_types[x].addItem('atk')
            self.plus_boxes_types[x].addItem('rcv')
            # the value box shares a grid cell with the type box and is only
            # shown once a plus type has been chosen
            self.plus_boxes_values[x].hide()
        # add the labels and line_edits to the bottom grid
        for x in range(6):
            bottom_labels[x].setText(bottom_labels_text[x])
            bottom_labels[x].adjustSize()
            grid2.addWidget(bottom_labels[x], *(x+1, 0))
            grid2.addWidget(self.line_edits[x], *(x+1, 1))
            grid2.addWidget(level_boxes[x], *(x+1, 2))
            grid2.addWidget(self.plus_boxes_types[x], *(x+1, 3))
            grid2.addWidget(self.plus_boxes_values[x], *(x+1, 3))
            self.line_edits[x].textChanged[str].connect(partial(self._on_changed_, x))
            self.line_edits[x].setCompleter(line_edit_completer)
            self.line_edits[x].setMaxLength(50)
            level_boxes[x].activated[str].connect(partial(self._on_level_activated_, x))
            self.plus_boxes_types[x].activated[str].connect(partial(self._on_plus_type_activated_, x))
        for x in range(3):
            instruction_labels[x].setText(instruction_labels_text[x])
            instruction_labels[x].adjustSize()
            grid2.addWidget(instruction_labels[x], *(0, x+1))
        ###########################################################################
        # create the button widgets in a separate widget below bottom_box
        below_bottom_box = QWidget()
        grid3 = QGridLayout()
        below_bottom_box.setLayout(grid3)
        # create a set of buttons below the line_edits:
        # White(Base) Red Blue Green Yellow Purple
        buttons = []
        button_labels = ['Fire', 'Water', 'Wood', 'Light', 'Dark', 'Base']
        button_colors = ['red', 'lightskyblue', 'green', 'goldenrod', 'mediumpurple', 'white']
        for x in range(6):
            buttons.append(QPushButton(button_labels[x], gui))
            buttons[x].clicked.connect(partial(self._handle_button_, x))
            buttons[x].setStyleSheet('QPushButton { background-color : %s }' % button_colors[x])
            grid3.addWidget(buttons[x], *(0, x))
        # create a QHBoxLayout widget that holds the page turners and toggle
        page_turner = QWidget()
        page_turner_layout = QHBoxLayout()
        page_turner.setLayout(page_turner_layout)
        # create the page turner and toggle widgets
        page_turner_layout.addStretch()
        self.toggle_button = QPushButton('Toggle On Modified Stats', gui)
        self.toggle_button.setCheckable(True)
        self.toggle_button.clicked[bool].connect(self._handle_toggle_button_)
        page_turner_layout.addWidget(self.toggle_button)
        page_turner_layout.addStretch()
        # Create overarching bottom widget
        supreme_bottom_box = QWidget()
        supreme_bottom_box_layout = QVBoxLayout()
        supreme_bottom_box.setLayout(supreme_bottom_box_layout)
        button_label = QLabel('Select from below the attribute you would like to display.')
        supreme_bottom_box_layout.setAlignment(button_label, Qt.AlignHCenter)
        supreme_bottom_box_layout.addWidget(bottom_box)
        supreme_bottom_box_layout.addWidget(button_label)
        supreme_bottom_box_layout.addWidget(below_bottom_box)
        supreme_bottom_box_layout.addWidget(page_turner)
        # Add the two screens into a split screen
        splitter = QSplitter(Qt.Vertical)
        splitter.addWidget(supreme_top_box)
        splitter.addWidget(supreme_bottom_box)
        # Add the split screen to our main screen
        self.addWidget(splitter)

    def _create_monster_(self, index, dict_index, name):
        """
        When a valid name has been entered into the line edits, create a PADMonster Class
        using the values stored in the json dictionary and save the PADMonster to the appropriate
        index in the team array and PADTeam Class subsequently.
        :param index: 0 = lead 1, 1 = sub 1, 2 = sub 2, 3 = sub 3, 4 = sub 4, 5 = lead 2
        :param dict_index: the index in the json dictionary containing the monster
        :param name: the monster's name
        """
        self.team[index] = PADMonster()
        self.team_base[index] = PADMonster()
        # alias the json record once instead of indexing the list per field
        record = self.json_monsters[dict_index]
        hp_max = record["hp_max"]
        atk_max = record["atk_max"]
        rcv_max = record["rcv_max"]
        attr1 = record["element"]
        attr2 = record["element2"]
        type1 = record["type"]
        type2 = record["type2"]
        image60_size = record["image60_size"]
        image60_href = record["image60_href"]
        awakenings = record["awoken_skills"]
        leader_skill_name = record["leader_skill"]
        max_level = record["max_level"]
        hp_min = record["hp_min"]
        atk_min = record["atk_min"]
        rcv_min = record["rcv_min"]
        hp_scale = record["hp_scale"]
        atk_scale = record["atk_scale"]
        rcv_scale = record["rcv_scale"]
        # use PAD_Monster's function to set our monster's stats
        self.team[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1,
                                        type2, image60_size, image60_href, awakenings,
                                        leader_skill_name, max_level, hp_min, hp_scale,
                                        atk_min, atk_scale, rcv_min, rcv_scale)
        # create a PADTeam Class according to our team of Six PADMonster Classes
        self.pad_team = PADTeam(self.team)
        # set our labels according to our monsters
        self._set_labels_(self.team[index], index)
        # save our team for future modifications:
        self.team_base[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1,
                                             type2, image60_size, image60_href, awakenings,
                                             leader_skill_name, max_level, hp_min, hp_scale,
                                             atk_min, atk_scale, rcv_min, rcv_scale)

    def _set_labels_(self, monster, index):
        """
        Set the labels according to the values in the indexed PADMonster Class
        :param monster: the PADMonster associated with the index
        :param index: the index associated with the PADMonster [0-5]
        """
        # extract and display image
        self.display_labels[index + 1].setPixmap(QPixmap(os.path.join('images') + '/' + monster.name + '.png'))
        # display name
        font = QFont()
        font.setPointSize(5)
        type_text = monster.type_main_name+'/'+monster.type_sub_name
        self.display_labels[index + 17].setText(type_text)
        self.display_labels[index + 17].setFont(font)
        self.display_labels[index + 17].adjustSize()
        self.display_labels[index + 17].setToolTip(type_text)
        # display hp
        hp = monster.hp
        # if modified by leader skills button has been pressed, multiply monster's stat by its
        # respective index in the stats modified variable of the PADTeam Class
        if self.is_pressed:
            hp *= self.pad_team.stats_modified_by[index][0]
        # if plus values have been set, display how many
        if monster.hp_plus > 0:
            self.display_labels[index + 25].setText(str(round(hp)) + ' (+' + str(monster.hp_plus) + ')')
        else:
            self.display_labels[index + 25].setText(str(round(hp)))
        self.display_labels[index + 25].adjustSize()
        # display attack and pronged attack of main element
        self._set_attack_labels_(index, 5, monster.atk[monster.attr_main], monster.pronged_atk[monster.attr_main],
                                 monster.base_atk_plus)
        # display rcv
        rcv = monster.rcv
        # if modified by leader skills button has been pressed, multiply monster's stat by its
        # respective index in the stats modified variable of the PADTeam Class
        if self.is_pressed:
            rcv *= self.pad_team.stats_modified_by[index][2]
        # if plus values have been set, display how many
        if monster.rcv_plus > 0:
            self.display_labels[index + 49].setText(str(round(rcv)) + ' (+' + str(monster.rcv_plus) + ')')
        else:
            self.display_labels[index + 49].setText(str(round(rcv)))
        self.display_labels[index + 49].adjustSize()
        # display awakenings
        awakenings_text = ''
        awakenings_font = QFont()
        awakenings_font.setPointSize(6)
        for x in range(len(monster.awakenings)):
            if monster.awakenings[x][2] > 0:
                awakenings_text += monster.awakenings[x][0]+': '+str(monster.awakenings[x][2])+'\n'
        # set awakenings string to a tooltip since it can't fit into the grid
        self.display_labels[index + 57].setText('Hover Me!')
        self.display_labels[index + 57].setFont(awakenings_font)
        self.display_labels[index + 57].adjustSize()
        self.display_labels[index + 57].setToolTip(awakenings_text)
        # calculate and change our display labels for team total values with each change in monster
        self._set_team_labels_()
        # if the monster is in the first or last index, it's considered the leader and its leader skill name
        # and effect are displayed accordingly.
        # (bug fix: the second leader's label previously said 'Leader Skill 1';
        # both branches now share _set_leader_skill_label_, which numbers the
        # label correctly)
        if index == 0:
            self._set_leader_skill_label_(0)
        elif index == 5:
            self._set_leader_skill_label_(1)

    def _set_leader_skill_label_(self, label_index):
        """
        Display the leader skill name and description of one of the two
        leaders, wrapping overly long text onto a second line.
        :param label_index: 0 = leader 1 (team slot 0), 1 = leader 2 (team slot 5)
        """
        monster = self.team[0] if label_index == 0 else self.team[5]
        text = ('Leader Skill ' + str(label_index + 1) + ': ' +
                monster.leader_skill_name + ' > ' + monster.leader_skill_desc)
        # if the string is too long, splice it up
        if len(text) > 50:
            divider = len(text)//2
            # separate the string at a part that is a whitespace
            while text[divider] != ' ':
                divider += 1
            final_text = text[:divider]+'\n'+text[divider:]
        else:
            final_text = text
        self.leader_skills_labels[label_index].setText(final_text)

    def _set_attack_labels_(self, index, color_num, atk_value, pronged_atk_value, plus_value = 0):
        """
        Set the attack labels according to the values given.
        :param index: the index of the PADMonster [0-5] and 6 = the team total
        :param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
        :param atk_value: the value to be displayed in the attack label
        :param pronged_atk_value: the value to be displayed in the pronged attack label
        :param plus_value: the amount of pluses is set to 0 initially
        """
        # an array holding the colors associated with each value of color_num
        colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'black']
        # if modified by leader skills button has been pressed, multiply monster's stat by its
        # respective index in the stats modified variable of the PADTeam Class
        # (index 6 is the team total, which is already modified upstream)
        if self.is_pressed and index != 6:
            atk_value *= self.pad_team.stats_modified_by[index][1]
            pronged_atk_value *= self.pad_team.stats_modified_by[index][1]
        # display attack of main element
        if plus_value > 0:
            self.display_labels[index + 33].setText(str(round(atk_value)) + ' (+' + str(plus_value) + ')')
        else:
            self.display_labels[index + 33].setText(str(round(atk_value)))
        self.display_labels[index + 33].setStyleSheet("QLabel { color : %s }" % colors[color_num])
        self.display_labels[index + 33].adjustSize()
        # display pronged attack of main element
        self.display_labels[index + 41].setText(str(round(pronged_atk_value)))
        self.display_labels[index + 41].setStyleSheet("QLabel {color : %s }" % colors[color_num])
        self.display_labels[index + 41].adjustSize()

    def _set_team_labels_(self):
        """
        Access the PADTeam Class to extract the values to be displayed in the Team Totals Labels
        """
        # initialize objects to store the total values
        hp_total = self.pad_team.hp
        atk_total = self.pad_team.base_atk
        pronged_atk_total = self.pad_team.base_pronged_atk
        rcv_total = self.pad_team.rcv
        total_awakenings = self.pad_team.awakenings
        # if the modified by leader skills button is pressed, use the team's modified stats instead
        if self.is_pressed:
            hp_total = self.pad_team.hp_modified
            atk_total = self.pad_team.base_atk_modified
            pronged_atk_total = self.pad_team.base_pronged_atk_modified
            rcv_total = self.pad_team.rcv_modified
        # display our total value objects on our labels
        self.display_labels[31].setText(str(round(hp_total)))
        self.display_labels[31].adjustSize()
        self._set_attack_labels_(6, 5, atk_total, pronged_atk_total)
        self.display_labels[55].setText(str(round(rcv_total)))
        self.display_labels[55].adjustSize()
        # set the label containing the team's total awakenings to a tooltip since it won't fit
        awakenings_font = QFont()
        awakenings_font.setPointSize(6)
        self.display_labels[63].setText('Hover Me!')
        self.display_labels[63].setFont(awakenings_font)
        self.display_labels[63].adjustSize()
        self.display_labels[63].setToolTip(total_awakenings)

    def _get_total_attr_attack_(self, attr):
        """
        Returns the values stored in PADTeam for the Team's Total Attacks and Pronged Attacks
        for the specified element or the sum of all the element's attacks (BASE)
        :param attr: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
        :return: (atk_total, pronged_atk_total) tuple for the chosen element
        """
        # if we're not looking for the base values a.k.a. sum of all the values
        if attr != 5:
            if not self.is_pressed:
                atk_total = self.pad_team.atk[attr]
                pronged_atk_total = self.pad_team.pronged_atk[attr]
            else:
                atk_total = self.pad_team.atk_modified[attr]
                pronged_atk_total = self.pad_team.pronged_atk_modified[attr]
        # if we're looking for the base values
        else:
            if not self.is_pressed:
                atk_total = self.pad_team.base_atk
                pronged_atk_total = self.pad_team.base_pronged_atk
            else:
                atk_total = self.pad_team.base_atk_modified
                pronged_atk_total = self.pad_team.base_pronged_atk_modified
        return atk_total, pronged_atk_total

    # when line_edits are altered, activate this line code according to the text in the line
    def _on_changed_(self, index, text):
        """
        When a line edit is altered, check the text entered to see if it matches with any of
        the names in the json dictionary and create a PADMonster at the appropriate index in
        the team array if the name is found.
        :param index: the index of the line edit corresponding to the index of the PADMonster
        in the team array.
        :param text: the text currently inside the line edit
        """
        for x in range(len(self.json_monsters)):
            if text == self.json_monsters[x]["name"]:
                self._create_monster_(index, x, text)
            elif text.title() == self.json_monsters[x]["name"]:
                self._create_monster_(index, x, text.title())

    def _handle_button_(self, color_num, pressed):
        """
        Only show the Attack and Pronged Attack values of the appropriate element or sum of the
        elements if BASE is chosen.
        :param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
        :param pressed: useless event input
        """
        for index in range(6):
            if color_num == 5:
                self._set_attack_labels_(index, color_num, self.team[index].atk[self.team[index].attr_main],
                                         self.team[index].pronged_atk[self.team[index].attr_main])
            else:
                self._set_attack_labels_(index, color_num, self.team[index].atk[color_num],
                                         self.team[index].pronged_atk[color_num])
        atk_total, pronged_atk_total = self._get_total_attr_attack_(color_num)
        self._set_attack_labels_(6, color_num, atk_total, pronged_atk_total)

    def _handle_toggle_button_(self, pressed):
        """
        If the modify stats by leader skills button is pressed, modify the button's text, set
        the Class Variable is_pressed to True/False accordingly, and reset the labels now that
        is_pressed has been changed.
        :param pressed: Useless event input.
        """
        if pressed:
            self.is_pressed = True
            self.toggle_button.setText('Toggle Off Modified Stats')
        else:
            self.is_pressed = False
            self.toggle_button.setText('Toggle On Modified Stats')
        for monster in range(6):
            self._set_labels_(self.team[monster], monster)

    def _on_level_activated_(self, index, level):
        """
        If a level for the PADMonster has been selected, change the monster's base stats
        according to that level, reset pad_team according to these new values and reset
        labels accordingly.
        :param index: PADMonster's index in the team array. [0-5]
        :param level: the level the PADMonster will be set to
        """
        self.team[index]._set_stats_at_level_(int(level))
        self.team_base[index]._set_stats_at_level_(int(level))
        self.pad_team = PADTeam(self.team)
        for monster in range(6):
            self._set_labels_(self.team[monster], monster)

    def _on_plus_type_activated_(self, index, text):
        """
        If hp, atk, or rcv has been selected in the drop down menu, hide the menu asking for the
        type and show the menu asking for the value of pluses between 0-99.
        :param index: PADMonster's index in the team array. [0-5]
        :param text: 'hp', 'atk', or 'rcv'
        """
        self.plus_boxes_types[index].hide()
        self.plus_boxes_values[index].show()
        # drop any handler from a previous type selection so the value box is
        # only connected once, bound to the latest type
        try: self.plus_boxes_values[index].activated[str].disconnect()
        except Exception: pass
        self.plus_boxes_values[index].activated[str].connect(partial(self._on_plus_value_activated_, index, text))
        self.plus_boxes_types[index].disconnect()

    def _on_plus_value_activated_(self, index, type, value):
        """
        If the value pertaining to the specified type has been selected, modify the appropriate
        stat of the indexed PADMonster according the specified amount of pluses, reset the
        pad_team according to the modified stats, and redisplay the new values
        :param index: PADMonster's index in the team array. [0-5]
        :param type: 'hp', 'atk', or 'rcv'
        :param value: the value, 0-99, of pluses the PADMonster has for the specified type
        """
        self.plus_boxes_types[index].show()
        self.plus_boxes_types[index].activated[str].connect(partial(self._on_plus_type_activated_, index))
        self.plus_boxes_values[index].hide()
        self.team[index]._set_stats_with_pluses_(type, int(value))
        self.team_base[index]._set_stats_with_pluses_(type, int(value))
        self.pad_team = PADTeam(self.team)
        for monster in range(6):
            self._set_labels_(self.team[monster], monster)
# class mouselistener(QLabel):
# def __init__(self):
# super().__init__()
#
# self.setMouseTracking(True)
# self.widget_location = self.rect()
#
# def mouseMoveEvent(self, event):
# posMouse = event.pos()
# font = QFont()
# if self.widget_location.contains(posMouse):
# font.setPointSize(8)
#
# QToolTip.setFont(font)
# self.setToolTip(self.text())
#
# return super().mouseReleaseEvent(event)
| Python | 536 | 47.794777 | 117 | /Calculator_Screen.py | 0.60052 | 0.589738 |
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
# import necessary files
from PyQt5 import PyQt5
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QHBoxLayout,
QFrame, QSplitter, QStyleFactory,
QMainWindow, QStackedWidget)
from PyQt5.QtCore import Qt
from PADScreen import PADScreen
class GUIMainWindow(QMainWindow):
    """Top-level window that hosts the PAD damage-calculator screen."""

    def __init__(self):
        super().__init__()
        # the calculator screen fills the whole window
        central = PADScreen(self)
        self.setCentralWidget(central)
        # fixed start-up position/size and title, then show immediately
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('PAD Damage Calculator')
        self.show()
class PADGUI(QStackedWidget):
    """Stacked-widget container for the calculator's screens."""

    def __init__(self, main_window):
        super().__init__()
        self.init_UI(main_window)

    def init_UI(self, main_window):
        """Build the initial calculator screen, stack it, and show the widget."""
        # PADScreen is a layout, so it must be attached to a plain widget
        # before it can be added to the stack
        screen = PADScreen(self, main_window)
        holder = QWidget(main_window)
        holder.setLayout(screen)
        self.addWidget(holder)
        # a simulation screen would be added to the stack here
        # window dimensions and title, then show it off
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('PAD Damage Calculator')
        self.show()
if __name__ == '__main__':
    # standard Qt bootstrap: create the application, build/show the main
    # window, then hand control to the event loop until the window closes
    app = QApplication(sys.argv)
    gui = GUIMainWindow()
    sys.exit(app.exec_())
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
# Class Description:
# Our Monster Class where we hold all of the Monster's stats and calculate the values needed with those stats
import os
import json
class PADMonster:
def __init__(self):
# initialize the Class's stats
# _max, _min, and _scale are used for when the monster's level is set to something other than its max level
# _bonus used for when awakenings add value to the base stat
self.name = ''
self.hp = 0
self.hp_max = 0
self.hp_min = 0
self.hp_scale = 0
self.hp_plus = 0
self.hp_bonus = 0
self.hp_base = 0
self.rcv_base = 0
self.rcv = 0
self.rcv_max = 0
self.rcv_min = 0
self.rcv_scale = 0
self.rcv_plus = 0
self.rcv_bonus = 0
self.base_base_atk = 0
self.base_atk = 0
self.base_atk_max = 0
self.base_atk_min = 0
self.base_atk_scale = 0
self.base_atk_plus = 0
self.base_atk_bonus = 0
# Array of Attack: atk[attribute]
self.atk = [0, 0, 0, 0, 0]
# Array of Pronged Attack: [attribute][0 = Main, 1 = Sub]
self.pronged_atk = [0, 0, 0, 0, 0]
self.max_level = 99
self.current_level = 99
# 'fire' = 0, 'water' = 1, 'wood' = 2, 'light' = 3, 'dark' = 4
self.attr_main = 0
self.attr_sub = 0
# check if main attribute = sub attribute
self.is_same_attr = False
# save list of attribute types
self.attributes = ['fire', 'water', 'wood', 'light', 'dark']
# see list of types for corresponding index number
self.type_main = 0
self.type_sub = 0
self.type_main_name = ''
self.type_sub_name = ''
# save list of types
self.types = ['Evo Material', 'Balanced', 'Physical', 'Healer', 'Dragon', 'God', 'Attacker',
'Devil', '', '', '', '', 'Awoken Skill Material', 'Protected', 'Enhance Material']
# save leader skill multipliers; leader_skill[0 = hp, 1 = atk, 2 = rcv]
self.leader_skill = [0, 0, 0]
# store image 60x60 size and file location on padherder.com
self.image60_size = 0
self.image60_href = ''
# save amount of each awoken skill
# id: 1 -> Enhanced HP, 2 -> Enhanced Attack, 3 -> Enhanced Heal, 4 -> Reduce Fire Damage,
# 5 -> Reduce Water Damage,
# 6 -> Reduce Wood Damage, 7 -> Reduce Light Damage, 8 -> Reduce Dark Damage, 9 -> Auto-Recover,
# 10 -> Resistance-Bind, 11 -> Resistance-Dark, 12 -> Resistance-Jammers, 13 -> Resistance-Poison,
# 14 -> Enhanced Fire Orbs, 15 -> Enhanced Water Orbs, 16 -> Enhanced Wood Orbs, 17 -> Enhanced Light Orbs,
# 18 -> Enhanced Dark Orbs, 19 -> Extend Time, 20 -> Recover Bind, 21 -> Skill Boost, 22 -> Enhanced Fire Att.,
# 23 -> Enhanced Water Att., 24 -> Enhanced Wood Att., 25 -> Enhanced Light Att., 26 -> Enhanced Dark Att.,
# 27 -> Two-Pronged Attack, 28 -> Resistance-Skill Lock
self.awakenings = [['', '', 0] for x in range(28)]
self.awakenings_names = ['Enhanced HP', 'Enhanced Attack', 'Enhanced Heal', 'Reduce Fire Damage',
'Reduce Water Damage', 'Reduce Wood Damage', 'Reduce Light Damage',
'Reduce Dark Damage', 'Auto-Recover', 'Resistance-Bind', 'Resistance-Dark',
'Resistance-Jammers', 'Resistance-Poison', 'Enhanced Fire Orbs', 'Enhanced Water Orbs',
'Enahnced Wood Orbs', 'Enhanced Light Orbs', 'Enhanced Dark Orbs', 'Extend Time',
'Recover Bind', 'Skill Boost', 'Enhanced Fire Att.', 'Enhanced Water Att.',
'Enhanced Wood Att.', 'Enhanced Light Att.', 'Enhanced Dark Att.',
'Two-Pronged Attack', 'Resistance-Skill Lock']
# open awakenings.txt and load it into a python object using json
self.json_file = open(os.path.join('awakenings.txt'), 'r')
self.json_awakenings = json.loads(self.json_file.read())
# iterate through self.json_awakenings and extract the necessary information into self.awakenings
# awakenings[id-1][name, desc, count]
for awakening in self.json_awakenings:
self.awakenings[awakening['id'] - 1] = [awakening['name'], awakening['desc'], 0]
# leader skill
self.leader_skill_name = ''
self.leader_skill_desc = ''
# [xhp, xatk, xrcv, ['elem/type?', which elem/type?]]
self.leader_skill_effect = [1, 1, 1]
self.json_file = open(os.path.join('leader skills.txt'), 'r')
self.json_leader_skills = json.loads(self.json_file.read())
def set_base_stats(self, name, hp, atk, rcv, attr1, attr2, type1, type2, size, href, awakenings, leader_skill,
level, hp_min, hp_scale, atk_min, atk_scale, rcv_min, rcv_scale):
self.name = name
self.hp = hp
self.hp_base = hp
self.hp_max = hp
self.hp_min = hp_min
self.hp_scale = hp_scale
self.base_atk = atk
self.base_base_atk = atk
self.base_atk_max = atk
self.base_atk_min = atk_min
self.base_atk_scale = atk_scale
self.rcv = rcv
self.rcv_base = rcv
self.rcv_max = rcv
self.rcv_min = rcv_min
self.rcv_scale = rcv_scale
self.max_level = level
self.current_level = level
self.attr_main = attr1
self.attr_sub = attr2
self.type_main = type1
self.type_main_name = self.types[type1]
self.type_sub = type2
if type2:
self.type_sub_name = self.types[type2]
self.image60_size = size
self.image60_href = href
self.leader_skill_name = leader_skill
for awakening in awakenings:
self.awakenings[awakening - 1][2] += 1
# sets _bonus stats if awakenings[0-2][2] a.k.a. the stat bonus awakenings are greater than 1
for x in range(3):
if self.awakenings[x][2] > 0:
if x == 0:
self.hp_bonus = self.awakenings[x][2] * 200
self.hp += self.hp_bonus
self.hp_base = self.hp
if x == 1:
self.base_atk_bonus = self.awakenings[x][2] * 100
self.base_atk += self.base_atk_bonus
self.base_base_atk = self.base_atk
if x == 2:
self.rcv_bonus = self.awakenings[x][2] * 50
self.rcv += self.rcv_bonus
self.rcv_base = self.rcv
# find the leader skills' effects and description in the json library according to the name
for x in range(len(self.json_leader_skills)):
if leader_skill == self.json_leader_skills[x]['name']:
self.leader_skill_desc = self.json_leader_skills[x]['effect']
if 'data' in self.json_leader_skills[x].keys():
self.leader_skill_effect = self.json_leader_skills[x]['data']
self._set_atk_(self.attr_main, self.attr_sub)
self._set_pronged_atk_(self.attr_main, self.attr_sub)
def _set_attr_main_(self, attr):
"""
If the attribute name is valid, set the Class's attr_main value to the value corresponding
to the attr
:param attr: attribute name
"""
if attr.lower() in self.attributes:
self.attr_main = self.attributes.index(attr.lower())
# if attribute is changed, check if main and sub attributes are the same
if self.attr_main == self.attr_sub:
self.is_same_attr = True
else:
self.is_same_attr = False
def _set_attr_sub_(self, attr):
"""
If the attribute name is valid, set the Class's attr_sub value to the value corresponding
to the attr
:param attr: attribute name
"""
if attr.lower() in self.attributes:
self.attr_sub = self.attributes.index(attr.lower())
# if attribute is changed, check if main and sub attributes are the same
if self.attr_main == self.attr_sub:
self.is_same_attr = True
else:
self.is_same_attr = False
def _set_atk_(self, attr1, attr2):
"""
Calculate and set atk for each attribute
:param attr1: value corresponding to main attribute
:param attr2: value corresponding to sub attribute
"""
if attr1 in [0, 1, 2, 3, 4]:
if attr1 != attr2:
self.atk[attr1] = self.base_atk
else:
self.atk[attr1] = self.base_atk * 1.1
if attr2 in [0, 1, 2, 3, 4]:
if attr1 != attr2:
self.atk[attr2] = self.base_atk * (1/3)
def _set_pronged_atk_(self, attr1, attr2):
"""
Calculate and set pronged atk for each attribute
:param attr1: value corresponding to main attribute
:param attr2: value corresponding to sub attribute
"""
if attr1 in [0, 1, 2, 3, 4]:
self.pronged_atk[attr1] = self.atk[attr1] * 1.5 ** self.awakenings[26][2]
if attr2 in [0, 1, 2, 3, 4] and attr1 != attr2:
self.pronged_atk[attr2] = self.atk[attr2] * 1.5 ** self.awakenings[26][2]
def _set_stats_at_level_(self, level):
"""
Modify all stats according to level.
:param level: Level the monster will be set to.
"""
self.current_level = level
self.hp = self._use_growth_formula(self.hp_min, self.hp_max, self.hp_scale)
self.hp += self.hp_bonus
self.hp_base = self.hp
self._set_stats_with_pluses_('hp', self.hp_plus)
self.base_atk = self._use_growth_formula(self.base_atk_min, self.base_atk_max, self.base_atk_scale)
self.base_atk += self.base_atk_bonus
self.base_base_atk = self.base_atk
self._set_stats_with_pluses_('atk', self.base_atk_plus)
self.rcv = self._use_growth_formula(self.rcv_min, self.rcv_max, self.rcv_scale)
self.rcv += self.rcv_bonus
self.rcv_base = self.rcv
self._set_stats_with_pluses_('rcv', self.rcv_plus)
def _use_growth_formula(self, min_value, max_value, scale):
"""
Applies the growth formula to get the values of the specified stat at the current level.
:param min_value: the minimum value of the stat
:param max_value: the maximum value of the stat
:param scale: the scaling rate of the stat
:return: the value of the stat at the current level
"""
value = ((self.current_level - 1) / (self.max_level - 1)) ** scale
value *= (max_value - min_value)
value += min_value
return value
def _set_stats_with_pluses_(self, type, num):
"""
Modify the specified stat according to the specified amount of pluses
:param type: 'hp', 'atk', or 'rcv'
:param num: 0-99, the number of pluses for the specified stat
"""
if type == 'hp':
self.hp_plus = num
self.hp = self.hp_base + self.hp_plus * 10
elif type == 'atk':
self.base_atk_plus = num
self.base_atk = self.base_base_atk + self.base_atk_plus * 5
self._set_atk_(self.attr_main, self.attr_sub)
self._set_pronged_atk_(self.attr_main, self.attr_sub)
elif type == 'rcv':
self.rcv_plus = num
self.rcv = self.rcv_base + self.rcv_plus * 3
| Python | 272 | 41.952206 | 120 | /PAD_Monster.py | 0.558076 | 0.538903 |
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
from Calculator_Screen import CalculatorScreen
from Board_Screen import BoardScreen
from PAD_Monster import PADMonster
from PAD_Team import PADTeam
from PyQt5.QtWidgets import (QVBoxLayout, QHBoxLayout, QWidget, QPushButton, QSplitter, QAction,
QFileDialog, QMainWindow, QStackedWidget, QSplitter)
from PyQt5.QtCore import Qt
import os
import json
from functools import partial
class PADScreen(QStackedWidget):
    """
    Top-level stacked widget with two pages: the team calculator page and the
    puzzle board page. Also installs the File menu (Load / Save / New Team)
    on the hosting main window.
    """
    def __init__(self, main_window):
        """
        Build both pages, the shared page-turner strip and the menu actions.
        :param main_window: the QMainWindow whose menu bar receives the
                            Load/Save/New Team actions
        """
        super().__init__()
        # menu actions for loading, saving and clearing the team
        open_file = QAction('Load Team...', main_window)
        open_file.setShortcut('Ctrl+O')
        open_file.triggered.connect(partial(self._show_dialog_box_, 'Open', main_window))
        save_file = QAction('Save Team...', main_window)
        save_file.setShortcut('Ctrl+S')
        save_file.triggered.connect(partial(self._show_dialog_box_, 'Save', main_window))
        clear_team = QAction('New Team', main_window)
        clear_team.setShortcut('Ctrl+N')
        clear_team.triggered.connect(self.__clear__team__)
        # attach the menu bar actions to the main window's File menu
        menubar = main_window.menuBar()
        file_menu = menubar.addMenu('&File')
        file_menu.addAction(open_file)
        file_menu.addAction(save_file)
        file_menu.addAction(clear_team)
        # page 1: the calculator page
        self.calculator_screen = QWidget(self)
        self.calculator_screen_layout = CalculatorScreen(self)
        self.calculator_screen.setLayout(self.calculator_screen_layout)
        # direct references to the team data owned by the calculator page
        self.pad_team = self.calculator_screen_layout.pad_team
        self.team = self.calculator_screen_layout.team
        # page 2: the board page
        self.board_screen = QWidget(self)
        self.board_screen_layout = BoardScreen(self, self.team, self.pad_team)
        self.board_screen.setLayout(self.board_screen_layout)
        # strip with the page-turning buttons; it is re-parented onto
        # whichever page is currently shown
        self.page_turner = QWidget(main_window)
        page_turner_layout = QHBoxLayout(main_window)
        self.page_turner.setLayout(page_turner_layout)
        self.turn_left = QPushButton('<', main_window)
        page_turner_layout.addWidget(self.turn_left)
        page_turner_layout.addStretch()
        page_turner_layout.addStretch()
        self.turn_right = QPushButton('>', main_window)
        page_turner_layout.addWidget(self.turn_right)
        # the GUI starts on page 1, so there is no page to the left yet
        self.turn_left.hide()
        self.page_one_splitter = QSplitter(Qt.Vertical)
        self.page_one_splitter.addWidget(self.calculator_screen)
        self.page_one_splitter.addWidget(self.page_turner)
        self.addWidget(self.page_one_splitter)
        self.page_two_splitter = QSplitter(Qt.Vertical)
        self.page_two_splitter.addWidget(self.board_screen)
        self.addWidget(self.page_two_splitter)
        self._init_screen_()
        # fixed: connect the back button exactly once here -- the original
        # re-connected both buttons on every page turn, which stacks duplicate
        # slot invocations in Qt
        self.turn_left.clicked.connect(self._go_to_calculator_screen_)
    def _init_screen_(self):
        """Wire the right-arrow button to the board page (called once)."""
        self.turn_right.clicked.connect(self._go_to_board_screen_)
    def _go_to_board_screen_(self, clicked):
        """
        Show page 2 (the board), refreshing its team data from page 1 and
        moving the page-turner strip onto that page.
        :param clicked: unused click event payload
        """
        self.board_screen_layout.team = self.calculator_screen_layout.team
        self.board_screen_layout.team_totals = self.calculator_screen_layout.pad_team
        self.board_screen_layout.set__team(self.board_screen_layout.team)
        self.setCurrentWidget(self.page_two_splitter)
        # re-parent the shared page-turner strip onto this page
        self.page_two_splitter.addWidget(self.page_turner)
        self.turn_right.hide()
        self.turn_left.show()
    def _go_to_calculator_screen_(self, clicked):
        """
        Show page 1 (the calculator) and move the page-turner strip back.
        :param clicked: unused click event payload
        """
        self.turn_left.hide()
        self.turn_right.show()
        # re-parent the shared page-turner strip back onto page 1
        self.page_one_splitter.addWidget(self.page_turner)
        self.setCurrentWidget(self.page_one_splitter)
    def _show_dialog_box_(self, stringname, gui):
        """
        'Open': let the user pick a saved team file and feed its names,
        pluses and levels back through the calculator page's input handlers.
        'Save': write the current team to a json-formatted .txt file.
        :param stringname: 'Open' or 'Save', chosen by the menu action
        :param gui: parent widget for the file dialog
        """
        if stringname == 'Open':
            filename = QFileDialog.getOpenFileName(gui, 'Load Team...', os.path.join('saved teams'),
                                                   'Text files (*.txt)')
            # getOpenFileName returns (path, selected filter); ignore cancels
            if filename[0] and filename[0].endswith('txt'):
                with open(os.path.realpath(filename[0]), 'r') as file:
                    json_content = json.loads(file.read())
                for monster in range(6):
                    name = json_content[monster]['name']
                    hp_plus = json_content[monster]['hp plus']
                    atk_plus = json_content[monster]['atk plus']
                    rcv_plus = json_content[monster]['rcv plus']
                    level = json_content[monster]['level']
                    # push the stored values through the normal input paths so
                    # all dependent labels are recomputed
                    self.calculator_screen_layout.line_edits[monster].setText(name)
                    self.calculator_screen_layout._on_plus_value_activated_(monster, 'hp', hp_plus)
                    self.calculator_screen_layout._on_plus_value_activated_(monster, 'atk', atk_plus)
                    self.calculator_screen_layout._on_plus_value_activated_(monster, 'rcv', rcv_plus)
                    self.calculator_screen_layout._on_level_activated_(monster, level)
        if stringname == 'Save':
            # fixed: the filter string was missing its closing parenthesis
            # ('Text files (*.txt'), producing a malformed dialog filter
            filename = QFileDialog.getSaveFileName(gui, 'Save Team...', os.path.join('saved teams'),
                                                   'Text files (*.txt)')
            # getSaveFileName returns (path, selected filter); ignore cancels
            if filename[0]:
                json_file = [{} for monster in range(6)]
                for monster in range(6):
                    member = self.calculator_screen_layout.team[monster]
                    json_file[monster]['name'] = member.name
                    json_file[monster]['hp plus'] = member.hp_plus
                    json_file[monster]['atk plus'] = member.base_atk_plus
                    json_file[monster]['rcv plus'] = member.rcv_plus
                    json_file[monster]['level'] = member.current_level
                # fixed: don't double the extension when the user already
                # typed '.txt'
                path = filename[0] if filename[0].endswith('.txt') else filename[0] + '.txt'
                with open(os.path.realpath(path), 'w') as file:
                    json.dump(json_file, file)
    def __clear__team__(self):
        """Reset the calculator page to six fresh, empty monsters."""
        for index in range(6):
            self.calculator_screen_layout.line_edits[index].clear()
        self.calculator_screen_layout.team = [PADMonster() for monster in range(6)]
        self.calculator_screen_layout.pad_team = PADTeam(self.calculator_screen_layout.team)
        for index in range(6):
            self.calculator_screen_layout._set_labels_(self.calculator_screen_layout.team[index], index)
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
import os
from PAD_Monster import PADMonster
class PADTeam:
    """Aggregates the stats of a six-monster team and applies the two
    leaders' skills to produce the modified team totals."""
    def __init__(self, team):
        """
        Initializes the PADTeam Class.
        :param team: an array containing 6 PADMonster Classes
        """
        self.team = team
        # raw team totals
        self.hp = 0
        # every atk array is indexed [fire, water, wood, light, dark]
        self.atk = [0, 0, 0, 0, 0]
        # 'base' atk values sum only each member's main-attribute attack
        self.base_atk = 0
        self.pronged_atk = [0, 0, 0, 0, 0]
        self.base_pronged_atk = 0
        self.rcv = 0
        # team totals after both leader skills have been applied
        self.hp_modified = 0
        self.atk_modified = [0, 0, 0, 0, 0]
        self.base_atk_modified = 0
        self.pronged_atk_modified = [0, 0, 0, 0, 0]
        self.base_pronged_atk_modified = 0
        self.rcv_modified = 0
        # human-readable summary of every awakening the team owns
        self.awakenings = ''
        # leader skill effects: [hp multiplier, atk multiplier, rcv multiplier]
        self.leader1_effects = [1, 1, 1]
        self.leader2_effects = [1, 1, 1]
        # per-monster [hp, atk, rcv] multipliers accumulated from the two
        # leader skills (only monsters satisfying a skill's condition qualify)
        self.stats_modified_by = [[1, 1, 1] for monster in range(6)]
        # populate every total from the supplied team
        self.__set__team__hp()
        self.__set__team__rcv()
        self.__set__team__atk()
        self.__set__team__base__atk()
        self.__set__team__awakenings()
        self.__set__modified__stats__()
    def __set__team__hp(self):
        """Sum the six members' hp into self.hp."""
        self.hp = sum(self.team[i].hp for i in range(6))
    def __set__team__rcv(self):
        """Sum the six members' rcv into self.rcv."""
        self.rcv = sum(self.team[i].rcv for i in range(6))
    def __set__team__awakenings(self):
        """Build the multi-line 'name: count' summary of team awakenings."""
        self.awakenings = ''
        for slot in range(len(self.team[0].awakenings)):
            # total instances of this awakening across the whole team
            total = sum(self.team[i].awakenings[slot][2]
                        for i in range(6)
                        if self.team[i].awakenings[slot][2] > 0)
            if total > 0:
                self.awakenings += self.team[0].awakenings[slot][0] + ': ' + str(total) + '\n'
    def __set__team__atk(self):
        """Sum per-attribute atk and pronged atk across the team."""
        self.atk = [0, 0, 0, 0, 0]
        self.pronged_atk = [0, 0, 0, 0, 0]
        for attr in range(5):
            for member in self.team:
                self.atk[attr] += member.atk[attr]
                self.pronged_atk[attr] += member.pronged_atk[attr]
    def __set__team__base__atk(self):
        """Sum each member's main-attribute atk and pronged atk."""
        self.base_atk = 0
        self.base_pronged_atk = 0
        for member in self.team:
            main = member.attr_main
            self.base_atk += member.atk[main]
            self.base_pronged_atk += member.pronged_atk[main]
    def __set__modified__stats__(self):
        """Apply both leaders' skills and store the modified team totals."""
        self.stats_modified_by = [[1, 1, 1] for monster in range(6)]
        # the first and last members of the team are the leaders
        for leader in (0, 5):
            if not self.team[leader].leader_skill_name:
                continue
            effect = self.team[leader].leader_skill_effect
            # effect looks like [hp x, atk x, rcv x]; a conditional skill has
            # a 4th entry ['elem' or 'type', associated number] restricting
            # which monsters receive the multipliers
            if len(effect) > 3:
                kind = effect[3][0]
                num = effect[3][1]
                for monster in range(6):
                    member = self.team[monster]
                    if kind == "elem":
                        qualifies = member.attr_main == num or member.attr_sub == num
                    elif kind == "type":
                        qualifies = member.type_main == num or member.type_sub == num
                    else:
                        qualifies = False
                    if qualifies:
                        for stat in range(3):
                            self.stats_modified_by[monster][stat] *= effect[stat]
            else:
                # unconditional skill: everyone receives the multipliers
                for monster in range(6):
                    for stat in range(3):
                        self.stats_modified_by[monster][stat] *= effect[stat]
        # accumulate the team totals with each member's multipliers applied
        hp_total = 0
        rcv_total = 0
        base_atk_total = 0
        base_pronged_total = 0
        atk_totals = [0, 0, 0, 0, 0]
        pronged_totals = [0, 0, 0, 0, 0]
        for monster in range(6):
            member = self.team[monster]
            hp_mult, atk_mult, rcv_mult = self.stats_modified_by[monster]
            hp_total += member.hp * hp_mult
            rcv_total += member.rcv * rcv_mult
            main = member.attr_main
            base_atk_total += member.atk[main] * atk_mult
            base_pronged_total += member.pronged_atk[main] * atk_mult
            for attr in range(5):
                atk_totals[attr] += member.atk[attr] * atk_mult
                pronged_totals[attr] += member.pronged_atk[attr] * atk_mult
        self.hp_modified = hp_total
        self.atk_modified = atk_totals
        self.base_atk_modified = base_atk_total
        self.pronged_atk_modified = pronged_totals
        self.base_pronged_atk_modified = base_pronged_total
        self.rcv_modified = rcv_total
| Python | 149 | 49.10067 | 113 | /PAD_Team.py | 0.564903 | 0.548292 |
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
from PyQt5.QtWidgets import (QVBoxLayout, QWidget, QLabel, QGridLayout, QSplitter,
QPushButton, QHBoxLayout)
from PyQt5.QtCore import Qt, QMimeData
from PyQt5.QtGui import QPixmap, QDrag
import os
from PAD_Monster import PADMonster
from PAD_Team import PADTeam
from functools import partial
class BoardScreen(QVBoxLayout):
    """Layout for the puzzle-board page: per-monster damage readouts, a
    recovery label, the team icon row, an 8x8 grid of clickable orb cells and
    a button that computes combo damage for the current team."""
    # NOTE(review): these class-level defaults are mutable objects shared by
    # every BoardScreen created without explicit arguments -- confirm intended.
    default_team = [PADMonster() for monster in range(6)]
    default_team_totals = PADTeam(default_team)
    def __init__(self, gui, team=default_team, team_totals=default_team_totals):
        # gui: parent widget for the child widgets created here
        # team: list of 6 PADMonster objects; team_totals: PADTeam for team
        super().__init__()
        self.team = team
        self.team_totals = team_totals
        # NOTE(review): damage_array does not appear to be read anywhere else
        # in this class -- possibly dead state
        self.damage_array = [[{'main attribute': 0, 'sub attribute': 0} for col in range(2)] for row in range(6)]
        self.__init__screen__(gui, self.team, self.team_totals)
    def __init__screen__(self, gui, team, team_totals):
        """Build all child widgets: damage labels, recovery label, team icon
        row, the orb grid and the calculate-damage button."""
        # DAMAGE SCREEN
        damage_screen = QWidget()
        damage_screen_layout = QGridLayout()
        damage_screen.setLayout(damage_screen_layout)
        self.addWidget(damage_screen)
        # one label per monster for main-attribute and sub-attribute damage
        self.damage_labels = [[QLabel(gui) for column in range(2)] for row in range(6)]
        for row in range(6):
            for column in range(2):
                damage_screen_layout.addWidget(self.damage_labels[row][column], row, column)
        # RECOVERY LABEL
        self.hp_recovered = QLabel(gui)
        self.addWidget(self.hp_recovered)
        # BOARD
        board = QWidget()
        board_layout = QGridLayout()
        board.setLayout(board_layout)
        self.addWidget(board)
        # TEAM IMAGES (row 0 of the board grid holds the member portraits)
        self.team_labels = []
        for index in range(6):
            label = QLabel(gui)
            self.team_labels.append(label)
            board_layout.addWidget(label, 0, index)
            board_layout.setAlignment(label, Qt.AlignHCenter)
        self.set__team(team)
        # BOARD (8x8 grid of clickable orb cells, checkerboard background)
        self.board_labels = [[PADLabel(gui) for column in range(8)] for row in range(8)]
        # positions = [(i+1, j) for i in range(8) for j in range(8)]
        light_brown = 'rgb(120, 73, 4)'
        dark_brown = 'rgb(54, 35, 7)'
        color = dark_brown
        # alternate the two browns cell by cell to draw the checkerboard
        for row in self.board_labels:
            for column in row:
                row_index = self.board_labels.index(row)
                col_index = row.index(column)
                column.setStyleSheet("QLabel { background-color: %s }" % color)
                if color == dark_brown and (col_index+1) % 8 != 0:
                    color = light_brown
                elif color == light_brown and (col_index+1) % 8 != 0:
                    color = dark_brown
                board_layout.addWidget(column, row_index+1, col_index)
        #for position, label in zip(positions, self.board_labels):
        #    board_layout.addWidget(label, *position)
        for row in range(9):
            board_layout.setRowStretch(row, 1)
        for column in range(8):
            board_layout.setColumnStretch(column, 1)
        self.board_array = []
        # the standard PAD board is 5 rows x 6 columns
        self.__create__board___(5, 6)
        # CALCULATE DAMAGE BUTTON
        calculate_damage_button = QPushButton('Calculate Damage', gui)
        calculate_damage_button.clicked.connect(partial(self.calculate_damage, team, team_totals))
        self.addWidget(calculate_damage_button)
        # ORBS
        # orb_wrapper = QWidget(gui)
        # orb_wrapper_layout = QHBoxLayout()
        # orb_wrapper.setLayout(orb_wrapper_layout)
        # elements = ['fire', 'water', 'wood', 'light', 'dark']
        # for element in elements:
        #     orb = PADIcon(gui)
        #     orb.setPixmap(QPixmap(os.path.join('icons')+'\\'+element+'.png'))
        #     orb_wrapper_layout.addWidget(orb)
        #
        # self.addWidget(orb_wrapper)
    def __create__board___(self, row, column):
        """Size the logical board to row x column and show only those cells."""
        self.board_array = [['' for column in range(column)] for row in range(row)]
        for row_index in self.board_labels:
            for col_label in row_index:
                col_label.hide()
        for x in range(row):
            for y in range(column):
                self.board_labels[x][y].show()
    def calculate_damage(self, team=default_team, team_totals=default_team_totals):
        """Scan the board for matched orb combos and display each monster's
        damage (and the total hp recovered) with combo multipliers applied."""
        # snapshot the element currently shown in each visible board cell
        for row in range(len(self.board_array)):
            for column in range(len(self.board_array[0])):
                self.board_array[row][column] = self.board_labels[row][column].element
        all_positions = set()
        # 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = heart
        elemental_damage = [{'fire': 0, 'water': 0, 'wood': 0, 'light': 0, 'dark': 0}
                            for monster in range(6)]
        total_hp_recovered = 0
        combo_count = 0
        colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'pink']
        attribute_translator = ['fire', 'water', 'wood', 'light', 'dark', 'heart']
        for row in range(len(self.board_array)):
            for column in range(len(self.board_array[0])):
                combo_length, positions = self.__find__combos__recursively__(self.board_array, row, column)
                # count a combo only once (skip positions already claimed by
                # an earlier combo) and ignore empty cells
                if combo_length >= 3 and not next(iter(positions)) in all_positions and self.board_array[row][column]:
                    print(str(self.board_array[row][column])+":",combo_length,'orb combo.')
                    attribute = attribute_translator.index(self.board_array[row][column])
                    if attribute != 5:
                        # elemental combo: each member deals attack damage;
                        # exactly 4 orbs trigger the two-pronged attack value,
                        # otherwise every orb past 3 adds 25% damage
                        for monster in range(6):
                            if combo_length == 4:
                                damage = team[monster].pronged_atk[attribute] * 1.25
                            else:
                                damage = team[monster].atk[attribute] * (1+0.25*(combo_length-3))
                            elemental_damage[monster][self.board_array[row][column]] += damage
                    else:
                        # heart combo: heal instead of attacking
                        total_rcv = 0
                        for monster in range(6):
                            total_rcv += team[monster].rcv
                        total_hp_recovered += total_rcv * (1+0.25*(combo_length-3))
                        print(total_hp_recovered)
                        print(total_rcv)
                    all_positions |= positions
                    combo_count += 1
        # every combo past the first adds 25% to all damage and recovery
        combo_multiplier = 1+0.25*(combo_count-1)
        for monster in range(6):
            main_attribute = attribute_translator[team[monster].attr_main]
            sub_attribute = ''
            if team[monster].attr_sub or team[monster].attr_sub == 0:
                sub_attribute = attribute_translator[team[monster].attr_sub]
            if sub_attribute:
                if main_attribute != sub_attribute:
                    main_damage = elemental_damage[monster][main_attribute] * combo_multiplier
                    sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier
                else:
                    # identical main/sub attribute: total splits 10/11 + 1/11
                    main_damage = elemental_damage[monster][main_attribute] * combo_multiplier * (10/11)
                    sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier * (1/11)
            else:
                main_damage = elemental_damage[monster][main_attribute] * combo_multiplier
                sub_damage = 0
            self.damage_labels[monster][0].setText(str(main_damage))
            self.damage_labels[monster][0].setStyleSheet("QLabel { color : %s }" % colors[team[monster].attr_main])
            self.damage_labels[monster][1].setText(str(sub_damage))
            if team[monster].attr_sub or team[monster].attr_sub == 0:
                self.damage_labels[monster][1].setStyleSheet("QLabel { color : %s }" % colors[team[monster].attr_sub])
        total_hp_recovered *= combo_multiplier
        self.hp_recovered.setText(str(total_hp_recovered))
        self.hp_recovered.setStyleSheet("QLabel { color : %s }" % colors[5])
    def set__team(self, team):
        """Load each member's portrait into the team icon row; members whose
        image file cannot be loaded are silently skipped."""
        for label, member in zip(self.team_labels, team):
            try:
                image = QPixmap(os.path.join('images')+'/'+member.name+'.png')
                image.scaled(75, 75)
                label.setPixmap(image)
            except Exception: pass
    def __find__combos__recursively__(self, array, row, column):
        """Measure the connected run of identical orbs through (row, column),
        recursing from the far end of each horizontal/vertical run, and
        collect the board positions the combo covers.
        Returns (combo_length, set of (row, col) positions)."""
        combo_length = 0
        positions = set()
        row_length = self.checkIndexInRow(array, row, column)
        if row_length >= 3:
            # continue the search from the rightmost orb of the horizontal run
            more_length, more_positions = self.__find__combos__recursively__(array, row, column+row_length-1)
            combo_length += row_length + more_length - 1
            positions |= more_positions
            for col_index in range(row_length):
                positions.add((row, column+col_index))
        column_length = self.checkIndexInColumn(array, row, column)
        if column_length >= 3:
            # continue the search from the bottom orb of the vertical run
            more_length, more_positions = self.__find__combos__recursively__(array, row+column_length-1, column)
            combo_length += column_length + more_length - 1
            positions |= more_positions
            for row_index in range(column_length):
                positions.add((row+row_index, column))
        if row_length >= 3 and column_length >= 3:
            # the pivot orb was counted by both runs; drop the duplicate
            return combo_length - 1, positions
        elif row_length < 3 and column_length < 3:
            return 1, positions
        return combo_length, positions
    def checkIndexInRow(self, array, row, col_index):
        """Length of the rightward run of identical orbs starting at
        (row, col_index); only measured when the row holds at least 3 of this
        element and the previous cell does not continue the run."""
        combo_length = 0
        if array[row].count(array[row][col_index]) >= 3:
            if col_index > 0:
                if array[row][col_index - 1] != array[row][col_index]:
                    combo_length += self.recurseThroughRow(array, row, col_index)
            else:
                combo_length += self.recurseThroughRow(array, row, col_index)
        return combo_length
    def recurseThroughRow(self, array, row, col_index, count=1):
        """Count consecutive orbs equal to (row, col_index) moving right."""
        if array[row][col_index + count] == array[row][col_index]:
            count += 1
            if col_index + count < len(array[row]):
                return self.recurseThroughRow(array, row, col_index, count)
            else:
                return count
        else:
            return count
    def checkIndexInColumn(self, array, row_index, col):
        """Length of the downward run of identical orbs starting at
        (row_index, col); only measured when the remaining column holds at
        least 3 of this element and the cell above does not continue the run."""
        elements_in_column = []
        combo_length = 0
        for index in range(row_index, len(array)):
            elements_in_column.append(array[index][col])
        if elements_in_column.count(array[row_index][col]) >= 3:
            if row_index > 0:
                if array[row_index][col] != array[row_index - 1][col]:
                    combo_length += self.recurseThroughCol(array, row_index, col)
            else:
                combo_length += self.recurseThroughCol(array, row_index, col)
        return combo_length
    def recurseThroughCol(self, array, row_index, col, count=1):
        """Count consecutive orbs equal to (row_index, col) moving down."""
        if array[row_index + count][col] == array[row_index][col]:
            count += 1
            if row_index + count < len(array):
                return self.recurseThroughCol(array, row_index, col, count)
            else:
                return count
        else:
            return count
class PADLabel(QLabel):
    """A board cell: left-clicking cycles through the six orb types, and it
    accepts image drops from draggable orb icons."""
    def __init__(self, gui):
        super().__init__(gui)
        self.setAcceptDrops(True)
        self.setMouseTracking(True)
        self.setScaledContents(True)
        # index into self.colors of the orb currently shown; -1 means empty
        self.color_counter = -1
        self.colors = ['fire', 'water', 'wood', 'light', 'dark', 'heart']
        # name of the element currently displayed ('' while empty)
        self.element = ''
        self.setFixedSize(75, 75)
    def mousePressEvent(self, click):
        """Cycle to the next orb type on left click and show its icon."""
        if click.button() == Qt.LeftButton and self.rect().contains(click.pos()):
            # wrap around after the last color
            if self.color_counter != 5:
                self.color_counter += 1
            else:
                self.color_counter = 0
            self.element = self.colors[self.color_counter]
            icon = QPixmap(os.path.join('icons')+'/'+self.element+'.png')
            # NOTE(review): scaled() returns a new pixmap and its result is
            # discarded here; setScaledContents(True) handles the sizing anyway
            icon.scaled(75, 75)
            self.setPixmap(icon)
    def dragEnterEvent(self, event):
        """Accept drags that carry image data, ignore anything else."""
        if event.mimeData().hasImage():
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        """Display the dropped image in this cell.

        Fixed: the original used C++ template syntax (value<QImage>()), which
        is not valid Python, and QImage was never imported. Convert the
        dropped image data to a pixmap instead.
        """
        image = QPixmap.fromImage(event.mimeData().imageData())
        self.setPixmap(image)
class PADIcon(QLabel):
    """An orb icon that starts a drag of its pixmap when left-clicked."""
    def __init__(self, gui):
        super().__init__()
        self.gui = gui
        self.setMouseTracking(True)
        self.location = self.rect()
    def mousePressEvent(self, click):
        """Begin a drag carrying this icon's image on a left click inside
        the widget."""
        inside = self.rect().contains(click.pos())
        if click.button() == Qt.LeftButton and inside:
            print('On it!')
            drag = QDrag(self.gui)
            payload = QMimeData()
            payload.setImageData(self.pixmap().toImage())
            drag.setMimeData(payload)
            drag.setPixmap(self.pixmap())
            dropAction = drag.exec()
acheng6845/PuzzleSolver | refs/heads/master | __author__ = 'Aaron'
# Class Description:
# Update our monsters.txt file and our images folder
from urllib3 import urllib3
import shutil
import os
import json
class image_updater():
    """Download the 60px portrait for each team monster listed in
    monsters.txt from padherder.com into the images directory."""
    def __init__(self):
        # fixed: read the metadata with a context manager -- the original
        # opened monsters.txt and never closed the handle
        with open(os.path.realpath('./monsters.txt'), 'r') as json_file:
            self.json_object = json.loads(json_file.read())
        path = os.path.realpath('images')
        # only fetch images for these team members
        team = ['Sparkling Goddess of Secrets, Kali', 'Holy Night Kirin Princess, Sakuya',
                'Soaring Dragon General, Sun Quan', 'divine law goddess, valkyrie rose']
        for x in range(len(self.json_object)):
            name = self.json_object[x]["name"]
            if name not in team:
                continue
            url = 'https://padherder.com' + self.json_object[x]["image60_href"]
            request = urllib3.PoolManager().request('GET', url)
            try:
                os.chdir(path)
                target = os.path.join(path, name + '.png')
                # skip files that were already downloaded
                if not os.access(target, os.F_OK):
                    with open(target, 'wb') as out:
                        out.write(request.data)
                else:
                    print(name + '.png already exists.')
            finally:
                # fixed: always return the connection to the pool -- the
                # original released it only when a new file was written
                request.release_conn()
if __name__ == '__main__':
    # running as a script: download any missing team images
    updater = image_updater()
lonce/dcn_soundclass | refs/heads/master | """
eg
python testPickledModel.py logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/state.pickle
"""
import tensorflow as tf
import numpy as np
import pickledModel
from PIL import TiffImagePlugin
from PIL import Image
# get args from command line
import argparse
FLAGS = None
VERBOSE=False
# ------------------------------------------------------
# get any args provided on the command line
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('pickleFile', type=str, help='stored graph' )
FLAGS, unparsed = parser.parse_known_args()
# spectrogram dimensions the stored model expects
k_freqbins=257
k_width=856
# rebuild the tensorflow graph from the pickled state file
styg = pickledModel.load(FLAGS.pickleFile)
print(' here we go ........')
def soundfileBatch(slist):
    """Load the spectrogram image for every file name in slist and return
    the images as a list (in the same order)."""
    images = []
    for fname in slist:
        images.append(pickledModel.loadImage(fname))
    return images
#just test the validation set
#Flipping and scaling seem to have almost no effect on the classification accuracy
rimages=soundfileBatch(['data2/validate/205 - Chirping birds/5-242490-A._11_.tif',
                        'data2/validate/205 - Chirping birds/5-242491-A._12_.tif',
                        'data2/validate/205 - Chirping birds/5-243448-A._14_.tif',
                        'data2/validate/205 - Chirping birds/5-243449-A._15_.tif',
                        'data2/validate/205 - Chirping birds/5-243450-A._15_.tif',
                        'data2/validate/205 - Chirping birds/5-243459-A._13_.tif',
                        'data2/validate/205 - Chirping birds/5-243459-B._13_.tif',
                        'data2/validate/205 - Chirping birds/5-257839-A._10_.tif',
                        'data2/validate/101 - Dog/5-203128-A._4_.tif',
                        'data2/validate/101 - Dog/5-203128-B._5_.tif',
                        'data2/validate/101 - Dog/5-208030-A._9_.tif',
                        'data2/validate/101 - Dog/5-212454-A._4_.tif',
                        'data2/validate/101 - Dog/5-213855-A._4_.tif',
                        'data2/validate/101 - Dog/5-217158-A._2_.tif',
                        'data2/validate/101 - Dog/5-231762-A._1_.tif',
                        'data2/validate/101 - Dog/5-9032-A._12_.tif',
                        ])
im=np.empty([1,1,k_width,k_freqbins ])
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
# feed each validation image through the restored graph and print the
# softmax predictions
with tf.Session() as sess:
    predictions=[]
    sess.run ( tf.global_variables_initializer ())
    #print('ok, all initialized')
    # debug dump of all global variables (disabled)
    if 0 :
        print ('...GLOBAL_VARIABLES :') #probably have to restore from checkpoint first
        all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        for v in all_vars:
            v_ = sess.run(v)
            print(v_)
    # debug dump of the named weight/bias tensors (disabled)
    if 0 :
        for v in ["s_w1:0", "s_b1:0", "s_w2:0", "s_b2:0", "s_W_fc1:0", "s_b_fc1:0", "s_W_fc2:0", "s_b_fc2:0"] :
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v)))
    # dump the first hidden-layer activation for one sample image
    if 1 :
        for v in ["s_h1:0"] :
            #im = np.reshape(np.transpose(rimages[6]), [1,k_width*k_freqbins ])
            im=rimages[6]
            print('assigning input variable an image with shape ' + str(im.shape))
            sess.run(styg["X"].assign(im)) #transpose to make freqbins channels
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v)))
    print('predictions are : ')
    for im_ in rimages :
        #im = np.reshape(np.transpose(im_), [1,k_width*k_freqbins ])
        im=im_
        sess.run(styg["X"].assign(im)) #transpose to make freqbins channels
        prediction = sess.run(styg["softmax_preds"])
        print(str(prediction[0]))
        #predictions.extend(prediction[0])
    #pickledModel.save_image(np.transpose(im, [0,3,2,1])[0,:,:,0],'fooimage.tif')
    # save the last image fed through the graph for visual inspection
    pickledModel.save_image(im[0,:,:,:],'fooimage.tif')
| Python | 102 | 31.382353 | 105 | /testPickledModel.py | 0.679177 | 0.602603 |
lonce/dcn_soundclass | refs/heads/master | import os
import numpy as np
import matplotlib.pyplot as plt
# https://github.com/librosa/librosa
import librosa
import librosa.display
import scipy
from PIL import TiffImagePlugin
from PIL import Image
import tiffspect
# Set some project parameters
K_SR = 22050
K_FFTSIZE = 512 # also used for window length where that parameter is called for
K_HOP = 128
K_DUR = 5.0 # make all files this duration
K_FRAMEMULTIPLEOF = 4 # some programs like to have convinent dimensions for conv and decimation
# the last columns of a matrix are removed if necessary to satisfy
# 1 means any number of frames will work
# location of subdirectories of ogg files organized by category
K_OGGDIR = '/home/lonce/tflow/DATA-SETS/ESC-50'
# location to write the wav files (converted from ogg)
K_WAVEDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-wave'
# location to write the spectrogram files (converted from wave files)
K_SPECTDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-spect'
#===============================================
def get_subdirs(a_dir):
    """Return the names of the non-hidden subdirectories of *a_dir*.

    Entries whose name starts with '.' are skipped; order follows
    os.listdir (i.e. arbitrary).
    """
    subdirs = []
    for entry in os.listdir(a_dir):
        if entry.startswith('.'):
            continue
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(entry)
    return subdirs
def listDirectory(directory, fileExtList):
    """Return (fullPaths, names) for the entries of *directory*.

    *names* holds every non-hidden entry (normcased), regardless of
    extension; *fullPaths* holds joined paths for only the names whose
    extension appears in *fileExtList* (include the '.' in your
    extension string, e.g. '.wav').
    """
    names = []
    for entry in os.listdir(directory):
        if not entry.startswith('.'):
            names.append(os.path.normcase(entry))
    full_paths = []
    for name in names:
        _, ext = os.path.splitext(name)
        if ext in fileExtList:
            full_paths.append(os.path.join(directory, name))
    return full_paths, names
def dirs2labelfile(parentdir, labelfile):
    """Write the subdirectory names of *parentdir* to *labelfile*, one per line.

    Used to produce a class-label file from the per-class data folders.
    """
    labels = get_subdirs(parentdir)
    with open(labelfile, mode='wt') as out:
        out.write('\n'.join(labels))
# ===============================================
def stereo2mono(data) :
    """Mix a 2-D (n_samples x 2) stereo array down to mono float32.

    Channels are averaged sample-by-sample.
    Deprecated, since we use librosa (mono=True) for this now.

    Parameters:
        data - numpy array of shape (n_samples, n_channels)
    Returns:
        float32 array of shape (n_samples,).  For non-2D input a warning
        is printed and an all-zero array is returned (the previous
        version returned uninitialized memory in that case).
    """
    print('converting stereo data of shape ' + str(data.shape))
    if data.ndim != 2 :
        print('You are calling stero2mono on a non-2D array')
        return np.zeros(data.shape[0], dtype=np.float32)
    # Vectorized channel average replaces the per-sample Python loop.
    outdata = data.mean(axis=1).astype(np.float32)
    print('   converting stereo to mono, with outdata shape = ' + str(outdata.shape))
    return outdata
# ===============================================
def esc50Ogg2Wav (topdir, outdir, dur, srate) :
    """
    Creates regularlized wave files for the ogg files in the ESC-50 dataset.
    Creates class folders for the wav files in outdir with the same structure found in topdir.
    Parameters
        topdir - the ESC-50 dir containing class folders.
        outdir - the top level directory to write wave files to (written in to class subfolders)
        dur - (in seconds) all files will be truncated or zeropadded to have this duration given the srate
        srate - input files will be resampled to srate as they are read in before being saved as wav files
    """
    # target length in samples for every output file
    sample_length = int(dur * srate)
    try:
        os.stat(outdir)     # test for existence
    except:
        os.mkdir(outdir)    # create if necessary
    subdirs = get_subdirs(topdir)
    for subdir in subdirs :
        try:
            os.stat(outdir + '/' + subdir)      # test for existence
        except:
            os.mkdir(outdir + '/' + subdir)     # create if necessary
            print('creating ' + outdir + '/' + subdir)
        fullpaths, _ = listDirectory(topdir + '/' + subdir, '.ogg')
        for idx in range(len(fullpaths)) :
            fname = os.path.basename(fullpaths[idx])
            # librosa.load resamples to sr, clips to duration, combines channels.
            audiodata, samplerate = librosa.load(fullpaths[idx], sr=srate, mono=True, duration=dur)  # resamples if necessary (some esc-50 files are in 48K)
            # just checking ..... librosa should already have normalized rate and channels
            if (samplerate != srate) :
                print('You got a sound file ' + subdir + '/' + fname + ' with sample rate ' + str(samplerate) + '!')
                print(' ********* BAD SAMPLE RATE ******** ')
            if (audiodata.ndim != 1) :
                print('You got a sound file ' + subdir + '/' + fname + ' with ' + str(audiodata.ndim) + ' channels!')
                audiodata = stereo2mono(audiodata)
            # enforce exact sample_length: truncate long files ...
            if (len(audiodata) > sample_length) :
                print('You got a long sound file ' + subdir + '/' + fname + ' with shape ' + str(audiodata.shape) + '!')
                audiodata = np.resize(audiodata, sample_length)
                # print(' ..... and len(audiodata) = ' + str(len(audiodata)) + ', while sample_length is sposed to be ' + str(sample_length))
                print('trimming data to shape ' + str(audiodata.shape))
            # ... and zero-pad short ones
            if (len(audiodata) < sample_length) :
                print('You got a short sound file ' + subdir + '/' + fname + ' with shape ' + str(audiodata.shape) + '!')
                audiodata = np.concatenate([audiodata, np.zeros((sample_length-len(audiodata)))])
                print(' zero padding data to shape ' + str(audiodata.shape))
            # write the file out as a wave file
            librosa.output.write_wav(outdir + '/' + subdir + '/' + os.path.splitext(fname)[0] + '.wav', audiodata, samplerate)
# ===============================================
def wav2spect(fname, srate, fftSize, fftHop, dur=None, showplt=False, dcbin=True, framesmulitpleof=1) :
    """Compute a log-magnitude (dB) spectrogram for one audio file.

    Parameters:
        fname - path to the audio file (read via librosa at *srate*, mono)
        srate - sample rate to resample to on load
        fftSize - FFT size; also used as the window length
        fftHop - hop size in samples
        dur - optional maximum duration (seconds) to read
        showplt - if truthy, display the spectrogram (opens a new figure
                  per call — dangerous in long batch runs); the value is
                  also used as the plot title
        dcbin - if False, the DC (freq 0) row is removed
        framesmulitpleof - [sic] trailing columns are dropped so the frame
                  count is a multiple of this (1 keeps all frames)
    Returns:
        2-D numpy array of dB values (amplitude_to_db, ref=np.max),
        or None if the file could not be read.
    """
    try:
        audiodata, samplerate = librosa.load(fname, sr=srate, mono=True, duration=dur)
    except:
        print('can not read ' + fname)
        return
    S = np.abs(librosa.stft(audiodata, n_fft=fftSize, hop_length=fftHop, win_length=fftSize, center=False))
    if (dcbin == False) :
        S = np.delete(S, (0), axis=0) # delete freq 0 row
        #note: a pure DC input signal bleeds into bin 1, too.
    #trim the non-mulitple fat if necessary
    nr, nc = S.shape
    fat = nc%framesmulitpleof
    for num in range(0,fat):
        S = np.delete(S, (nc-1-num), axis=1)
    D = librosa.amplitude_to_db(S, ref=np.max)
    if showplt : # Dangerous for long runs - it opens a new figure for each file!
        librosa.display.specshow(D, y_axis='linear', x_axis='time', sr=srate, hop_length=fftHop)
        plt.colorbar(format='%+2.0f dB')
        plt.title(showplt)
        plt.show(block=True)
    return D
# ===============================================
def esc50Wav2Spect(topdir, outdir, dur, srate, fftSize, fftHop, showplt=False, dcbin=True) :
    """
    Creates spectrograms for subfolder-labeled wavfiles.
    Creates class folders for the spectrogram files in outdir with the same structure found in topdir.
    Parameters
        topdir - the dir containing class folders containing wav files.
        outdir - the top level directory to write spectrogram files to (written in to class subfolders)
        dur - (in seconds) all files will be truncated or zeropadded to have this duration given the srate
        srate - input files will be resampled to srate as they are read in
        fftSize, fftHop - STFT analysis parameters (see wav2spect)
        showplt - forwarded to wav2spect; if truthy, display each spectrogram
        dcbin - forwarded to wav2spect; if False, the DC (freq 0) row is dropped
    """
    try:
        os.stat(outdir)     # test for existence
    except:
        os.mkdir(outdir)    # create if necessary
    subdirs = get_subdirs(topdir)
    count = 0
    for subdir in subdirs :
        try:
            os.stat(outdir + '/' + subdir)      # test for existence
        except:
            os.mkdir(outdir + '/' + subdir)     # create if necessary
            print('creating ' + outdir + '/' + subdir)
        fullpaths, _ = listDirectory(topdir + '/' + subdir, '.wav')
        for idx in range(len(fullpaths)) :
            fname = os.path.basename(fullpaths[idx])
            # BUG FIX: previously the dcbin and showplt arguments of this
            # function were ignored — the call below hard-coded dcbin=True
            # and showplt=False.  Now they are passed through.
            D = wav2spect(fullpaths[idx], srate, fftSize, fftHop, dur=dur, dcbin=dcbin, showplt=showplt, framesmulitpleof=K_FRAMEMULTIPLEOF)
            tiffspect.logSpect2Tiff(D, outdir + '/' + subdir + '/' + os.path.splitext(fname)[0] + '.tif')
            print(str(count) + ': ' + subdir + '/' + os.path.splitext(fname)[0])
            count +=1
# ===============================================
# DO IT — pipeline driver: each stage was run once, then commented out.
#esc50Ogg2Wav(K_OGGDIR, K_WAVEDIR, K_DUR, K_SR)
#esc50Wav2Spect(K_WAVEDIR, K_SPECTDIR, K_DUR, K_SR, K_FFTSIZE, K_HOP, dcbin=True)
# Write the class-label file derived from the spectrogram subfolders.
dirs2labelfile(K_SPECTDIR, K_SPECTDIR + '/labels.text')
| Python | 214 | 44.752335 | 158 | /utils/ESC50_Convert.py | 0.579598 | 0.57102 |
lonce/dcn_soundclass | refs/heads/master | #
#
#Morgans great example code:
#https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
#
# GitHub utility for freezing graphs:
#https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py
#
#https://www.tensorflow.org/api_docs/python/tf/graph_util/convert_variables_to_constants
import tensorflow as tf
import numpy as np
#global variables
g_st_saver=None
g_chkptdir=None
g_trainedgraph=None
VERBOSE=1
#-------------------------------------------------------------
def load(meta_model_file, restore_chkptDir) :
    """Import a trained graph from its .meta file and remember the checkpoint dir.

    Variable values are NOT restored here; call initialize_variables(sess)
    once a session exists.

    Parameters:
        meta_model_file - path to the saved .meta graph definition
        restore_chkptDir - directory whose latest checkpoint will be restored later
    Returns:
        (graph, saver) - the imported default graph and its Saver
    """
    global g_st_saver
    global g_chkptdir
    global g_trainedgraph
    g_st_saver = tf.train.import_meta_graph(meta_model_file)
    # Access the graph
    g_trainedgraph = tf.get_default_graph()
    # Save for use during initialize_variables().  (The previous version
    # opened a throwaway tf.Session just to perform this assignment.)
    g_chkptdir = restore_chkptDir
    return g_trainedgraph, g_st_saver
def initialize_variables(sess) :
    """Restore trained variable values into *sess* from the latest checkpoint.

    Also registers the custom 'useful' collection key so callers can look
    up the tensors stashed there at training time via
    tf.get_collection(tf.GraphKeys.USEFUL).
    """
    g_st_saver.restore(sess, tf.train.latest_checkpoint(g_chkptdir))
    tf.GraphKeys.USEFUL = 'useful'
| Python | 62 | 30.709677 | 102 | /trainedModel.py | 0.712544 | 0.695277 |
lonce/dcn_soundclass | refs/heads/master | """
eg
python testModel.py logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/my-model.meta logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/checkpoints/
"""
import tensorflow as tf
import numpy as np
import trainedModel
from PIL import TiffImagePlugin
from PIL import Image
# get args from command line
import argparse
FLAGS = None
VERBOSE=False
# ------------------------------------------------------
# Command-line arguments: the stored graph and its checkpoint directory.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('metamodel', type=str, help='stored graph' )
parser.add_argument('checkptDir', type=str, help='the checkpoint directory from where the latest checkpoint will be read to restore values for variables in the graph' )
FLAGS, unparsed = parser.parse_known_args()
# Spectrogram input dimensions — must match what the model was trained on.
k_freqbins=257
k_width=856
g, savior = trainedModel.load(FLAGS.metamodel, FLAGS.checkptDir)
#vnamelist =[n.name for n in tf.global_variables()]
# Optional debug dump of the trainable variables in the imported graph.
if VERBOSE :
    vnamelist =[n.name for n in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
    print('TRAINABLE vars:')
    for n in vnamelist :
        print(n)
#opslist = [n.name for n in g.get_operations()]
#print('----Operatios in graph are : ' + str(opslist))
# The training script stashed its I/O tensors in a custom 'useful' collection.
tf.GraphKeys.USEFUL = 'useful'
if VERBOSE :
    print ('...and useful :') #probalby have to restore from checkpoint first
    all_vars = tf.get_collection(tf.GraphKeys.USEFUL)
    for v in all_vars:
        print(v)
#
#print(' here we go ........')
var_list = tf.get_collection(tf.GraphKeys.USEFUL)
# Collection order was fixed at training time:
####tf.add_to_collection(tf.GraphKeys.USEFUL, X) #input place holder
####tf.add_to_collection(tf.GraphKeys.USEFUL, keepProb) #place holder
####tf.add_to_collection(tf.GraphKeys.USEFUL, softmax_preds)
####tf.add_to_collection(tf.GraphKeys.USEFUL, h1)
####tf.add_to_collection(tf.GraphKeys.USEFUL, h2)
#X = g.get_tensor_by_name('X/Adam:0')# placeholder for input
#X = tf.placeholder(tf.float32, [None,k_freqbins*k_width], name= "X")
X=var_list[0]          # input placeholder
#print('X is ' + str(X))
#keepProb = g.get_tensor_by_name('keepProb')
#keepProb=tf.placeholder(tf.float32, (), name= "keepProb")
keepProb=var_list[1]   # dropout keep-probability placeholder
#print('keepProb is ' + str(keepProb))
softmax_preds=var_list[2]
assert softmax_preds.graph is tf.get_default_graph()
def soundfileBatch(slist) :
    """Load each TIFF in *slist* as a flat numpy vector scaled by 255.

    Mirrors the training pipeline, which scales pixel values to [0, 255]
    and flattens each spectrogram before stuffing it into batches.
    """
    batch = []
    for name in slist:
        img = Image.open(name).point(lambda i: i * 255)
        batch.append(np.array(img).flatten())
    return batch
#just test the validation set
#Flipping and scaling seem to have almost no effect on the clasification accuracy
# Hard-coded validation images from two classes (birds, then dogs).
rimages=soundfileBatch(['data2/validate/205 - Chirping birds/5-242490-A._11_.tif',
    'data2/validate/205 - Chirping birds/5-242491-A._12_.tif',
    'data2/validate/205 - Chirping birds/5-243448-A._14_.tif',
    'data2/validate/205 - Chirping birds/5-243449-A._15_.tif',
    'data2/validate/205 - Chirping birds/5-243450-A._15_.tif',
    'data2/validate/205 - Chirping birds/5-243459-A._13_.tif',
    'data2/validate/205 - Chirping birds/5-243459-B._13_.tif',
    'data2/validate/205 - Chirping birds/5-257839-A._10_.tif',
    'data2/validate/101 - Dog/5-203128-A._4_.tif',
    'data2/validate/101 - Dog/5-203128-B._5_.tif',
    'data2/validate/101 - Dog/5-208030-A._9_.tif',
    'data2/validate/101 - Dog/5-212454-A._4_.tif',
    'data2/validate/101 - Dog/5-213855-A._4_.tif',
    'data2/validate/101 - Dog/5-217158-A._2_.tif',
    'data2/validate/101 - Dog/5-231762-A._1_.tif',
    'data2/validate/101 - Dog/5-9032-A._12_.tif',
    ])
#rimages=np.random.uniform(0.,1., (3,k_freqbins*k_width))
#print('got my image, ready to run!')
#Z = tf.placeholder(tf.float32, [k_freqbins*k_width], name= "Z")
#Y=tf.Variable(tf.truncated_normal([k_freqbins*k_width], stddev=0.1), name="Y")
#Y=tf.assign(Y,Z)
#with tf.Session() as sess:
#    sess.run ( tf.global_variables_initializer ())
#    foo = sess.run(Y, feed_dict={Z: rimage})
print(' here we go ........')
# Compact float printing for the prediction vectors below.
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
with tf.Session() as sess:
    #sess.run ( tf.global_variables_initializer ())
    #savior.restore(sess, tf.train.latest_checkpoint(FLAGS.checkptDir))
    # Restore trained weights from the latest checkpoint.
    trainedModel.initialize_variables(sess)
    # Debug (disabled): dump every global variable value.
    if 0 :
        print ('...GLOBAL_VARIABLES :') #probalby have to restore from checkpoint first
        all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        for v in all_vars:
            v_ = sess.run(v)
            print(v_)
    # Debug (disabled): print the trained weight tensors by name.
    if 0 :
        for v in ["w1:0", "b1:0", "w2:0", "b2:0", "W_fc1:0", "b_fc1:0", "W_fc2:0", "b_fc2:0"] :
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v)))
    # Debug (enabled): feed one image and print first hidden-layer activations.
    if 1 :
        for v in ["h1:0"] :
            im = np.reshape(rimages[6], [1,k_width*k_freqbins ])
            print(tf.get_default_graph().get_tensor_by_name(v))
            print(sess.run(tf.get_default_graph().get_tensor_by_name(v), feed_dict ={ X : im, keepProb : 1.0 }))
    # Classify each validation image one at a time (batch size 1).
    print('predictions are : ')
    for im_ in rimages :
        im = np.reshape(im_, [1,k_width*k_freqbins ])
        prediction = sess.run(softmax_preds, feed_dict ={ X : im, keepProb : 1.0 })
        print(str(prediction[0]))
    # Run the standard way .... in batches
    #predictions = sess.run(softmax_preds, feed_dict ={ X : rimages , keepProb : 1.0 })
    #print('predictions are : ')
    #print(str(predictions))
lonce/dcn_soundclass | refs/heads/master |
""" An implementation of the paper "A Neural Algorithm of Artistic Style"
by Gatys et al. in TensorFlow.
Author: Chip Huyen (huyenn@stanford.edu)
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
For more details, please read the assignment handout:
http://web.stanford.edu/class/cs20si/assignments/a2.pdf
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import tensorflow as tf
import pickledModel
# get args from command line
import argparse
FLAGS = []
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--content', type=str, help='name of file in content dir, w/o .ext' )
parser.add_argument('--style', type=str, help='name of file in style dir, w/o .ext' )
parser.add_argument('--noise', type=float, help='in range [0,1]', default=.5 )
parser.add_argument('--iter', type=int, help='number of iterations (on cpu, runtime is less than 1 sec/iter)', default=600 )
parser.add_argument('--alpha', type=float, help='amount to weight conent', default=10 )
parser.add_argument('--beta', type=float, help='amount to weight style', default=200 )
parser.add_argument('--randomize', type=int, help='0: use trained weights, 1: randomize model weights', choices=[0,1], default=0 )
parser.add_argument('--weightDecay', type=float, help='factor for L2 loss to keep vals in [0,255]', default=.01 )
parser.add_argument('--outdir', type=str, help='for output images', default="." )
parser.add_argument('--stateFile', type=str, help='stored graph', default=None )
FLAGS, unparsed = parser.parse_known_args()
print('\n FLAGS parsed : {0}'.format(FLAGS))
# --content and --style have no defaults, so a missing required argument
# shows up here as None.
if any(v is None for v in vars(FLAGS).values()) :
    print('All args are required with their flags. For help: python style_transfer --help')
    sys.exit()
CHECKPOINTING=False
FILETYPE = ".tif"
# parameters to manage experiments
STYLE = FLAGS.style
CONTENT = FLAGS.content
# NOTE(review): both images are read from the 'content/' directory — confirm intended.
STYLE_IMAGE = 'content/' + STYLE + FILETYPE
CONTENT_IMAGE = 'content/' + CONTENT + FILETYPE
# This seems to be the paramter that really controls the balance between content and style
# The more noise, the less content
NOISE_RATIO = FLAGS.noise # percentage of weight of the noise for intermixing with the content image
# Layers used for style features. You can change this.
STYLE_LAYERS = ['h1', 'h2']
W = [1.0, 2.0] # give more weights to deeper layers.
# Layer used for content features. You can change this.
CONTENT_LAYER = 'h2'
#Relationship a/b is 1/20
ALPHA = FLAGS.alpha #content
BETA = FLAGS.beta #style
LOGDIR = FLAGS.outdir + '/log_graph' #create folder manually
CHKPTDIR = FLAGS.outdir + '/checkpoints' # create folder manually
OUTPUTDIR = FLAGS.outdir
ITERS = FLAGS.iter
LR = 2.0               # Adam learning rate for the image optimization
WEIGHT_DECAY=FLAGS.weightDecay
def _create_range_loss(im) :
    """L2 penalty on pixel values falling outside [0, 255].

    Values above 255 contribute their excess, values below 0 contribute
    their (negative) amount; in-range pixels contribute nothing.
    The result is scaled by WEIGHT_DECAY.
    """
    excess = tf.add(tf.maximum(im - 255, 0), tf.minimum(im, 0))
    return WEIGHT_DECAY * tf.nn.l2_loss(excess)
def _create_content_loss(p, f):
    """ Calculate the loss between the feature representation of the
    content image and the generated image.
    Inputs:
        p, f are just P, F in the paper: p is the (precomputed numpy)
        content features, f the tensor of generated-image features.
    Output:
        the content loss tensor
    """
    pdims=p.shape
    #print('p has dims : ' + str(pdims))
    # Normalizing constant: total element count of the feature map.
    coef = np.multiply.reduce(pdims) # Hmmmm... maybe don't want to include the first dimension
    #this makes the loss 0!!!
    #return (1/4*coef)*tf.reduce_sum(tf.square(f-p))
    # Sum of squared feature differences, scaled by 1/(4*coef).
    return tf.reduce_sum((f-p)**2)/(4*coef)
def _gram_matrix(F, N, M):
    """Return the N x N Gram matrix of feature tensor *F*.

    inputs: F: the tensor of all feature channels in a given layer
            N: number of features (channels) in the layer
            M: total number of positions per feature map (length * height)

    F is reshaped to (M, N) — M spatial positions by N channels — and the
    result is F^T @ F, i.e. all channel-by-channel inner products.
    """
    flat = tf.reshape(F, (M, N))
    gram = tf.matmul(tf.transpose(flat), flat)
    return gram
def _single_style_loss(a, g):
    """ Calculate the style loss at a certain layer
    Inputs:
        a is the feature representation of the real (style) image
        g is the feature representation of the generated image
    Output:
        the style loss at a certain layer (which is E_l in the paper):
        E = sum((G_a - G_g)^2) / (4 N^2 M^2)
    Note: a and g are feature representations, not gram matrices.
    """
    # tensor layout: (batch, rows, cols, features)
    horizdim = 1 # recall that first dimension of tensor is minibatch size
    vertdim = 2
    featuredim = 3
    # N - number of features
    N = a.shape[featuredim] #a & g are the same shape
    # M - product of the two spatial dimensions of the feature map
    M = a.shape[horizdim]*a.shape[vertdim]
    #print(' N is ' + str(N) + ', and M is ' + str(M))
    # This is 'E' from the paper and the homework handout.
    # It is a scalar for a single layer
    diff = _gram_matrix(a, N, M)-_gram_matrix(g, N, M)
    sq = tf.square(diff)
    s=tf.reduce_sum(sq)
    return (s/(4*N*N*M*M))
def _create_style_loss(A, model):
    """ Return the total style loss: the W-weighted sum of the per-layer
    style losses E over STYLE_LAYERS.  A holds the precomputed
    style-image features, one entry per layer.
    """
    n_layers = len(STYLE_LAYERS)
    # E has one dimension with length equal to the number of layers
    E = [_single_style_loss(A[i], model[STYLE_LAYERS[i]]) for i in range(n_layers)]
    # np.dot of the python weight list W with the list of scalar loss
    # tensors yields the weighted sum (a tensor).
    return np.dot(W, E)
def _create_losses(model, input_image, content_image, style_image):
    """Build the content, style, and total loss tensors.

    Each image is assigned to the graph's input variable in a temporary
    session so its layer activations can be captured as numpy constants;
    the losses then compare those constants to the live tensors.
    """
    print('_create_losses')
    with tf.variable_scope('loss') as scope:
        # Content features: run the content image through the net once and
        # capture the CONTENT_LAYER activations (p).
        with tf.Session() as sess:
            sess.run(input_image.assign(content_image)) # assign content image to the input variable
            # model[CONTENT_LAYER] is a relu op
            p = sess.run(model[CONTENT_LAYER])
        content_loss = _create_content_loss(p, model[CONTENT_LAYER])
        # Style features: same trick with the style image, across STYLE_LAYERS.
        with tf.Session() as sess:
            sess.run(input_image.assign(style_image))
            A = sess.run([model[layer_name] for layer_name in STYLE_LAYERS])
            style_loss = _create_style_loss(A, model)
        # Penalize generated pixels that drift outside [0, 255].
        reg_loss = _create_range_loss(model['X'])
        # Total: weighted content + weighted style + range regularizer.
        total_loss = ALPHA*content_loss + BETA*style_loss + reg_loss
    return content_loss, style_loss, total_loss
def _create_summary(model):
    """Create and merge the scalar summaries for TensorBoard logging.

    Returns the merged summary op (fetch it once per logged step).
    """
    with tf.name_scope ( "summaries" ):
        # BUG FIX: summary tags must match [A-Za-z0-9_.-/]; "content loss"
        # (with a space) is an invalid name that TF rewrites with a warning.
        tf.summary.scalar ( "content_loss" , model['content_loss'])
        tf.summary.scalar ( "style_loss" , model['style_loss'])
        tf.summary.scalar ( "total_loss" , model['total_loss'])
        # because you have several summaries, we should merge them all
        # into one op to make it easier to manage
        return tf.summary.merge_all()
def train(model, generated_image, initial_image):
    """ Train your model.
    Don't forget to create folders for checkpoints and outputs.

    Optimizes the generated image (the only trainable variable) against
    model['total_loss'], periodically logging summaries, writing the
    current image to OUTPUTDIR, and checkpointing to CHKPTDIR.
    """
    skip_step = 1   # report/save every skip_step iterations (grows over time)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run ( tf.global_variables_initializer ())
        print('initialize .....')
        writer = tf.summary.FileWriter(LOGDIR, sess.graph)
        ###############################
        print('Do initial run to assign image')
        sess.run(generated_image.assign(initial_image))
        # Optionally resume from the latest checkpoint.
        if CHECKPOINTING :
            ckpt = tf.train.get_checkpoint_state(os.path.dirname(CHKPTDIR + '/checkpoint'))
        else :
            ckpt = False
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        initial_step = model['global_step'].eval()
        start_time = time.time()
        step_time=start_time
        for index in range(initial_step, ITERS):
            # Report sparsely at first, then every 100 steps.
            if index >= 5 and index < 20:
                skip_step = 10
            elif index >= 20:
                skip_step = 100
            sess.run(model['optimizer'])
            if (index + 1) % skip_step == 0:
                # Following the optimization step, fetch the image and loss.
                gen_image, total_loss, summary = sess.run([generated_image, model['total_loss'],
                                                model['summary_op']])
                #gen_image = gen_image + MEAN_PIXELS
                writer.add_summary(summary, global_step=index)
                print('Step {}\n   Sum: {:5.1f}'.format(index + 1, np.sum(gen_image)))
                print('   Loss: {:5.1f}'.format(sess.run(model['total_loss']))) # NOTE(review): re-runs the loss — the total_loss fetched above could be reused
                print('   Time: {}'.format(time.time() - step_time))
                step_time = time.time()
                filename = OUTPUTDIR + '/%d.tif' % (index)
                #pickledModel.save_image(np.transpose(gen_image[0][0]), filename)
                print('style_transfer: about to save image with shape ' + str(gen_image.shape))
                pickledModel.save_image(gen_image[0], filename)
                if (index + 1) % 20 == 0:
                    saver.save(sess, CHKPTDIR + '/style_transfer', index)
        print('  TOTAL Time: {}'.format(time.time() - start_time))
        writer.close()
#-----------------------------------
print('RUN MAIN')
# Rebuild the trained classifier (weights frozen as constants) from the pickle.
model=pickledModel.load(FLAGS.stateFile, FLAGS.randomize)
print('MODEL LOADED')
model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
content_image = pickledModel.loadImage(CONTENT_IMAGE)
print('content_image shape is ' + str(content_image.shape))
print('content_image max is ' + str(np.amax(content_image) ))
print('content_image min is ' + str(np.amin(content_image) ))
#content_image = content_image - MEAN_PIXELS
style_image = pickledModel.loadImage(STYLE_IMAGE)
print('style_image max is ' + str(np.amax(style_image) ))
print('style_image min is ' + str(np.amin(style_image) ))
#style_image = style_image - MEAN_PIXELS
print(' NEXT, create losses')
model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model,
                                    model["X"], content_image, style_image)
# Optimize only the generated image variable X, never the network weights.
model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'], var_list=[model["X"]])
model['summary_op'] = _create_summary(model)
# Start from a noisy blend of the content image.
initial_image = pickledModel.generate_noise_image(content_image, NOISE_RATIO)
#def train(model, generated_image, initial_image):
train(model, model["X"], initial_image)
#if __name__ == '__main__':
#    main()
| Python | 299 | 37.183945 | 144 | /style_transfer.py | 0.612804 | 0.604922 |
lonce/dcn_soundclass | refs/heads/master | #
#
#Morgans great example code:
#https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
#
# GitHub utility for freezing graphs:
#https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py
#
#https://www.tensorflow.org/api_docs/python/tf/graph_util/convert_variables_to_constants
import tensorflow as tf
import numpy as np
from PIL import TiffImagePlugin, ImageOps
from PIL import Image
import pickle
g_graph=None   # dict of named tensors for the reconstructed style-transfer graph
#k_freqbins=257
#k_width=856
VERBOSE=0
#------------------------------------------------------------
#global
# gleaned from the parmeters in the pickle file; used to load images
height=0   # input image height (1 means freq bins live in the channel dim)
width=0    # input image width (time frames)
depth=0    # input channel count
#-------------------------------------------------------------
def getShape(g, name) :
    """Return the static shape of the tensor named *name* (output ':0') in graph *g*."""
    tensor = g.get_tensor_by_name(name + ":0")
    return tensor.get_shape()
def loadImage(fname) :
    """Load a spectrogram TIFF as a float batch-of-one network input.

    Pixel values are scaled by 255 (matching the training pipeline).
    When the model's input height is 1, frequency bins are moved into the
    channel dimension: reshape to (1, depth, width, 1), then transpose to
    (1, 1, width, depth).  Otherwise the shape is (1, height, width, 1).
    Relies on the module globals height/width/depth set by load().
    """
    #transform into 1D width with frequbins in channel dimension (we do this in the graph in the training net, but not with this reconstructed net)
    if (height==1) :
        return np.transpose(np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,depth,width,1]), [0,3,2,1])
    else :
        return np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,height,width,1])
def generate_noise_image(content_image, noise_ratio=0.6):
    """Blend uniform noise in [-1, 1) with *content_image*.

    The result is noise_ratio parts noise plus (1 - noise_ratio) parts
    content; the noise shape comes from the module globals
    height/width/depth set by load().
    """
    print('generate_noise_image with height=' + str(height) + ', width =' + str(width) + ', and depth =' + str(depth))
    shape = (1, height, width, depth)
    noise = np.random.uniform(-1, 1, shape).astype(np.float32)
    print('noise_image shape is ' + str(noise.shape))
    blended = noise * noise_ratio + content_image * (1. - noise_ratio)
    return blended
# Assumes caller puts image into the correct orientation
def save_image(image, fname, scaleinfo=None):
    """Save a float image array as a grayscale TIFF with a dB-scale tag.

    The image is shifted so its maximum maps to 255, clipped at 0 (masking
    values more than 255 below the max), rescaled to [0, 1] floats, and
    written with TIFF description tag 270 recording the dB scale.

    Parameters:
        image - numpy array; if the module global height == 1, layout is
                (channels, width, 1) and is transposed first, otherwise
                (height, width, channels) and channel 0 is taken
        fname - output path
        scaleinfo - optional string for TIFF tag 270; defaults to '80, 0'
    Returns:
        the string stored in TIFF tag 270
    """
    print('save_image: shape is ' + str(image.shape))
    if (height==1) : # orientation is freq bins in channels
        print('saving image in channel orientation')
        image = np.transpose(image, [2,1,0])[:,:,0]
    else :
        print('saving image in image orientation')
        image = image[:,:,0]
    print('AFTER reshaping, save_image: shape is ' + str(image.shape))
    print('image max is ' + str(np.amax(image) ))
    print('image min is ' + str(np.amin(image) ))
    # Output should add back the mean pixels we subtracted at the beginning
    # [0,80db] -> [0, 255]
    # after style transfer, images range outside of [0,255].
    # To preserve scale, and mask low values, we shift by (255-max), then clip at 0 and then have all bins in the top 80dB.
    image = np.clip(image-np.amax(image)+255, 0, 255).astype('uint8')
    info = TiffImagePlugin.ImageFileDirectory()
    if (scaleinfo == None) :
        info[270] = '80, 0'   # default description: 80 dB range starting at 0
    else :
        info[270] = scaleinfo
    #scipy.misc.imsave(path, image)
    # Store as float64 in [0, 1] (Image.fromarray of float data).
    bwarray=np.asarray(image)/255.
    savimg = Image.fromarray(np.float64(bwarray)) #==============================
    savimg.save(fname, tiffinfo=info)
    #print('RGB2TiffGray : tiffinfo is ' + str(info))
    return info[270] # just in case you want it for some reason
def constructSTModel(state, params) :
    """Rebuild the trained classifier as a graph suitable for style transfer.

    All trained weights become tf.constants (names prefixed 's_'); the only
    variable is X, the generated image, which the optimizer will modify.
    The training graph's dropout is omitted (inference-time network).

    Parameters:
        state - dict mapping trained variable names (e.g. 'w1:0') to numpy values
        params - dict of architecture hyper-parameters (k_height, k_width,
                 k_inputChannels, strides, pooling, fc sizes, ...)
    Returns:
        dict of named tensors (also stored in the module global g_graph)
    """
    global g_graph
    g_graph = {}
    #This is the variable that we will "train" to match style and content images.
    ##g_graph["X"] = tf.Variable(np.zeros([1,k_width*k_freqbins]), dtype=tf.float32, name="s_x_image")
    ##g_graph["x_image"] = tf.reshape(g_graph["X"], [1,k_height,k_width,k_inputChannels])
    g_graph["X"] = tf.Variable(np.zeros([1,params['k_height'], params['k_width'], params['k_inputChannels']]), dtype=tf.float32, name="s_X")
    # conv layer 1: trained weights frozen as constants
    g_graph["w1"]=tf.constant(state["w1:0"], name="s_w1")
    g_graph["b1"]=tf.constant(state["b1:0"], name="s_b1")
    g_graph["h1"]=tf.nn.relu(tf.nn.conv2d(g_graph["X"], g_graph["w1"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph["b1"], name="s_h1")
    # 2x2 max pooling
    g_graph["h1pooled"] = tf.nn.max_pool(g_graph["h1"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name="s_h1_pooled")
    # conv layer 2
    g_graph["w2"]=tf.constant(state["w2:0"], name="s_w2")
    g_graph["b2"]=tf.constant(state["b2:0"], name="s_b2")
    g_graph["h2"]=tf.nn.relu(tf.nn.conv2d(g_graph["h1pooled"], g_graph["w2"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph["b2"], name="s_h2")
    g_graph["h2pooled"] = tf.nn.max_pool(g_graph["h2"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name='s_h2_pooled')
    # flatten the conv output for the fully connected layers
    g_graph["convlayers_output"] = tf.reshape(g_graph["h2pooled"], [-1, params['k_downsampledWidth'] * params['k_downsampledHeight']*params['L2_CHANNELS']]) # to prepare it for multiplication by W_fc1
    # fully connected layer 1 (no dropout at inference time)
    g_graph["W_fc1"] = tf.constant(state["W_fc1:0"], name="s_W_fc1")
    g_graph["b_fc1"] = tf.constant(state["b_fc1:0"], name="s_b_fc1")
    #g_graph["keepProb"]=tf.placeholder(tf.float32, (), name= "keepProb")
    g_graph["h_fc1"] = tf.nn.relu(tf.matmul(g_graph["convlayers_output"], g_graph["W_fc1"]) + g_graph["b_fc1"], name="s_h_fc1")
    #Read out layer
    g_graph["W_fc2"] = tf.constant(state["W_fc2:0"], name="s_W_fc2")
    g_graph["b_fc2"] = tf.constant(state["b_fc2:0"], name="s_b_fc2")
    g_graph["logits_"] = tf.matmul(g_graph["h_fc1"], g_graph["W_fc2"])
    g_graph["logits"] = tf.add(g_graph["logits_"] , g_graph["b_fc2"] , name="s_logits")
    g_graph["softmax_preds"] = tf.nn.softmax(logits=g_graph["logits"], name="s_softmax_preds")
    return g_graph
# Create and save the picke file of paramters
def saveState(sess, vlist, parameters, fname) :
    """Pickle the current values of *vlist* variables plus *parameters*.

    Parameters:
        sess - a live tf.Session used to evaluate each variable
        vlist - iterable of tf variables; each value is stored under its .name
        parameters - dict of network hyper-parameters to store alongside
        fname - output pickle path
    """
    # stash the evaluated value of every requested tensorflow variable
    state = {v.name: sess.run(v) for v in vlist}
    # combine state and parameters into a single object for serialization
    netObject={
        'state' : state,
        'parameters' : parameters
    }
    # BUG FIX: the previous version leaked the file handle
    # (pickle.dump(..., open(fname, "wb")) with no close).
    with open(fname, "wb") as f:
        pickle.dump(netObject, f)
# Load the pickle file of parameters
def load(pickleFile, randomize=0) :
    """Read a pickled network (as written by saveState) and rebuild the TF model.

    pickleFile -- path to a pickle holding {'state': ..., 'parameters': ...}
    randomize  -- when 1, replace every stored weight array with uniform noise
                  in [-.1, .1) of the same shape/dtype (useful as a baseline)
    Side effect: publishes this module's globals `height`, `width`, `depth`
    from the stored parameters.
    Returns whatever constructSTModel(state, parameters) builds.
    """
    print(' will read state from ' + pickleFile)
    # close the file deterministically (the original leaked the handle)
    with open(pickleFile, "rb") as f :
        netObject = pickle.load(f)
    state = netObject['state']
    parameters = netObject['parameters']
    if randomize == 1 :
        print('randomizing weights')
        for n in state.keys():
            print('shape of state[' + n + '] is ' + str(state[n].shape))
            # uniform noise in [-.1, .1), same shape and float32 dtype as stored
            state[n] = .2* np.random.random_sample(state[n].shape).astype(np.float32) -.1
    for p in parameters.keys() :
        print('param[' + p + '] = ' + str(parameters[p]))
    # publish the input geometry at module level for other users of this module
    global height
    height = parameters['k_height']
    global width
    width = parameters['k_width']
    global depth
    depth = parameters['k_inputChannels']
    return constructSTModel(state, parameters)
| Python | 190 | 36.763157 | 197 | /pickledModel.py | 0.654961 | 0.625279 |
lonce/dcn_soundclass | refs/heads/master | """
"""
# standard library
import argparse  # get args from command line
import math
import os
import sys
import time
# third-party
import numpy as np
import tensorflow as tf
# project-local
import pickledModel
import spectreader
FLAGS = None
# ------------------------------------------------------
# get any args provided on the command line
# (ArgumentDefaultsHelpFormatter makes --help show the default of every flag)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# --- data location / bookkeeping
parser.add_argument('--indir', type=str, help='directory holding TFRecords of data', default='.')
parser.add_argument('--outdir', type=str, help='output directory for logging', default='.')
parser.add_argument('--numClasses', type=int, help='number of classes in data', choices=[2,50], default=2) #default for testing
parser.add_argument('--checkpointing', type=int, help='0/1 - used for both saving and starting from checkpoints', choices=[0,1], default=0)
parser.add_argument('--checkpointPeriod', type=int, help='checkpoint every n batches', default=8)
# --- input spectrogram geometry
parser.add_argument('--freqbins', type=int, help='number of frequency bins in the spectrogram input', default=513)
parser.add_argument('--numFrames', type=int, help='number of frames in the spectrogram input (must be divisible by 4)', default=424)
# --- training hyper-parameters
parser.add_argument('--learning_rate', type=float, help='learning rate', default=.001)
parser.add_argument('--batchsize', type=int, help='number of data records per training batch', default=8) #default for testing
parser.add_argument('--n_epochs', type=int, help='number of epochs to use for training', default=2) #default for testing
parser.add_argument('--keepProb', type=float, help='keep probablity for dropout before 1st fully connected layer during training', default=1.0) #default for testing
parser.add_argument('--batchnorm', type=int, help='0/1 - to batchnorm or not to batchnorm', choices=[0,1], default=1)
# --- network architecture
parser.add_argument('--freqorientation', type=str, help='freq as height or as channels', choices=["height","channels"], default="channels") #default for testing
parser.add_argument('--numconvlayers', type=int, help='number of convolutional layers', choices=[1,2], default=2) #default for testing
parser.add_argument('--l1channels', type=int, help='Number of channels in the first convolutional layer', default=32) #default for testing
parser.add_argument('--l2channels', type=int, help='Number of channels in the second convolutional layer (ignored if numconvlayers is 1)', default=64) #default for testing
parser.add_argument('--fcsize', type=int, help='Dimension of the final fully-connected layer', default=32) #default for testing
parser.add_argument('--convRows', type=int, help='size of conv kernernel in freq dimension if orientation is height (otherwise ignored)', default=5) #default for testing
parser.add_argument('--convColumns', type=int, help='size of conv kernernel in temporal dimension ', default=5) #default for testing
# --- optimization strategy
parser.add_argument('--optimizer', type=str, help='optimizer', choices=["adam","gd"], default="gd") #default for testing
parser.add_argument('--adamepsilon', type=float, help='epsilon param for adam optimizer', default=.1)
parser.add_argument('--learnCondition', type=str, help='when to learn', choices=["always","whenWrong"], default="always") #default for testing
parser.add_argument('--mtlnumclasses', type=int, help='if nonzero, train using secondary classes (which must be stored in TFRecord files', default=0)
# parse_known_args lets unrelated flags (e.g. from a wrapper script) pass through
FLAGS, unparsed = parser.parse_known_args()
print('\n FLAGS parsed : {0}'.format(FLAGS))
#HARD-CODED data-dependant parameters ------------------
#dimensions of image (pixels)
# In "channels" orientation the spectrogram is a 1-pixel-high image whose
# channel axis carries the frequency bins; in "height" orientation it is a
# single-channel image with frequency along the height axis.
k_freqbins=FLAGS.freqbins
k_height=1 # default for freqs as channels
k_inputChannels=k_freqbins # default for freqs as channels
if FLAGS.freqorientation == "height" :
    k_height=k_freqbins
    k_inputChannels=1
k_numFrames=FLAGS.numFrames
#number of samples for training and validation
k_numClasses=FLAGS.numClasses #determines wether to read mini data set in data2 or full dataset in data50
validationSamples=8*k_numClasses
trainingSamples=32*k_numClasses
k_mtlnumclasses=FLAGS.mtlnumclasses #only matters if K_MTK is not 0
# ------------------------------------------------------
# Define paramaters for the training
learning_rate = FLAGS.learning_rate
k_batchsize = FLAGS.batchsize
n_epochs = FLAGS.n_epochs #6 #NOTE: we can load from checkpoint, but new run will last for n_epochs anyway
# ------------------------------------------------------
# Define paramaters for the model
K_NUMCONVLAYERS = FLAGS.numconvlayers
L1_CHANNELS=FLAGS.l1channels
L2_CHANNELS=FLAGS.l2channels
FC_SIZE = FLAGS.fcsize
k_downsampledHeight = 1 # default for freqs as channels
if FLAGS.freqorientation == "height" :
    # see https://www.tensorflow.org/api_guides/python/nn#convolution for calculating size from strides and padding
    k_downsampledHeight = int(math.ceil(math.ceil(k_height/2.)/2.))# k_height/4 #in case were using freqs as y dim, and conv layers = 2
    print(':::::: k_downsampledHeight is ' + str(k_downsampledHeight))
# NOTE(review): the divisions below rely on Python 2 integer division; under
# Python 3 they would yield floats and the reshape sizes below would break.
k_downsampledWidth = k_numFrames/4 # no matter what the orientation - freqs as channels or as y dim
k_convLayerOutputChannels = L2_CHANNELS
if (K_NUMCONVLAYERS == 1) :
    k_downsampledWidth = k_numFrames/2
    k_convLayerOutputChannels = L1_CHANNELS
    if FLAGS.freqorientation == "height" :
        k_downsampledHeight = int(math.ceil(k_height/2.)) # k_height/2 #in case were using freqs as y dim, and conv layers = 1
        print(':::::: k_downsampledHeight is ' + str(k_downsampledHeight))
print(':::::: k_downsampledWidth is ' + str(k_downsampledWidth))
# Convolution kernel geometry: a 1-row kernel when freq bins live in channels,
# otherwise a convRows x convColumns 2-D kernel.
K_ConvRows=1 # default for freqs as channels
if FLAGS.freqorientation == "height" :
    K_ConvRows=FLAGS.convRows
K_ConvCols=FLAGS.convColumns
k_ConvStrideRows=1
k_ConvStrideCols=1
# Pooling only shrinks the height axis when that axis carries frequency.
k_poolRows = 1 # default for freqs as channels
k_poolStrideRows = 1 # default for freqs as channels
if FLAGS.freqorientation == "height" :
    k_poolRows = 2
    k_poolStrideRows = 2
k_keepProb=FLAGS.keepProb
k_OPTIMIZER=FLAGS.optimizer
k_adamepsilon = FLAGS.adamepsilon
LEARNCONDITION = FLAGS.learnCondition
# ------------------------------------------------------
# Derived parameters for convenience (do not change these)
k_vbatchsize = min(validationSamples, k_batchsize)
k_numVBatches = validationSamples/k_vbatchsize
print(' ------- For validation, will run ' + str(k_numVBatches) + ' batches of ' + str(k_vbatchsize) + ' datasamples')
#ESC-50 dataset has 50 classes of 40 sounds each
k_batches_per_epoch = k_numClasses*40/k_batchsize
k_batchesPerLossReport= k_batches_per_epoch #writes loss to the console every n batches
print(' ----------will write out report every ' + str(k_batchesPerLossReport) + ' batches')
#k_batchesPerLossReport=1 #k_batches_per_epoch
# Create list of paramters for serializing so that network can be properly reconstructed, and for documentation purposes
# (this dict is saved next to the weights by pickledModel.saveState)
parameters={
    'k_height' : k_height,
    'k_numFrames' : k_numFrames,
    'k_inputChannels' : k_inputChannels,
    'K_NUMCONVLAYERS' : K_NUMCONVLAYERS,
    'L1_CHANNELS' : L1_CHANNELS,
    'L2_CHANNELS' : L2_CHANNELS,
    'FC_SIZE' : FC_SIZE,
    'K_ConvRows' : K_ConvRows,
    'K_ConvCols' : K_ConvCols,
    'k_ConvStrideRows' : k_ConvStrideRows,
    'k_ConvStrideCols' : k_ConvStrideCols,
    'k_poolRows' : k_poolRows,
    'k_poolStrideRows' : k_poolStrideRows,
    'k_downsampledHeight' : k_downsampledHeight,
    'k_downsampledWidth' : k_downsampledWidth,
    'freqorientation' : FLAGS.freqorientation
}
# ------------------------------------------------------
#Other non-data, non-model params
CHECKPOINTING=FLAGS.checkpointing
k_checkpointPeriod = FLAGS.checkpointPeriod # in units of batches
INDIR = FLAGS.indir
OUTDIR = FLAGS.outdir
CHKPOINTDIR = OUTDIR + '/checkpoints' # create folder manually
CHKPTBASE = CHKPOINTDIR + '/model.ckpt' # base name used for checkpoints
LOGDIR = OUTDIR + '/log_graph' #create folder manually
#OUTPUTDIR = i_outdir
NUM_THREADS = 4 #used for enqueueing TFRecord data
#=============================================
def getImage(fnames, nepochs=None, mtlclasses=0) :
    """Read one (label, image) example stream from TFRecord files and preprocess it.

    fnames     -- list of TFRecord filenames to read from
    nepochs    -- passed through to the underlying reader; data is read this
                  many times before an OutOfRange error (None = read forever)
    mtlclasses -- when non-zero, a one-hot secondary (MTL) label with this many
                  classes is also returned
    Returns (label, image) or (label, image, mtlabel); labels are one-hot,
    the image is flattened to a 1-D tensor.
    """
    has_mtl = bool(mtlclasses)
    if has_mtl :
        raw_label, raw_image, raw_mtlabel = spectreader.getImage(fnames, nepochs, mtlclasses)
    else :
        raw_label, raw_image = spectreader.getImage(fnames, nepochs)
    # Flatten to a vector (shuffle_batch did not cooperate with native shapes).
    flat_image = tf.reshape(raw_image, [k_freqbins * k_numFrames])
    # Stored labels are 1-based; shift down and expand to a one-hot vector.
    onehot_label = tf.stack(tf.one_hot(raw_label - 1, k_numClasses))
    if not has_mtl :
        return onehot_label, flat_image
    onehot_mtlabel = tf.stack(tf.one_hot(raw_mtlabel - 1, mtlclasses))
    return onehot_label, flat_image, onehot_mtlabel
def get_datafiles(a_dir, startswith):
    """Return the entries of a_dir whose names begin with *startswith*,
    each prefixed with the directory path, e.g. get_datafiles('data', 'train-').
    """
    matches = []
    for entry in os.listdir(a_dir):
        if not entry.startswith(startswith):
            continue
        matches.append(a_dir + '/' + entry)
    return matches
def batch_norm(x, is_trainingP, scope):
    """Batch-normalize x over axis 3 inside the given variable scope.

    is_trainingP -- boolean tensor; when True the layer updates its moving
                    statistics (those update ops must still be run explicitly)
    scope        -- variable-scope name; the op itself is named scope + "_bn"
    """
    bn_kwargs = {
        'axis' : 3,  # normalizes the last axis; assumes NHWC ordering from conv2d -- TODO confirm
        'center' : True,
        'scale' : True,
        'training' : is_trainingP,
        'name' : scope + "_bn",
    }
    with tf.variable_scope(scope):
        return tf.layers.batch_normalization(x, **bn_kwargs)
#=============================================
# Step 1: Read in data
# getImage reads data for enqueueing shufflebatch, shufflebatch manages it's own dequeing
# ---- First set up the graph for the  TRAINING DATA
# Training examples are shuffled; the optional MTL branch also batches the
# secondary labels.
if k_mtlnumclasses :
    target, data, mtltargets = getImage(get_datafiles(INDIR, 'train-'), nepochs=n_epochs, mtlclasses=k_mtlnumclasses)
    imageBatch, labelBatch, mtltargetBatch = tf.train.shuffle_batch(
        [data, target, mtltargets], batch_size=k_batchsize,
        num_threads=NUM_THREADS,
        allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize
        enqueue_many=False, #IMPORTANT to get right, default=False -
        capacity=1000, #1000,
        min_after_dequeue=500) #500
else :
    target, data = getImage(get_datafiles(INDIR, 'train-'), n_epochs)
    imageBatch, labelBatch = tf.train.shuffle_batch(
        [data, target], batch_size=k_batchsize,
        num_threads=NUM_THREADS,
        allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize
        enqueue_many=False, #IMPORTANT to get right, default=False -
        capacity=1000, #1000,
        min_after_dequeue=500) #500
# ---- same for the VALIDATION DATA
# no need for mtl labels for validation
vtarget, vdata = getImage(get_datafiles(INDIR, 'validation-'))  # one "epoch" for validation
#vimageBatch, vlabelBatch = tf.train.shuffle_batch(
#    [vdata, vtarget], batch_size=k_vbatchsize,
#    num_threads=NUM_THREADS,
#    allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize
#    enqueue_many=False, #IMPORTANT to get right, default=False -
#    capacity=1000, #1000,
#    min_after_dequeue=500) #500
# Validation uses a deterministic (non-shuffled) batch so every epoch sees the
# same data in the same order.
vimageBatch, vlabelBatch = tf.train.batch(
    [vdata, vtarget], batch_size=k_vbatchsize,
    num_threads=NUM_THREADS,
    allow_smaller_final_batch=False, #want to finish an eposh even if datasize doesn't divide by batchsize
    enqueue_many=False, #IMPORTANT to get right, default=False -
    capacity=1000)
# Step 2: create placeholders for features (X) and labels (Y)
# each lable is one hot vector.
# 'None' here allows us to fill the placeholders with different size batches (which we do with training and validation batches)
#X = tf.placeholder(tf.float32, [None,k_freqbins*k_numFrames], name= "X")
X = tf.placeholder(tf.float32, [None,k_freqbins*k_numFrames], name= "X")
if FLAGS.freqorientation == "height" :
    # single-channel image with frequency along the height axis
    x_image = tf.reshape(X, [-1,k_height,k_numFrames,k_inputChannels])
else :
    print('set up reshaping for freqbins as channels')
    foo1 = tf.reshape(X, [-1,k_freqbins,k_numFrames,1]) #unflatten (could skip this step if it wasn't flattenned in the first place!)
    x_image = tf.transpose(foo1, perm=[0,3,2,1]) #moves freqbins from height to channel dimension
Y = tf.placeholder(tf.float32, [None,k_numClasses], name= "Y") #labeled classes, one-hot
# secondary (multi-task) labels; only fed when k_mtlnumclasses != 0
MTLY = tf.placeholder(tf.float32, [None,k_mtlnumclasses], name= "MTLY") #labeled classes, one-hot
# Step 3: create weights and bias
# `trainable` collects exactly the variables handed to the optimizer's var_list
# and later pickled by pickledModel.saveState.
trainable=[]
#Layer 1
# 1 input channel, L1_CHANNELS output channels
isTraining=tf.placeholder(tf.bool, (), name= "isTraining") #passed in feeddict to sess.runs
w1=tf.Variable(tf.truncated_normal([K_ConvRows, K_ConvCols, k_inputChannels, L1_CHANNELS], stddev=0.1), name="w1")
trainable.extend([w1])
if (FLAGS.batchnorm==1) :
    #convolve Wx (w/o adding bias) then relu; batch norm supplies the shift, so no bias term
    l1preactivation=tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME')
    bn1=batch_norm(l1preactivation, isTraining, "batch_norm_1")
    h1=tf.nn.relu(bn1, name="h1")
    # 2x2 max pooling
else :
    # convolve and add bias Wx+b
    b1=tf.Variable(tf.constant(0.1, shape=[L1_CHANNELS]), name="b1")
    trainable.extend([b1])
    l1preactivation=tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b1
    h1=tf.nn.relu(l1preactivation, name="h1")
# halves the time axis (and the height axis when freq-as-height)
h1pooled = tf.nn.max_pool(h1, ksize=[1, k_poolRows, 2, 1], strides=[1, k_poolStrideRows, 2, 1], padding='SAME')
if K_NUMCONVLAYERS == 2 :
    #Layer 2
    #L1_CHANNELS input channels, L2_CHANNELS output channels
    w2=tf.Variable(tf.truncated_normal([K_ConvRows, K_ConvCols, L1_CHANNELS, L2_CHANNELS], stddev=0.1), name="w2")
    trainable.extend([w2])
    if (FLAGS.batchnorm==1) :
        #convolve (w/o adding bias) then norm
        l2preactivation= tf.nn.conv2d(h1pooled, w2, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME')
        bn2=batch_norm(l2preactivation, isTraining, "batch_norm_2")
        h2=tf.nn.relu(bn2, name="h2")
    else :
        b2=tf.Variable(tf.constant(0.1, shape=[L2_CHANNELS]), name="b2")
        trainable.extend([b2])
        l2preactivation= tf.nn.conv2d(h1pooled, w2, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b2
        h2=tf.nn.relu(l2preactivation, name="h2")
    with tf.name_scope ( "Conv_layers_out" ):
        h2pooled = tf.nn.max_pool(h2, ksize=[1, k_poolRows, 2, 1], strides=[1, k_poolStrideRows, 2, 1], padding='SAME', name='h2_pooled')
        print('k_downsampledWidth = ' + str(k_downsampledWidth) + ', k_downsampledHeight = ' + str(k_downsampledHeight) + ', L2_CHANNELS = ' + str(L2_CHANNELS))
        print('requesting a reshape of size ' + str(k_downsampledWidth * k_downsampledHeight*L2_CHANNELS))
        convlayers_output = tf.reshape(h2pooled, [-1, k_downsampledWidth * k_downsampledHeight*L2_CHANNELS]) # to prepare it for multiplication by W_fc1
        #h2pooled is number of pixels / 2 / 2 (halved in size at each layer due to pooling)
        # check our dimensions are a multiple of 4
        if (k_numFrames%4) : # or ((FLAGS.freqorientation == "height") and k_height%4 )):
            print ('Error: width and height must be a multiple of 4')
            # NOTE(review): `sys` is not imported in this file's header, so
            # reaching this branch would raise NameError; `import sys` is needed.
            sys.exit(1)
else :
    # single conv layer: only one round of pooling happened
    convlayers_output = tf.reshape(h1pooled, [-1, k_downsampledWidth * k_downsampledHeight*L1_CHANNELS])
#now do a fully connected layer: every output connected to every input pixel of each channel
W_fc1 = tf.Variable(tf.truncated_normal([k_downsampledWidth * k_downsampledHeight * k_convLayerOutputChannels, FC_SIZE], stddev=0.1), name="W_fc1")
b_fc1 = tf.Variable(tf.constant(0.1, shape=[FC_SIZE]) , name="b_fc1")
# dropout keep-probability; fed as k_keepProb during training and 1.0 otherwise
keepProb=tf.placeholder(tf.float32, (), name= "keepProb")
fc1preactivation = tf.matmul(tf.nn.dropout(convlayers_output, keepProb), W_fc1) + b_fc1
h_fc1 = tf.nn.relu(fc1preactivation, name="h_fc1")
#Read out layer
W_fc2 = tf.Variable(tf.truncated_normal([FC_SIZE, k_numClasses], stddev=0.1), name="W_fc2")
b_fc2 = tf.Variable(tf.constant(0.1, shape=[k_numClasses]), name="b_fc2")
trainable.extend([W_fc1, b_fc1, W_fc2, b_fc2])
if k_mtlnumclasses :
    #MTL Read out layer - This is the only part of the net that is different for the secondary classes
    mtlW_fc2 = tf.Variable(tf.truncated_normal([FC_SIZE, k_mtlnumclasses], stddev=0.1), name="mtlW_fc2")
    mtlb_fc2 = tf.Variable(tf.constant(0.1, shape=[k_mtlnumclasses]), name="mtlb_fc2")
    trainable.extend([mtlW_fc2, mtlb_fc2])
# Step 4: build model
# the model that returns the logits.
# this logits will be later passed through softmax layer
# to get the probability distribution of possible label of the image
# DO NOT DO SOFTMAX HERE
#could do a dropout here on h
logits_ = tf.matmul(h_fc1, W_fc2)
logits = tf.add(logits_ , b_fc2, name="logits")
if k_mtlnumclasses :
    # secondary-task logits share everything up through h_fc1
    mtllogits = tf.matmul(h_fc1, mtlW_fc2) + mtlb_fc2
# Step 5: define loss function
# use cross entropy loss of the real labels with the softmax of logits
# returns a 1D tensor of length batchsize
if LEARNCONDITION=="whenWrong" :
    # zero out the loss of correctly-classified examples so only mistakes train
    summaryloss_primary_raw = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
    smpreds = tf.nn.softmax(logits=logits, name="softmax_preds")
    # argmax returns a batchsize tensor of type int64, batchsize tensor of booleans
    # equal returns a batchsize tensor of type boolean
    wrong_preds = tf.not_equal(tf.argmax(smpreds, 1), tf.argmax(Y, 1))
    # ones where labe != max of softmax, tensor of length batchsize
    wrongMask = tf.cast(wrong_preds, tf.float32) # need numpy.count_nonzero(boolarr) :(
    summaryloss_primary = tf.multiply(summaryloss_primary_raw, wrongMask, name="wrongloss")
else :
    summaryloss_primary = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
meanloss_primary = tf.reduce_mean(summaryloss_primary)
if k_mtlnumclasses :
    # multi-task training: total loss is the sum of primary and secondary losses
    summaryloss_mtl = tf.nn.softmax_cross_entropy_with_logits(logits=mtllogits, labels=MTLY)
    meanloss_mtl = tf.reduce_mean(summaryloss_mtl)
    meanloss=meanloss_primary+meanloss_mtl
else :
    meanloss=meanloss_primary
#if k_mtlnumclasses :
#  meanloss = tf.assign(meanloss, meanloss_primary + meanloss_mtl) #training thus depends on MTLYY in the feeddict if k_mtlnumclasses != 0
#else :
#  meanloss = tf.assign(meanloss, meanloss_primary)
# Step 6: define training op
# NOTE: Must save global step here if you are doing checkpointing and expect to start from step where you left off.
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
optimizer=None
if (k_OPTIMIZER == "adam") :
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=k_adamepsilon ).minimize(meanloss, var_list=trainable, global_step=global_step)
if (k_OPTIMIZER == "gd") :
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(meanloss, var_list=trainable, global_step=global_step)
assert(optimizer)
#Get the beta and gamma ops used for batchn ormalization since we have to update them explicitly during training
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print('extra update ops are ' + str(extra_update_ops))
#---------------------------------------------------------------
# VALIDATE
#--------------------------------------------------------------
#  The nodes are used for running the validation data and getting accuracy scores from the logits
with tf.name_scope("VALIDATION"):
    softmax_preds = tf.nn.softmax(logits=logits, name="softmax_preds")
    # argmax returns a batchsize tensor of type int64, batchsize tensor of booleans
    # equal returns a batchsize tensor of type boolean
    correct_preds = tf.equal(tf.argmax(softmax_preds, 1), tf.argmax(Y, 1))
    batchNumCorrect = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(
    # All this, just to feed a friggin float computed over several batches into a tensor we want to use for a summary
    validationtensor = tf.Variable(0.0, trainable=False, name="validationtensor")
    wtf = tf.placeholder(tf.float32, ())
    summary_validation = tf.assign(validationtensor, wtf)
#-----------------------------------------------------------------------------------
# These will be available to other programs that want to use this trained net.
tf.GraphKeys.USEFUL = 'useful'
tf.add_to_collection(tf.GraphKeys.USEFUL, X) #input place holder
tf.add_to_collection(tf.GraphKeys.USEFUL, keepProb) #place holder
tf.add_to_collection(tf.GraphKeys.USEFUL, softmax_preds)
tf.add_to_collection(tf.GraphKeys.USEFUL, w1)
if (FLAGS.batchnorm==0) :
    tf.add_to_collection(tf.GraphKeys.USEFUL, b1)
# NOTE(review): w2/b2 only exist when numconvlayers == 2; with a single conv
# layer the next lines would raise NameError — confirm intended usage.
tf.add_to_collection(tf.GraphKeys.USEFUL, w2)
if (FLAGS.batchnorm==0) :
    tf.add_to_collection(tf.GraphKeys.USEFUL, b2)
tf.add_to_collection(tf.GraphKeys.USEFUL, W_fc1)
tf.add_to_collection(tf.GraphKeys.USEFUL, b_fc1)
tf.add_to_collection(tf.GraphKeys.USEFUL, W_fc2)
tf.add_to_collection(tf.GraphKeys.USEFUL, b_fc2)
#-----------------------------------------------------------------------------------
# Run the validation set through the model and compute statistics to report as summaries
def validate(sess, printout=False) :
    """Run the whole validation set through the net and return a merged summary.

    sess     -- active tf.Session
    printout -- when True, also dump per-batch labels/predictions to the console
    Returns the evaluated `mergedvalidation` summary, or None if an exception
    interrupted validation before the summary could be computed.
    """
    with tf.name_scope ( "summaries" ):
        # test the model
        total_correct_preds = 0
        msummary = None  # defined up front so the except path can still return cleanly
        try:
            for i in range(k_numVBatches):
                X_batch, Y_batch = sess.run([vimageBatch, vlabelBatch])
                batch_correct, predictions = sess.run([batchNumCorrect, softmax_preds], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : 1., isTraining : False})
                total_correct_preds += batch_correct
                if printout:
                    print(' labels for batch:')
                    print(Y_batch)
                    print(' predictions for batch')
                    print(predictions)
                    # print num correct for each batch
                    print(u'(Validation batch) num correct for batchsize of {0} is {1}'.format(k_vbatchsize , batch_correct))
            print (u'(Validation EPOCH) num correct for EPOCH size of {0} ({1} batches) is {2}'.format(validationSamples , i+1 , total_correct_preds))
            print('so the percent correction for validation set = ' + str(total_correct_preds/validationSamples))
            # feed the last batch through the graph just to evaluate the summary;
            # the accuracy itself goes in through the `wtf` placeholder
            msummary = sess.run(mergedvalidation, feed_dict ={ X : X_batch , Y : Y_batch, wtf : total_correct_preds/validationSamples, keepProb : 1., isTraining : False})
        except Exception as e:
            # "as e" works on both Python 2.6+ and Python 3 (the original
            # comma form is Python-2 only)
            print(e)
        return msummary
#--------------------------------------------------------------
#   Visualize with Tensorboard
# -------------------------------------------------------------
# Registers scalar/histogram summaries for the training-side tensors.
# NOTE(review): w2/h2/l2preactivation only exist when numconvlayers == 2;
# with a single conv layer this would raise NameError — confirm.
def create_train_summaries ():
    with tf.name_scope ( "train_summaries" ):
        tf.summary.scalar ( "mean_loss" , meanloss_primary)
        tf.summary.histogram ("w_1", w1)
        tf.summary.histogram ("l1preactivation", l1preactivation)
        tf.summary.histogram ("h_1", h1)
        tf.summary.histogram ("w_2", w2)
        tf.summary.histogram ("l2preactivation", l2preactivation)
        tf.summary.histogram ("h_2", h2)
        tf.summary.histogram ("w_fc1", W_fc1)
        tf.summary.histogram ("fc1preactivation", fc1preactivation)
        tf.summary.histogram ("h_fc1", h_fc1)
        tf.summary.histogram ("w_fc2", W_fc2)
    return tf.summary.merge_all ()
mergedtrain = create_train_summaries()
# Registers the single validation-accuracy scalar (fed via the `wtf`
# placeholder / `summary_validation` assign op above).
def create_validation_summaries ():
    with tf.name_scope ( "validation_summaries" ):
        #tf.summary.scalar ( "validation_correct" , batchNumCorrect)
        tf.summary.scalar ( "summary_validation", summary_validation)
    return tf.summary.merge_all ()
mergedvalidation = create_validation_summaries()
# --------------------------------------------------------------
# TRAIN
#---------------------------------------------------------------
def trainModel():
  """Run the training loop.

  Reads shuffled batches until the input queues run out of epochs, stepping
  the optimizer on each batch; periodically logs summaries, validates, and
  checkpoints. On completion exports the meta graph and pickles the trained
  parameters via pickledModel.saveState.
  """
  with tf.Session() as sess:
    writer = tf.summary.FileWriter(LOGDIR) # for logging
    saver = tf.train.Saver() # for checkpointing
    #### Must run local initializer if nepochs arg to getImage is other than None!
    #sess.run(tf.local_variables_initializer())
    sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
    #not doing it here, but global_step could have been initialized by a checkpoint
    if CHECKPOINTING :
        ckpt = tf.train.get_checkpoint_state(os.path.dirname(CHKPTBASE))
    else :
        ckpt = False
    if ckpt and ckpt.model_checkpoint_path:
      print('Checkpointing restoring from path ' + ckpt.model_checkpoint_path)
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      #only save graph if we are not starting run from a checkpoint
      writer.add_graph(sess.graph)
    initial_step = global_step.eval()
    print('initial step will be ' + str(initial_step)) # non-zero if check pointing
    batchcount=initial_step
    start_time = time.time()
    # Create a coordinator, launch the queue runner threads.
    coord = tf.train.Coordinator()
    enqueue_threads = tf.train.start_queue_runners(sess=sess,coord=coord)
    try:
      batchcountloss = 0 #for reporting purposes
      while True: # for each batch, until data runs out
        if coord.should_stop():
          break
        # running extra_update_ops alongside the optimizer keeps the
        # batch-norm moving statistics up to date
        if k_mtlnumclasses :
          X_batch, Y_batch, MTLY_batch = sess.run([imageBatch, labelBatch, mtltargetBatch])
          _, loss_batch, _nada = sess.run([optimizer, meanloss, extra_update_ops], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : k_keepProb, MTLY : MTLY_batch, isTraining : True})
        else :
          X_batch, Y_batch = sess.run([imageBatch, labelBatch])
          _, loss_batch, _nada = sess.run([optimizer, meanloss, extra_update_ops], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : k_keepProb, isTraining : True})
        batchcountloss += loss_batch
        batchcount += 1
        if (not batchcount%k_batchesPerLossReport) :
          print('batchcount = ' + str(batchcount))
          avgBatchLoss=batchcountloss/k_batchesPerLossReport
          print(u'Average loss per batch {0}: {1}'.format(batchcount, avgBatchLoss))
          batchcountloss=0
          # summaries are computed with dropout off (keepProb 1.0)
          tsummary = sess.run(mergedtrain, feed_dict ={ X : X_batch , Y : Y_batch, keepProb : 1.0, isTraining : False })
          writer.add_summary(tsummary, global_step=batchcount)
          vsummary=validate(sess)
          writer.add_summary(vsummary, global_step=batchcount)
        if not (batchcount % k_checkpointPeriod) :
          saver.save(sess, CHKPTBASE, global_step=batchcount)
    except tf.errors.OutOfRangeError as e:
      # done with training epochs. Validate once more before closing threads
      print('ok, let\'s validate now that we\'ve run ' + str(batchcount) + 'batches ------------------------------')
      vsummary=validate(sess, False)
      writer.add_summary(vsummary, global_step=batchcount+1)
      coord.request_stop(e)
    except Exception as e:
      print('train: WTF')
      print(e)
    finally :
      coord.request_stop()
      coord.join(enqueue_threads)
      writer.close()
    # grab the total training time
    totalruntime = time.time() - start_time
    # print-function form works on both Python 2 and 3
    # (the original used a Python-2-only print statement here)
    print('Total training time: {0} seconds'.format(totalruntime))
    print(' Finished!') # should be around 0.35 after 25 epochs
    print(' now save meta model')
    meta_graph_def = tf.train.export_meta_graph(filename=OUTDIR + '/my-model.meta')
    pickledModel.saveState(sess, trainable, parameters, OUTDIR + '/state.pickle')
    print(' ===============================================================')
#=============================================================================================
# Echo the fully-resolved configuration before training starts, so every log
# records exactly which parameters produced it.
print(' ---- Actual parameters for this run ----')
print('INDIR : ' + INDIR)
print('k_freqbins : ' + str(k_freqbins)
      + ' ' + 'k_numFrames: ' + str(k_numFrames) )
#FLAGS.freqorientation, k_height, k_numFrames, k_inputChannels
print('FLAGS.freqorientation: ' + str(FLAGS.freqorientation)
    + ', ' + 'k_height: ' + str(k_height)
    + ', ' + 'k_numFrames: ' + str(k_numFrames)
    + ', ' + 'k_inputChannels: ' + str(k_inputChannels))
#k_numClasses, validationSamples, trainingSamples
print('k_numClasses: ' + str(k_numClasses)
    + ', ' + 'validationSamples: ' + str(validationSamples)
    + ', ' + 'trainingSamples: ' + str(trainingSamples))
#learning_rate, k_keepProb, k_batchsize, n_epochs
print('learning_rate: ' + str(learning_rate)
    + ', ' + 'k_keepProb: ' + str(k_keepProb)
    + ', ' + 'k_batchsize: ' + str(k_batchsize)
    + ', ' + 'n_epochs: ' + str(n_epochs))
#K_NUMCONVLAYERS, L1_CHANNELS, L2_CHANNELS, FC_SIZE
print('K_NUMCONVLAYERS: ' + str(K_NUMCONVLAYERS)
    + ', ' + 'L1_CHANNELS: ' + str(L1_CHANNELS)
    + ', ' + 'L2_CHANNELS: ' + str(L2_CHANNELS)
    + ', ' + 'FC_SIZE: ' + str(FC_SIZE))
#k_downsampledHeight, k_downsampledWidth , k_convLayerOutputChannels
print('k_downsampledHeight: ' + str(k_downsampledHeight)
    + ', ' + 'k_downsampledWidth: ' + str(k_downsampledWidth)
    + ', ' + 'k_convLayerOutputChannels: ' + str(k_convLayerOutputChannels))
#K_ConvRows, K_ConvCols, k_ConvStrideRows, k_ConvStrideCols, k_poolRows, k_poolStrideRows
print('K_ConvRows: ' + str(K_ConvRows)
    + ', ' + 'K_ConvCols: ' + str(K_ConvCols)
    + ', ' + 'k_ConvStrideRows: ' + str(k_ConvStrideRows)
    + ', ' + 'k_ConvStrideCols: ' + str(k_ConvStrideCols)
    + ', ' + 'k_poolRows: ' + str(k_poolRows)
    + ', ' + 'k_poolStrideRows : ' + str(k_poolStrideRows ))
if (k_OPTIMIZER == "adam") :
    print('k_OPTIMIZER: ' + str(k_OPTIMIZER)
        + ', ' + 'k_adamepsilon: ' + str(k_adamepsilon))
else :
    print('k_OPTIMIZER: ' + str(k_OPTIMIZER))
print('LEARNCONDITION: ' + LEARNCONDITION)
print('batchnorm: ' + str(FLAGS.batchnorm))
print('k_mtlnumclasses: ' + str(k_mtlnumclasses))
#OUTDIR
print('OUTDIR: ' + str(OUTDIR))
print('CHECKPOINTING: ' + str(CHECKPOINTING))
print(' vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ')
# list every trainable variable with its shape, then the total parameter count
for x in trainable :
    print(x.name + ' : ' + str(x.get_shape()))
print('TOTAL number of parameters in the model is ' + str(np.sum([np.product([xi.value for xi in x.get_shape()]) for x in trainable])))
print(' vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ')
#=============================================================================================
# Do it
trainModel()
| Python | 685 | 42.769344 | 239 | /DCNSoundClass.py | 0.688113 | 0.674438 |
lonce/dcn_soundclass | refs/heads/master | import os
import re
import numpy as np
import math
import tiffspect
import librosa
import librosa.display
import matplotlib.pyplot as plt
# Root directory of the ESC-50 spectrogram tiffs to be grouped into classes.
K_SPECTDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-spect'
k_soundsPerClass=125 # must divide the total number of sounds evenly!
#============================================
def weightedCentroid(spect) :
    """Return the rms-weighted mean spectral centroid of a magnitude spectrum.

    spect -- a (linear) magnitude spectrum
    Each frame's centroid is weighted by that frame's rms energy before
    averaging across frames.
    """
    frame_centroids = librosa.feature.spectral_centroid(S=spect)
    frame_weights = librosa.feature.rmse(S=spect)
    weighted_total = np.sum(frame_centroids * frame_weights)
    return weighted_total / np.sum(frame_weights)
def log2mag(S) :
    """Undo 20*log10 (dB) scaling: map a log-magnitude spectrum back to magnitude."""
    exponent = np.divide(S, 20.)
    return np.power(10, exponent)
def spectFile2Centroid(fname) :
    """Weighted spectral centroid of a spectrogram stored as a log-magnitude tiff.

    Our spect files are tiffs holding log-magnitude data, so convert back to
    linear magnitude before computing the centroid.
    """
    logspect, _ = tiffspect.Tiff2LogSpect(fname)
    return weightedCentroid(log2mag(logspect))
#============================================
# Next, some utilities for managing files
#----------------------------------------
def fullpathfilenames(directory):
    '''Return the full path of every file under *directory* (all leaves of the tree).
    '''
    paths = []
    for dirpath, _subdirs, files in os.walk(os.path.expanduser(directory)):
        for fname in files:
            paths.append(os.path.join(dirpath, fname))
    return paths
def esc50files(directory, regexString) :
    """Full paths under *directory* whose path matches regexString (via re.match)."""
    pattern = re.compile(regexString)
    return [path for path in fullpathfilenames(directory) if pattern.match(path)]
def addClass2Filename(fname, cname, action="move") :
    """Insert a secondary-class tag into a tiff filename: x.tif -> x._<cname>_.tif.

    fname  -- path of the .tif file to tag
    cname  -- class label (converted with str) inserted into the name
    action -- "move" renames the file on disk; anything else just prints the
              new name (dry run)
    Fix: the original pattern '.tif' had an unescaped '.' and was unanchored,
    so it could match 'xtif' anywhere in the path; anchor it to a literal
    '.tif' extension at the end of the name.
    """
    newname = re.sub(r'\.tif$', '._'+ str(cname) + '_.tif', fname)
    if (action == "move") :
        os.rename(fname, newname)
    else :
        print(newname)
def filestats (filenames, func) :
    """Pair each filename with func(filename): [[fname, func(fname)], ...]."""
    pairs = []
    for fname in filenames:
        pairs.append([fname, func(fname)])
    return pairs
#============================================
def createBalancedClassesWithFunc(topDirectory, regexString, func, numPerClass, action="move") :
    """Partition matching files into equal-size classes ordered by func's value.

    Files under topDirectory matching regexString are scored with func,
    sorted by that score, and split into consecutive groups of numPerClass
    (the total file count must be divisible by numPerClass).
    Each file is renamed origname.tif -> origname._<groupindex>_.tif;
    with action != "move" the new names are only printed.
    Returns (stats, stats_ordered) just for inspection.
    """
    matching = esc50files(topDirectory, regexString)
    stats = filestats(matching, func)
    stats_ordered = sorted(stats, key=lambda entry: entry[1])
    # One row of filenames per class; reshape enforces even divisibility.
    grouped = np.array(stats_ordered)[:, 0].reshape(-1, numPerClass)
    for class_index, row in enumerate(grouped):
        for filename in row:
            addClass2Filename(filename, class_index, action)
    return stats, stats_ordered
#--------------------------------------------------------------------------------
#if you got yourself in trouble, and need to remove all the secondary classnames:
def removeAllSecondaryClassNames(directory) :
    """Remove ALL the 2ndary class names (of the form ._cname_) from ALL files in the directory, restoring their original names."""
    for fname in fullpathfilenames(directory) :
        m = re.match(r'.*?(\._.*?_)\.tif$', fname) #grabs the string of all secondary classes if there is a seq of them
        if (m) :
            # Bug fix: the matched text was previously used directly as a regex
            # pattern, where its '.' would match any character; escape it so it
            # is removed only as a literal substring.
            newname = re.sub(re.escape(m.group(1)), '', fname)
            print('Will move ' + fname + '\n    to   ' + newname)
            os.rename(fname, newname)
        else :
            print('do nothing with ' + fname)
#============================================
# DO IT
# First pass is a dry run (action="print"); the second actually renames the files.
stats, stats_ordered = createBalancedClassesWithFunc(K_SPECTDIR, '.*/([1-5]).*', spectFile2Centroid, k_soundsPerClass, action="print")
stats, stats_ordered = createBalancedClassesWithFunc(K_SPECTDIR, '.*/([1-5]).*', spectFile2Centroid, k_soundsPerClass, action="move")
| Python | 100 | 38.009998 | 135 | /utils/Centroid2ndaryClassMaker.py | 0.633778 | 0.625064 |
scissorhands/pynal | refs/heads/master | from __future__ import print_function
import json
from etl import Etl
def lambda_connect(event, context):
    """AWS Lambda entry point: run the full analytics ETL pass.

    `event` and `context` follow the Lambda calling convention but are not
    used. Returns a fixed marker string.
    """
    Etl().retrieve_all_stats()
    return 'pickle rick'
if __name__ == '__main__':
    # Local run outside the Lambda runtime; event/context are not needed.
    # (Fix: the original line was fused with dataset metadata residue.)
    lambda_connect(None, None)
scissorhands/pynal | refs/heads/master | import analytics as service
class Requester:
    """Thin wrapper around the Google Analytics Reporting API service.

    Builds one authorized service object and exposes convenience methods
    that fetch the same session metrics grouped by different dimensions
    (hostname, city, region, device category).
    """

    def __init__(self):
        self.analytics = service.initialize_analyticsreporting()
        # Metrics shared by every report request.
        self.general_stats_metrics = [
            {'expression': 'ga:sessions'},
            {'expression': 'ga:pageViews'},
            {'expression': 'ga:avgTimeOnPage'},
            {'expression': 'ga:exits'},
            {'expression': 'ga:organicSearches'}
        ]

    def _request(self, dimension_names, from_date, to_date):
        """Issue one report request grouped by `dimension_names` over the date range."""
        dimensions = [{'name': name} for name in dimension_names]
        return service.generic_request(self.analytics,
                                       self.general_stats_metrics,
                                       dimensions,
                                       from_date, to_date)

    def get_hostname_stats(self, from_date = '7daysAgo', to_date = 'yesterday' ):
        return self._request(['ga:hostname', 'ga:date'], from_date, to_date)

    def get_city_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):
        return self._request(['ga:hostname', 'ga:city', 'ga:date'], from_date, to_date)

    def get_region_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):
        return self._request(['ga:hostname', 'ga:region', 'ga:date'], from_date, to_date)

    def get_devices_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):
        return self._request(['ga:hostname', 'ga:deviceCategory', 'ga:date'], from_date, to_date)
scissorhands/pynal | refs/heads/master | import dbconfig
import mysql.connector as _connector
from mysql.connector import errorcode as dberror
class Connector:
    """Small convenience wrapper around a mysql.connector connection."""

    def __init__(self):
        """Open the configured connection.

        On failure a short reason is printed and `cnx`/`cur` stay None.
        """
        self.cnx = self.cur = None
        try:
            self.cnx = _connector.connect(**dbconfig.config)
        except _connector.Error as err:
            if err.errno == dberror.ER_ACCESS_DENIED_ERROR:
                print('Invalid credentials')
            elif err.errno == dberror.ER_BAD_DB_ERROR:
                print('Invalid database')
            else:
                print(err)
        else:
            self.cur = self.cnx.cursor()

    def test_select(self):
        """Smoke test: print the first ten rows of the users table."""
        self.cur.execute("SELECT * FROM users AS U LIMIT 10")
        print()
        print('{0:3} {1:25} {2}'.format('ID:', 'EMAIL:', 'LANG:'))
        for record in self.cur.fetchall():
            print('{0:3} {1:25} {2}'.format(record[0], record[2], record[4]))
        print()

    def insert_ignore(self, table, data_dictionary):
        """INSERT IGNORE `data_dictionary` into `table`; return lastrowid (None on error)."""
        insert_id = None
        column_names = data_dictionary.keys()
        keys = "(" + ", ".join("`" + name + "`" for name in column_names) + ")"
        values = "(" + ", ".join("%(" + str(name) + ")s" for name in column_names) + ")"
        query = ("INSERT IGNORE INTO {0}\n"
                 "{1}\n"
                 "VALUES {2}".format(table, keys, values))
        try:
            self.cur.execute(query, data_dictionary)
            self.cnx.commit()
            insert_id = self.cur.lastrowid
        except Exception as err:
            print(err)
        return insert_id

    def serv_destory(self):
        """Close the cursor and connection if they were opened.

        (Name kept as-is, typo included: external callers rely on it.)
        """
        if self.cur:
            self.cur.close()
        if self.cnx:
            self.cnx.close()
        print("Connection destroyed")

    def main(self):
        """Manual test: insert one sample row, then close without the destroy message."""
        row_id = self.insert_ignore('analytics_hostname_stats', {
            'hostname': 'hostname',
            'sessions': 1,
            'page_views': 1,
            'avg_time_on_page': 2.1,
            'exits': 3,
            'organic_searches': 5,
            'date': '2017-07-31',
        })
        if self.cur:
            self.cur.close()
        if self.cnx:
            self.cnx.close()
if __name__ == '__main__':
    # Manual smoke test entry point.
    # (Fix: the last line was fused with dataset metadata residue.)
    connector = Connector()
    connector.main()
scissorhands/pynal | refs/heads/master | from requester import Requester
from dbconnector import Connector
import json
import datetime as dt
class Etl:
    """Extract Google Analytics reports via Requester and load them into MySQL."""

    # Maps a stats label to (Requester method name, destination table).
    _REPORTS = {
        'hostname': ('get_hostname_stats', 'analytics_hostname_stats'),
        'city': ('get_city_stats', 'analytics_city_stats'),
        'region': ('get_region_stats', 'analytics_region_stats'),
        'devices': ('get_devices_stats', 'analytics_device_stats'),
    }

    def __init__(self):
        self.req = Requester()
        self.connector = Connector()

    def get_report_dictionary(self, report):
        """Flatten one API report into its header/row pieces, tolerating missing keys."""
        columnHeader = report.get('columnHeader', {})
        return {
            'columnHeader': columnHeader,
            'dimensionHeaders': columnHeader.get('dimensions', []),
            'metricHeaders': columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []),
            'rows': report.get('data', {}).get('rows', [])
        }

    def formatted_output(self, input):
        """Convert a raw Reporting API response into a list of flat dicts.

        Header names are stripped of their 'ga:' prefix; 'date' dimensions
        (YYYYMMDD) are reformatted to YYYY-MM-DD. Metric values are kept as
        the strings the API returned.
        """
        stats = []
        for report in input.get('reports', []):
            rdictionary = self.get_report_dictionary(report)
            for row in rdictionary['rows']:
                stat = {}
                dimensions = row.get('dimensions', [])
                dateRangeValues = row.get('metrics', [])
                for header, dimension in zip(rdictionary['dimensionHeaders'], dimensions):
                    hd = header.replace('ga:', '')
                    if hd == 'date':
                        dimension = dt.datetime.strptime(dimension, '%Y%m%d').strftime('%Y-%m-%d')
                    stat[hd] = dimension
                for i, values in enumerate(dateRangeValues):
                    for metricHeader, value in zip(rdictionary['metricHeaders'], values.get('values')):
                        stat[metricHeader.get('name').replace('ga:', '')] = value
                stats.append(stat)
        return stats

    def _retrieve(self, label, destroy_after):
        """Fetch one report kind since 2017-01-01 and insert its rows (DRY helper)."""
        method_name, table = self._REPORTS[label]
        print('getting {} stats'.format(label))
        report = getattr(self.req, method_name)('2017-01-01')
        for row in self.formatted_output(report):
            self.connector.insert_ignore(table, row)
        if destroy_after:
            self.connector.serv_destory()

    def retrieve_all_stats(self, destroy_after=True):
        for label in ('hostname', 'city', 'region', 'devices'):
            self._retrieve(label, False)
        if destroy_after:
            self.connector.serv_destory()

    def retrieve_hostname_stats(self, destroy_after=True):
        self._retrieve('hostname', destroy_after)

    def retrieve_city_stats(self, destroy_after=True):
        self._retrieve('city', destroy_after)

    def retrieve_region_stats(self, destroy_after=True):
        self._retrieve('region', destroy_after)

    def retrieve_devices_stats(self, destroy_after=True):
        self._retrieve('devices', destroy_after)
def main():
    """Run every analytics extraction once and close the DB connection."""
    Etl().retrieve_all_stats()
if __name__ == '__main__':
    # (Fix: the last line was fused with dataset metadata residue.)
    main()
scissorhands/pynal | refs/heads/master | from requester import Requester
import json
# Module-level setup runs once per Lambda container (cold start) and is
# reused across warm invocations.
req = Requester()  # shared Google Analytics client
localTest = False  # set True by the __main__ block to echo results locally
print('Loading function')
def respond(err, res=None):
    """Build an API Gateway proxy response.

    err: an exception (or None). When set -> HTTP 400 with the exception
    text as body; otherwise HTTP 200 with `res` JSON-encoded.
    """
    return {
        'statusCode': '400' if err else '200',
        # Bug fix: exceptions have no `.message` attribute in Python 3;
        # str(err) yields the message on both Python 2 and 3.
        'body': str(err) if err else json.dumps(res),
        'headers': {
            'Content-Type': 'application/json',
        },
    }
def lambda_handler(event, context):
    """Dispatch an API Gateway request to the matching Requester method.

    Only GET is supported; the target method comes from the `method`
    query-string parameter and must be one of the whitelisted stats getters.
    """
    operation = event['httpMethod']
    if operation not in {'GET'}:
        return respond(ValueError('Unsupported http method "{}"'.format(operation)))
    method = event['queryStringParameters']['method']
    allowed_methods = {
        'get_hostname_stats',
        'get_city_stats',
        'get_region_stats',
        'get_devices_stats'
    }
    if method not in allowed_methods:
        return respond(ValueError("Unsupported method '{}'".format(method)))
    stats = getattr(req, method)()
    if localTest:
        print(stats)
    return respond(None, stats)
if __name__ == '__main__':
    # Local smoke test: simulate an API Gateway GET for hostname stats.
    # (Fix: the last line was fused with dataset metadata residue.)
    localTest = True
    event = {
        'httpMethod': 'GET',
        'queryStringParameters': {
            'method': 'get_hostname_stats'
        }
    }
    lambda_handler(event, None)
jesbarlow/CP1404_practicals | refs/heads/master |
# Look up a colour's hex code by name, then list the whole table.
# (Fix: the last line was fused with dataset metadata residue.)
COLOUR_CODES = {"CadetBlue2": "#8ee5ee", "CornflowerBlue": "#6495ed", "Chartreuse4": "#458600",
                "DarkOliveGreen3": "#a2cd5a", "DarkTurquoise": "#00ced1", "Gold1": "#ffd700",
                "IndianRed2": "#eeb363", "PaleVioletRed2": "#ee799f", "RosyBrown4": "#8b6969",
                "Snow2": "#eee9e9"}

colour = input("Enter the colour name:")
if colour in COLOUR_CODES:
    print("Colour: {} Hex Code: {}\n".format(colour, COLOUR_CODES[colour]))
else:
    print("Invalid Colour")
# NOTE(review): this second input's value is never used -- presumably a loop
# was intended; left as-is to preserve behaviour.
colour = input("Enter the colour name:")
for key, value in COLOUR_CODES.items():
    print("Colour Name: {:<15} Hex Code: {:<7}".format(key, value))
# Write the user's name to name.txt, read it back, then sum the two
# numbers stored in numbers.txt.
name = input("What is your name?: ")
with open('name.txt', 'w') as name_file:
    name_file.write(name)

with open('name.txt', 'r') as name_file:
    # Bug fix: the original discarded the result of read().strip() and
    # printed the in-memory variable instead of the file contents.
    name = name_file.read().strip()
print("Your name is", name)

with open('numbers.txt', 'r') as numbers_file:
    num_one = int(numbers_file.readline())
    num_two = int(numbers_file.readline())
print(num_one + num_two)
from collections import Counter

# Count how many times each word appears in a sentence typed by the user.
# Counter preserves first-seen order, so the printed order matches the
# original hand-rolled dict counting.
sentence = input("Enter a sentence:")
counting = Counter(sentence.split())

print("Text: {}".format(sentence))
for key, value in counting.items():
    print("{} : {}".format(key, value))
jesbarlow/CP1404_practicals | refs/heads/master | import random
def main():
    """Ask how many quick picks to generate, then print them."""
    number_of_picks = int(input("How many quick picks? "))
    print_quickpicks(number_of_picks)
def print_quickpicks(quick_picks):
    """Print `quick_picks` lines, each a sorted pick of 6 unique lotto numbers (1-45),
    zero-padded to two digits.

    Bug fix: random.randrange could repeat a number within one pick;
    random.sample draws 6 distinct numbers, as a real quick pick requires.
    """
    for _ in range(quick_picks):
        numbers = sorted(random.sample(range(1, 46), 6))
        print(*('%.2d' % number for number in numbers))
# Runs immediately on import: no __main__ guard in this prac.
main()
| Python | 16 | 22.4375 | 64 | /prac_4/quickpick_lottery_generator.py | 0.619048 | 0.603175 |
"""
CP1404/CP5632 - Practical
Answer the following questions:
1. When will a ValueError occur?
    - a ValueError occurs when the input cannot be parsed as an integer, for example the
    letter a or a decimal like 1.5. (Negative numbers such as -3 are parsed fine.)
2. When will a ZeroDivisionError occur?
- this will occur whenever the user inputs 0
3. Could you change the code to avoid the possibility of a ZeroDivisionError?
    - yes: use input validation with a while loop that keeps asking the user to
    re-enter a number until it is non-zero
"""
# Demonstrates catching specific exceptions from user input and division.
# (Fix: the last line was fused with dataset metadata residue.)
try:
    numerator = int(input("Enter the numerator: "))
    denominator = int(input("Enter the denominator: "))
    fraction = numerator / denominator
    print(fraction)
except ValueError:
    # Raised by int() when the text is not a valid integer.
    print("Numerator and denominator must be valid numbers!")
except ZeroDivisionError:
    # Raised by the division when the denominator is 0.
    print("Cannot divide by zero!")
print("Finished.")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.