index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
21,873
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/testing.py
|
# ๋ค์ด๋ฒ ์ฃผ์
import csv, codecs
import urllib
import datetime
import time
import base64
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import requests
with codecs.open("jinhyun.csv","w", encoding='euc-kr') as fp: # ํ์ผ ์
์ถ๋ ฅ ๋์ ๋ฐฉ์ง ์ค๋ฅ ํจ๊ณผ ํ์
writer = csv.writer(fp, delimiter=",", quotechar='"') # writer๋ฅผ ์ ์ธํ๊ณ
writer.writerow(["date", "final_price", "nomal_price", "high_price", "low_price","trade_cnt"])
# ํค๋ ์ ๋ณด ์ฃผ์
header = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'}
# url ๋ง์ง๋ง ๋ถ๋ถ
stockItem = '035810'
url = 'http://finance.naver.com/item/sise_day.nhn?code='+stockItem
request = urllib.request.Request(url, headers = header)
contents = urllib.request.urlopen(request)
#html = urlopen(url, headers= header)
source = contents.read()
source1 = source.decode('euc-kr')
print(source1)
soup = BeautifulSoup(source1, 'html.parser')
maxPage = soup.find_all("table", align="center")
mp = maxPage[0].find_all("td", class_="pgRR")
mpNum = int(mp[0].a.get('href')[-3:])
for page in range(1,300):
url = 'http://finance.naver.com/item/sise_day.nhn?code='+stockItem+'&page='+str(page)
request = urllib.request.Request(url, headers=header)
contents = urllib.request.urlopen(request)
source = contents.read()
source1 = source.decode('euc-kr')
soup = BeautifulSoup(source1, "html.parser")
srlists=soup.find_all("tr")
isCheckNone=None
if((page%1)==0):
time.sleep(1.5)
for i in range(1,len(srlists)-1):
if(srlists[i].span != isCheckNone):
print(srlists[i].td.text)
with codecs.open("jinhyun.csv", "a", encoding= "euc_kr ") as fp:
writer = csv.writer(fp, delimiter=",", quotechar='"')
writer.writerow([
srlists[i].find_all("td",align="center")[0].text
, srlists[i].find_all("td",class_="num")[0].text
, srlists[i].find_all("td",class_="num")[2].text
, srlists[i].find_all("td",class_="num")[3].text
, srlists[i].find_all("td",class_="num")[4].text
, srlists[i].find_all("td",class_="num")[5].text
])
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,874
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/blogview.py
|
from .models import state1
class blogView():
def __init__(self):
self.a = 0
def blog_all_query(self, ID):
query = state1.objects.filter(login_id=ID)
return query
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,875
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/urls.py
|
from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^tests', views.tests, name='tests'),
url(r'^navertest/$', views.navertest, name='navertest'),
url(r'^product',views.product, name='product'),
url(r'^login', views.login, name='login'),
url(r'^signup', views.signup, name='signup'),
url(r'^d3', views.d3, name='d3'),
url(r'^wating', views.wating, name='wating'),
url(r'^waiting', views.waiting, name='waiting'),
url(r'^idcheck', views.idcheck, name='idcheck'),
url(r'^auth_login', views.auth_login, name='auth_login'),
url(r'^kmeans', views.kmeans, name='kmeans'),
url(r'^practice/$', views.practice, name='practice'),
url(r'^processing/$',views.processing, name='processing'),
url(r'^complete$', views.complete, name='complete'),
url(r'^positive$', views.positive, name='positive'),
url(r'^logout$', views.logout, name='logout'),
url(r'^bloglist$', views.bloglist, name='bloglist'),
url(r'^newslist$', views.newslist, name='newslist'),
url(r'^alllist$', views.alllist, name='alllist'),
url(r'^sendmail$', views.sendmail, name='sendmail'),
url(r'^task$', views.task,name='task'),
url(r'^state_save$', views.state_save, name='state_save'),
url(r'^twitter$', views.twitter, name='twitter'),
url(r'^twitterlist$', views.twitterlist, name='twitterlist'),
url(r'^admin$', views.admin, name='admin'),
url(r'^analysis$', views.analysis, name='analysis'),
url(r'^PNjudgment$', views.PNjudgment, name='PNjudgment'),
url(r'^blog_result$', views.blog_result, name='blog_result'),
url(r'^news_result$', views.news_result, name='news_result'),
url(r'^twitter_result$', views.twitter_result, name='twitter_result')
]
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,876
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/views.py
|
# Create your views here.
from django.shortcuts import render
from bs4 import BeautifulSoup
from django.http import JsonResponse
from django.http import HttpResponse
from operator import eq
from django.db.models import Q
from .models import state1,title,KBS,SBS,MBC,JTBC,YTN,dailyEconomy,moneyToday,eDaily,seoulEconomy,koreaEconomy,naver,Emoticon,word,news_count,naver_count,daum_count
from .models import Signup
from random import *
import time, threading
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from django.shortcuts import redirect
from .forms import UserForm, LoginForm
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from django.template import RequestContext
from django.views import View
from django.utils.encoding import python_2_unicode_compatible
from .signup import *
from .blogview import *
from .daum_blog import *
from .naver_blog import *
from .news import *
from .Analysis import *
import numpy as np
import pandas as pd
import datetime
import os
import sys
import json
import math
import requests
import django
import re
import csv,codecs
import uuid
from time import sleep
from .forms import DocumentForm
from importlib import import_module
from .models import Document
from django.conf import settings
#from konlpy.utils import pprint
from multiprocessing import Pool
from datetime import datetime
from django.core.paginator import Paginator
from django.template import loader
django.setup()
global BC8
def index(request):
return render(request, 'agri_crawler/index.html',{})
def kmeans(request):
return render(request, 'agri_crawler/kmeans.html',{})
def loading(request):
return render(request, 'agri_crawler/loading.html',{})
def login(request):
return render(request, 'agri_crawler/login.html',{})
def signup(request):
return render(request, 'agri_crawler/signup.html',{})
def practice(request):
return render(request, 'agri_crawler/practice.html',{})
def logout(request):
request.session.flush()
return render(request, 'agri_crawler/login.html',{})
def positive(request):
keyword = request.POST.get('keyword')
nickname = request.POST.get('nickname')
print(keyword)
print(nickname)
daumblog= daum_blog.objects.filter(keyword= keyword, nickname=nickname)
naverblog=naver.objects.filter(keyword=keyword,nickname=nickname)
kbs = KBS.objects.filter(keyword=keyword,nickname=nickname)
mbc = MBC.objects.filter(keyword=keyword,nickname=nickname)
sbs = SBS.objects.filter(keyword=keyword,nickname=nickname)
jtbc = JTBC.objects.filter(keyword=keyword,nickname=nickname)
ytn = YTN.objects.filter(keyword=keyword, nickname=nickname)
daily = dailyEconomy.objects.filter(keyword= keyword, nickname=nickname)
money = moneyToday.objects.filter(keyword=keyword, nickname=nickname)
eday = eDaily.objects.filter(keyword=keyword, nickname=nickname)
seoul = seoulEconomy.objects.filter(keyword=keyword, nickname=nickname)
korea = koreaEconomy.objects.filter(keyword=keyword, nickname=nickname)
f = open('output.txt', 'w', encoding='utf-8')
for i in daumblog:
f.write(str(i.sub_body.main_body))
for i in naverblog:
f.write(str(i.sub_body.main_body))
for i in kbs:
f.write(str(i.sub_body.main_body))
for i in mbc:
f.write(str(i.sub_body.main_body))
for i in sbs:
f.write(str(i.sub_body.main_body))
for i in jtbc:
f.write(str(i.sub_body.main_body))
for i in ytn:
f.write(str(i.sub_body.main_body))
for i in daily:
f.write(str(i.sub_body.main_body))
for i in money:
f.write(str(i.sub_body.main_body))
for i in eday:
f.write(str(i.sub_body.main_body))
for i in seoul:
f.write(str(i.sub_body.main_body))
for i in korea:
f.write(str(i.sub_body.main_body))
f.close()
return render(request, 'agri_crawler/positive.html',{})
def bloglist(request): # ๊ฐ์ธ ๋ธ๋ก๊ทธ ์์ง ํํฉ ํ์
name = request.POST.get('User')
wating = state1.objects.filter(type_state=1, login_id=str(name))
return render(request, 'agri_crawler/waiting.html',{'waiting':wating})
def newslist(request): # ๊ฐ์ธ ๋ด์ค ์์ง ํํฉ ํ์
name = request.POST.get('User')
wating = state1.objects.filter(type_state=0, login_id=name)
return render(request, 'agri_crawler/waiting1.html',{'waiting':wating})
def alllist(request):
wating = state1.objects.all()
return render(request, 'agri_crawler/waiting2.html',{'waiting':wating})
from django.core.mail import send_mail
def sendmail(request):
name = request.POST.get('name')
email = request.POST.get('email')
message = request.POST.get('message')
send_mail(name, message, email, ['thdtdmgus0@gmail.com'], fail_silently=False)
return render(request, 'agri_crawler/index.html')
def waiting(request): # ๋ด์ค
text = request.POST.get('text1')
start_date = request.POST.get('start_date1')
end_date = request.POST.get('end_date1')
KBS = request.POST.get('KBS')
MBC = request.POST.get('MBC')
SBS = request.POST.get('SBS')
JTBC = request.POST.get('JTBC')
YTN = request.POST.get('YTN')
Daily = request.POST.get('daily')
Money = request.POST.get('money')
eDaily = request.POST.get('eDaily')
seoul = request.POST.get('seoul')
korea = request.POST.get('korea')
title = request.POST.get('t')
date = request.POST.get('d')
keyword = request.POST.get('k')
body = request.POST.get('b')
emoticon = request.POST.get('e')
comment = request.POST.get('c')
recommend = request.POST.get('r')
ID = request.POST.get('id')
now = datetime.now()
today_date = str(now.year)+"."+str(now.month)+"."+str(now.day)
State1 = state1()
State1.keyword = text
State1.start_date = start_date
State1.end_date = end_date
State1.today_date = today_date
State1.login_id=ID
State1.state = 0
State1.type_state=2
State1.save()
condition = State1.state
query = state1.objects.filter(login_id= ID)
waiting = query
#page_row_count = 5
#page_display_count = 5 # ํ๋ฉด์ ๋ณด์ด๋ display ๊ฐ์
Users = state1.objects.filter(login_id=ID)
data={'start_date': start_date, 'end_date':end_date, 'title':title, 'date':date, 'keyword':text, 'body':body, 'emoticon':emoticon, 'comment':comment, 'recommend':recommend}
return render(
request,
'agri_crawler/waiting1.html',
{
'waiting':waiting,
'data':data,
'Users':Users
}
)
def wating(request): #๋ธ๋ก๊ทธ
text1 = request.POST.get('text1')#ํค์๋
start_date = request.POST.get('start_date1') #์์๊ธฐ๊ฐ
end_date = request.POST.get('end_date1') #์ข
๋ฃ๊ธฐ๊ฐ
naver_blog = request.POST.get('naver')
daum_blog = request.POST.get('daum')
title = request.POST.get('t')
date = request.POST.get('d')
keyword = request.POST.get('k')
body = request.POST.get('b')
emoticon = request.POST.get('e')
comment = request.POST.get('c')
recommend = request.POST.get('r')
ID = request.POST.get('id')
now = datetime.now()
today_date = str(now.year)+"."+str(now.month)+"."+str(now.day)
State1 = state1()
State1.keyword = text1
State1.start_date = start_date
State1.end_date = end_date
State1.today_date = today_date
State1.login_id=ID
State1.state = 0
State1.type_state=3
State1.save()
query = state1.objects.filter(login_id = ID)
waiting = query
data = {'daum_blog':daum_blog,'naver_blog': naver_blog,'text1': text1,'start_date': start_date,'end_date': end_date, 'title':title,'date': date, 'keyword':keyword,'body': body, 'emoticon':emoticon, 'comment':comment,'recommend': recommend}
return render(request, 'agri_crawler/waiting.html',{'waiting':waiting, 'data':data})
#def negative(request): # ๊ธ/๋ถ์ ํ๋จํ๊ฒ ํ๋ ๋ถ๋ถ
#positive=0
#negative=0
#neutral=0
#f = open('result.txt', 'r', encoding='utf8')
#lines = f.readlines()
#for i,line in enumerate(lines):
# if i==0:
# kw = line
# continue
# elif '๋์์' in line:
# continue
# elif 'function' in line:
# continue
# elif '//' in line:
# continue
# elif len(line.split())==0:
# continue
# sort = classfier()
# if sort.naive_classfier(str(line)) == 1:
# positive = positive+1
# elif sort.naive_classfier(str(line))==0:
# negative = negative+1
# elif sort.naive_classfier(str(line))==-1:
# neutral = neutral+1
#f.close()
#data = {'positive':positive, 'negative':negative, 'kw' :kw, 'neutral':neutral}
#return render(request, 'vegetable/googlechartnegative.html',{'data':data})
def idcheck(request):
id = request.POST.get('id',None)
data ={
'is_taken':Signup.objects.filter(ID=id).exists()
}
return JsonResponse(data)
#def identify(request):
# cits = Signup.objects.all().filter(ID="์ก์งํ")
# return render(request, 'vegetable/identify.html',{})
def d3(request):
id = request.POST.get('id')
print(id)
keys =[]
values = []
query = word.objects.filter(user_id = id).order_by('-value')[:10]
for i in query:
key =i.key
keys.append(key)
print(keys)
value = int(i.value)
values.append(value)
print(values)
json_keys = json.dumps(keys)
return render(request,'agri_crawler/d3.html', {'keys':json_keys, 'values':values})
def auth_login(request):
id = request.POST.get('username',None)
password = request.POST.get('password',None)
if id =="admin" and password=="1234":
State_model = state1.objects.all()
Admin = request.POST['username']
request.session['admin']=Admin
return render(request, 'agri_crawler/admin.html',{'State':State_model})
else:
#is_id = Signup.objects.filter(ID=id).exists()
#is_password = Signup.objects.filter(password=password).exists()
is_id = Signup.objects.filter(ID =id).exists()
is_password = Signup.objects.filter(password = password).exists()
data= {'username':is_id, 'password':is_password}
if is_id == True and is_password == True:
username = request.POST['username']
password = request.POST['password']
request.session['username'] = username
return redirect('index')
else:
return redirect('login')
def complete(request):
sign = signUp()
ID = request.POST.get('ID')
password = request.POST.get('Password')
email = request.POST.get('email')
sign.post(ID, password, email)
return render(request, 'agri_crawler/login.html',{})
class url_collector:
def __init__(self):
self.req_header = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'}
self.url = "https://search.naver.com/search.naver?ie=utf8&where=news"
def add_property(self, Str, point_start_date, point_end_date, start):
self.param = {
'query': Str.encode('utf-8').decode('utf-8'),
'sm':'tab_pge',
'sort': '2',
'photo':'0',
'field':'0',
'pd':'3',
'ds': point_start_date,
'de': point_end_date,
'nso': 'so:r,p:',
'start': str(10*start+1)
}
return self.param
def login_session():
with requests.Session() as s:
req = s.get("https://nid.naver.com/nidlogin.login")
html = req.text
header = req.headers
status = req.status_code
is_ok = req.ok
def processing():
start_time = time.time()
pool = Pool(precesses=32)
pool.map(tests)
print(time.time()-start_time)
def task(request):
return render(request, 'agri_crawler/waiting1.html')
def state_save(Str, start_date, end_date, ID, type):
now = datetime.now()
today_date = str(now.year) + "." + str(now.month) + "." + str(now.day)
State1 = state1()
State1.keyword = Str
State1.start_date = start_date
State1.end_date = end_date
State1.today_date = today_date
State1.login_id = ID
State1.state = 0
if type == 0:
State1.type_state=0
elif type == 1:
State1.type_state=1
else:
State1.type_state=2
State1.save()
def tests(request):
if request.method =='POST':
Str = str(request.POST.get('text1'))
start_date = request.POST.get('start_date1')
end_date = request.POST.get('end_date1')
start = start_date.replace("-","")
end = end_date.replace("-","")
title = request.POST.get('t')
main_body = request.POST.get('b')
date = request.POST.get('d')
keyword = request.POST.get('k')
emoticon = request.POST.get('e')
comment = request.POST.get('c')
recommend = request.POST.get('l')
ID = request.POST.get('id')
media={}
if request.POST.get('KBS') == "KBS":
media['kbs']=True
else:
media['kbs']=False
if request.POST.get('MBC') == "MBC":
media['mbc']=True
else:
media['mbc']=False
if request.POST.get('SBS') == "SBS":
media['sbs']=True
else:
media['sbs']=False
if request.POST.get('JTBC') == "JTBC":
media['jtbc']=True
else:
media['jtbc']=False
if request.POST.get('YTN') == "YTN":
media['ytn']=True
else:
media['ytn']=False
if request.POST.get('daily') == "daily":
media['daily']=True
else:
media['daily']=False
if request.POST.get('money') == "money":
media['money']=True
else:
media['money']=False
if request.POST.get('eDaily') == "eDaily":
media['eDaily']=True
else:
media['eDaily']=False
if request.POST.get('seoul') == "seoul":
media['seoul']=True
else:
media['seoul']=False
if request.POST.get('korea') == "korea":
media['korea']=True
else:
media['korea']=False
state_save(Str, start_date, end_date, ID,0)
query = state1.objects.filter(login_id=ID, type_state=0)
waiting = query
name = state1.objects.filter(login_id=ID).order_by('-id').first()
number = name.id
news_collector = news_crawler(Str, start, end, ID,media, title, main_body, date, keyword, emoticon, comment, recommend,number)
news_collector.start()
data = {'text1': Str,'start_date': start,'end_date': end, 'title':title,'date': date, 'keyword':Str,'body': main_body}
return render(request,'agri_crawler/waiting1.html',{'waiting':waiting,'data':data})
def product(request):
return render(request, 'agri_crawler/product_0818.html',{})
def navertest(request):
global bkw
if request.method == 'POST': # ๋ง์ฝ POST ๋ฐฉ์์ผ๋ก ์ ๋ฌ์ด ๋์์ผ๋ฉด
if request.POST.get('naver'):
Str = str(request.POST.get('text1'))
start_date = request.POST.get('start_date1')
end_date = request.POST.get('end_date1')
start = start_date.replace("-","")
end = end_date.replace("-","")
title = request.POST.get('t')
main_body = request.POST.get('b')
date = request.POST.get('d')
keyword = request.POST.get('k')
url = request.POST.get('url')
ID = request.POST.get('id')
state_save(Str, start_date, end_date, ID,1)
print(Str)
query = state1.objects.filter(login_id=ID, type_state=1)
number = state1.objects.filter(login_id=ID).order_by('-id').first()
naver_collector = naver_crawler(Str,start,end,title,main_body,date,keyword,url,ID,number)
naver_collector.start()
data = {'text1': Str, 'start_date': start, 'end_date': end, 'title': title, 'date': date, 'keyword': Str,
'body': main_body}
return render(request, 'agri_crawler/waiting.html',{'waiting':query, 'data':data})
if request.POST.get('daum'):
Str = str(request.POST.get('text1')) # ๊ฒ์์ด
bkw = Str
start_date = request.POST.get('start_date1') # ์์์๊ฐ
end_date = request.POST.get('end_date1') # ๋์ฐฉ์๊ฐ
start = start_date.replace("-","") # -์ ์ ๊ฑฐ
end = end_date.replace("-","")
title = request.POST.get('t')
main_body = request.POST.get('b')
date = request.POST.get('d')
key = request.POST.get('k')
tag = request.POST.get('tag')
comment = request.POST.get('comment')
ID = request.POST.get('id')
state_save(Str, start_date, end_date, ID,1)
print(Str)
print(ID)
query = state1.objects.filter(login_id=ID, type_state=1)
name = state1.objects.filter(login_id=ID).order_by('-id').first()
print(name.id)
waiting = query
daum_collector = daum_crawler(bkw,start,end,ID,title,main_body,datetime,key,tag,comment)
daum_collector.start()
data = {'text1': Str,'start_date': start,'end_date': end, 'title':title,'date': date, 'keyword':Str,'body': main_body}
return render(request, 'agri_crawler/waiting.html', {'waiting':waiting, 'data':data})
def soup_text(text): # ํ๋ฃจ์น๋ง
url = "https://search.daum.net/search?w=social&m=web&sort_type=socialweb&nil_search=btn&DA=STC&enc=utf8&q="+str(text)
html = requests.get(url)
soup = BeautifulSoup(html.content, "html.parser")
return soup
def other_soup_text(text, nickname, content, time, ID):
today = datetime.now()
yesterday_day = today.day-1
if yesterday_day<1:
yesterday_day=31
today_mon=today.month
today_day=today.day
today_hour=today.hour
today_min=today.minute
today_sec=today.second
if today.month>=1 and today.month<10:
today_mon = "0"+str(today.month)
if today.day >=1 and today.day<10:
today_day = "0"+str(today.day)
if today.hour >=1 and today.hour<10:
today_hour ="0"+str(today.hour)
Today= str(today.year)+str(today_mon)+str(today.day)+str(today.hour)+str(today.minute)+str(today.second)
yesterday = str(today.year)+str(today_mon)+str(yesterday_day)+str(today.hour)+str(today.minute)+str(today.second)
print(Today)
print(yesterday)
url = "https://search.daum.net/search?w=social&m=web&sort_type=socialweb&nil_search=btn&DA=STC&enc=utf8&q="+str(text)+"&period=d&sd="+str(yesterday)+"&ed="+str(Today)
html = requests.get(url)
soup = BeautifulSoup(html.content, "html.parser")
div_list = soup.findAll("div",{"class":"box_con"})
for list in div_list:
id = list.find("div",{"class":"wrap_tit"}).text
content = list.find("span",{"class":"f_eb desc content_link"}).text
time = list.find("span",{"class":"f_nb"}).text
print(id)
print(content)
twitter_value = Twitter()
if nickname !="nickname":
id=""
if content != "content":
content=""
if time != "time":
time=""
twitter_value.userId=ID
twitter_value.id=id
twitter_value.content=content
twitter_value.time = time
twitter_value.save()
def twitter(request):
text = request.POST.get('text2')
one_day =request.POST.get('one_day')
all = request.POST.get('all')
nickname = request.POST.get('nickname')
content = request.POST.get('content')
time = request.POST.get('time')
ID= request.POST.get('id')
print(ID)
cnt= 0
if all == "all":
soup = soup_text(text)
div_list = soup.findAll("div", {"class": "box_con"})
for list in div_list:
id = list.find("div", {"class": "wrap_tit"}).text
content = list.find("span",{"class","f_eb desc content_link"}).text
time = list.find("span",{"class":"f_nb"}).text
twitter_value = Twitter()
if nickname !="nickname":
id= ""
if content !="content":
content=""
if time !="time":
time=""
twitter_value.userId= ID
twitter_value.Id = id
twitter_value.content=content
twitter_value.time= time
twitter_value.save()
cnt = cnt+1
state_save(text, 1,1,ID,2)
query = state1.objects.filter(login_id=ID, type_state=2)
elif one_day == "one_day":
other_soup_text(text, nickname, content, time, ID)
name = state1.objects.filter(login_id=ID).order_by('-id').first()
name.state = int(name.state) + cnt
name.save()
return render(request, 'agri_crawler/twitter.html',{'waiting':waiting})
def twitterlist(request):
return render(request, 'agri_crawler/twitter.html',{'waiting':waiting})
from .models import Twitter
def admin(request):
State_model = state1.objects.all()
request.session['admin'] = "admin"
daum_num = daum_blog.objects.all().count()
naver_num = naver.objects.all().count()
kbs_num = KBS.objects.all().count()
mbc_num = MBC.objects.all().count()
sbs_num = SBS.objects.all().count()
jtbc_num = JTBC.objects.all().count()
ytn_num = YTN.objects.all().count()
money = moneyToday.objects.all().count()
seoul = seoulEconomy.objects.all().count()
edaily = eDaily.objects.all().count()
korea = koreaEconomy.objects.all().count()
every = dailyEconomy.objects.all().count()
twit = Twitter.objects.all().count()
return render(request,
'agri_crawler/admin.html',
{'State':State_model,
'daum':daum_num,
'naver':naver_num,
'kbs':kbs_num,
'mbc':mbc_num,
'sbs':sbs_num,
'jtbc':jtbc_num,
'ytn':ytn_num,
'money':money,
'seoul':seoul,
'edaily':edaily,
'korea':korea,
'every':every,
'twit':twit
})
def analysis(request):
Bayes = BayesianFilter()
total_sentence = 0
print(total_sentence)
username = request.POST.get('id')
print(username)
f = open('output.txt', 'r', encoding='utf-8')
rline = f.readlines() # ์ ์ฒด ํ
์คํธ ์ฝ์ด์ค๊ธฐ
tline = f.read()
for i in rline:
print("๊ธฐ์ฌ:", i[:-1])
results_list = Bayes.split(tline)
all_count = Bayes.all_count(results_list)
print(all_count)
for key, value in all_count.items():
Word = word()
Word.user_id = username
Word.key = key
Word.value=value
Word.save()
return render(request, 'agri_crawler/product_0818.html',{})
def PNjudgment(request):
Bayes = BayesianFilter()
username = request.POST.get('id')
print(username)
f = open('output.txt', 'r', encoding='utf-8')
while True:
line = f.readline()
print(line)
if not line:
break
results_list = Bayes.split(line)
print(results_list)
Fit(Bayes)
return render(request, 'agri_crawler/product_0818.html', {})
def Fit(Bayes):
positive_read = open('positive1.txt', 'r', encoding='utf-8')
negative_read = open('negetive.txt', 'r', encoding='utf-8')
neutral_read = open('neutral.txt', 'r', encoding='utf-8')
positive_data = positive_read.read()
positive_list = Bayes.split(positive_data)
for data in positive_list:
Bayes.fit(data, "๊ธ์ ")
negative_data = negative_read.read()
negative_list = Bayes.split(negative_data)
for data in negative_list:
Bayes.fit(data, "๋ถ์ ")
neutral_data = neutral_read.read()
neutral_list = Bayes.split(neutral_data)
for data in neutral_list:
Bayes.fit(data, "์ค๋ฆฝ")
def blog_result(request):
login_id = request.POST.get('login_id')
id = request.POST.get('id')
count1 = 0
count2 = 0
naver = naver_count()
daum = daum_count()
value = naver.objects.filter(login_id=login_id).order_by('-id').first()
value.id = id
count1 = value.naver_count
value.save()
value2 = daum.objects.filter(login_id=login_id).order_by('-id').first()
value2.id = id
count2 = value2.daum_count
print(login_id)
print(id)
return render(request, 'agri_crawler/chart_blog.html',{'naver_count':value, 'daum_count':value2})
def news_result(request):
login_id = request.POST.get('login_id')
id = request.POST.get('id')
keyword =request.POST.get('keyword')
total = state1.objects.filter(login_id=login_id, id=id, type_state=0)
for i in total:
total_number = i.state
print(keyword)
query = news_count.objects.filter(login_id=login_id, id = int(id)-270)
kbs=''
mbc=''
sbs=''
jtbc=''
ytn=''
money=''
edaily=''
korea=''
economy=''
seoul=''
for i in query:
kbs = i.kbs_count
mbc = i.mbc_count
sbs = i.sbs_count
jtbc = i.jtbc_count
ytn = i.ytn_count
money = i.money_count
edaily = i.edaily_count
korea = i.korea_count
economy = i.dailyeconomy_count
seoul = i.seouleconomy_count
return render(request, 'agri_crawler/solution.html',{'kbs':kbs
,'mbc':mbc,
'sbs':sbs,
'jtbc':jtbc,
'ytn':ytn,
'money':money,
'edaily':edaily,
'korea':korea,
'economy':economy,
'seoul':seoul,
'keyword':keyword,
'total_number':total_number,
})
def twitter_result(request):
return render(request, 'agri_crawler/twitter_result.html',{})
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,877
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/migrations/0012_auto_20190203_1930.py
|
# Generated by Django 2.1.2 on 2019-02-03 10:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agri_crawler', '0011_word'),
]
operations = [
migrations.AddField(
model_name='word',
name='user_id',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='word',
name='key',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='word',
name='value',
field=models.CharField(max_length=200, null=True),
),
]
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,878
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/migrations/0009_auto_20190201_1648.py
|
# Generated by Django 2.1.2 on 2019-02-01 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `koreaEconomy.nickname` and make `keyword` nullable."""

    dependencies = [
        ('agri_crawler', '0008_naver_nickname'),
    ]

    operations = [
        # Records which user's crawl produced the row.
        migrations.AddField(
            model_name='koreaeconomy',
            name='nickname',
            field=models.CharField(max_length=130, null=True),
        ),
        migrations.AlterField(
            model_name='koreaeconomy',
            name='keyword',
            field=models.CharField(max_length=130, null=True),
        ),
    ]
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,879
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/models.py
|
# Create your models here.
from djongo import models
from django import forms
from django.contrib.auth.models import User
def min_length_3_validator(value):
    """Form validator: reject any value shorter than three characters."""
    if len(value) >= 3:
        return
    raise forms.ValidationError('3글자 이상 입력해주세요')
class Signup(models.Model):
    # Hand-rolled account record (separate from django.contrib.auth).
    # NOTE(review): password is a plain CharField — appears to be stored
    # unhashed; confirm and hash before persisting.
    ID = models.CharField(max_length=100)
    password = models.CharField(max_length=100)
    Email = models.CharField(max_length=100)

class Emoticon(models.Model):
    # Five reaction values kept as strings; abstract — used only through
    # EmoticonForm, never materialized as its own table.
    like = models.CharField(max_length=100)
    warm = models.CharField(max_length=100)
    sad = models.CharField(max_length=100)
    angry = models.CharField(max_length=100)
    want = models.CharField(max_length=100)
    class Meta:
        abstract = True

class Document(models.Model):
    # Uploaded file plus free-text description; files land under 'webapp/'.
    description = models.CharField(max_length=255, blank=True)
    file = models.FileField(upload_to = 'webapp/')
    uploaded_at = models.DateTimeField(auto_now_add=True)

class EmoticonForm(forms.ModelForm):
    # ModelForm exposing all five Emoticon reaction fields.
    class Meta:
        model = Emoticon
        fields = (
            'like','warm','sad','angry','want'
        )

class Twitter(models.Model):
    # One crawled tweet (all columns stored as strings).
    userId = models.CharField(max_length=200, null=True)
    Id= models.CharField(max_length=200, null=True)
    content = models.CharField(max_length=200, null=True)
    time = models.CharField(max_length=200, null=True)

class UploadFileModel(models.Model):
    # Minimal titled file upload.
    title = models.TextField(default='')
    file = models.FileField(null=True)

class title(models.Model):
    # One crawled article; abstract — embedded into the per-outlet models
    # below via djongo's EmbeddedModelField.
    media = models.CharField(max_length=200, null=True)
    main_title = models.CharField(max_length=200, null=True)
    datetime = models.CharField(max_length=200, null=True)
    main_body = models.CharField(max_length=12000, null=True)
    # NOTE(review): FloatField ignores max_length — the argument is inert;
    # looks like a copy-paste from the CharFields above.
    count = models.FloatField(max_length=200, null=True)
    class Meta:
        abstract = True

class titleForm(forms.ModelForm):
    # Form counterpart of `title`, required by EmbeddedModelField.
    class Meta:
        model = title
        fields = (
            'media', 'main_title', 'datetime', 'main_body', 'count'
        )

class media_count(models.Model):
    # Per-outlet crawl tallies (stringly typed); abstract base.
    kbs_count = models.CharField(max_length=200, null=True)
    mbc_count = models.CharField(max_length=200, null=True)
    sbs_count = models.CharField(max_length=200, null=True)
    jtbc_count = models.CharField(max_length=200, null=True)
    ytn_count = models.CharField(max_length=200, null=True)
    money_count = models.CharField(max_length=200, null=True)
    edaily_count = models.CharField(max_length=200, null=True)
    korea_count = models.CharField(max_length=200, null=True)
    dailyeconomy_count = models.CharField(max_length=200, null=True)
    seouleconomy_count = models.CharField(max_length=200, null=True)
    naver_count = models.CharField(max_length=200, null=True)
    daum_count = models.CharField(max_length=200, null=True)
    twitter_count = models.CharField(max_length=200, null=True)
    class Meta:
        abstract = True
class media_countForm(forms.ModelForm):
    """ModelForm exposing every per-outlet count field of media_count."""
    class Meta:
        model = media_count
        # FIX: `fields` was a set literal ({...}); Django expects an
        # ordered list/tuple of field names, and set iteration order is
        # nondeterministic, so rendered field order varied between runs.
        fields = (
            'kbs_count', 'mbc_count', 'sbs_count', 'jtbc_count', 'ytn_count',
            'money_count', 'edaily_count', 'korea_count',
            'dailyeconomy_count', 'seouleconomy_count',
            'naver_count', 'daum_count', 'twitter_count',
        )
class news_count(models.Model):
    # Per-user news crawl result counts, one column per outlet.
    login_id = models.CharField(max_length=200, null=True)
    kbs_count = models.CharField(max_length=200, null=True)
    mbc_count = models.CharField(max_length=200, null=True)
    sbs_count = models.CharField(max_length=200, null=True)
    jtbc_count = models.CharField(max_length=200, null=True)
    ytn_count = models.CharField(max_length=200, null=True)
    money_count = models.CharField(max_length=200, null=True)
    edaily_count = models.CharField(max_length=200, null=True)
    korea_count = models.CharField(max_length=200, null=True)
    dailyeconomy_count = models.CharField(max_length=200, null=True)
    seouleconomy_count = models.CharField(max_length=200, null=True)

class naver_count(models.Model):
    # Per-user Naver blog crawl tally.
    login_id = models.CharField(max_length=200, null=True)
    naver_count = models.CharField(max_length=200, null=True)

class daum_count(models.Model):
    # Per-user Daum blog crawl tally.
    login_id = models.CharField(max_length=200, null=True)
    daum_count = models.CharField(max_length=200, null=True)

class twitter_count(models.Model):
    # Per-user Twitter crawl tally.
    login_id = models.CharField(max_length=200, null=True)
    twitter_count = models.CharField(max_length=200, null=True)

class blogtitle(models.Model):
    # One crawled blog post; abstract (embedded via blogForm).
    # NOTE(review): max_lengths look swapped relative to `title`
    # (main_body 200 vs datetime 12000) — confirm intent.
    main_title = models.CharField(max_length=200)
    main_body = models.CharField(max_length=200)
    datetime = models.CharField(max_length=12000)
    class Meta:
        abstract = True

class word(models.Model):
    # One extracted keyword/value pair owned by a user.
    user_id = models.CharField(max_length=200, null=True)
    key = models.CharField(max_length=200, null=True)
    value = models.CharField(max_length=200, null=True)
class blogForm(forms.ModelForm):
    # Form counterpart of blogtitle for embedded use.
    class Meta:
        model = blogtitle
        fields = (
            'main_title','main_body','datetime'
        )

class daum_blog(models.Model):
    # Crawled Daum blog post: article embedded as `sub_body`, plus the
    # post's tag and comment text.
    keyword = models.CharField(max_length=100, null=True)
    nickname = models.CharField(max_length=100, null=True)
    sub_body = models.EmbeddedModelField(
        model_container = title,
        model_form_class= titleForm
    )
    tag = models.CharField(max_length=100, null=True)
    comment = models.CharField(max_length=10000, null= True)

class KBS(models.Model):
    # Crawled KBS article: search keyword, crawling user, embedded article.
    keyword = models.CharField(max_length=130, null=True)
    nickname = models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container = title,
        model_form_class= titleForm
    )

class user_data(models.Model):
    # Article saved against a specific user account.
    ID = models.CharField(max_length=100)
    keyword = models.CharField(max_length=100)
    sub_body = models.EmbeddedModelField(
        model_container = title,
        model_form_class = titleForm
    )

class state1(models.Model):
    # Progress/state row for one crawl job (all values kept as strings).
    login_id = models.CharField(max_length=100)
    keyword = models.CharField(max_length=100)
    start_date = models.CharField(max_length=100)
    end_date = models.CharField(max_length=100)
    today_date = models.CharField(max_length=100)
    # `state` holds the running item count (news.py writes cnt into it);
    # `type_state` discriminates the job type — presumably 0 = news crawl,
    # TODO confirm against the writers.
    state = models.CharField(max_length=100, null=True)
    type_state = models.CharField(max_length=100, null=True)
class MBC(models.Model):
    # Crawled MBC article (same shape as KBS).
    keyword = models.CharField(max_length=130, null=True)
    nickname = models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container = title,
        model_form_class = titleForm
    )

class SBS(models.Model):
    # Crawled SBS article.
    keyword = models.CharField(max_length=130, null=True)
    nickname = models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container = title,
        model_form_class = titleForm
    )

class JTBC(models.Model):
    # Crawled JTBC article.
    keyword = models.CharField(max_length=130, null=True)
    nickname = models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container=title,
        model_form_class=titleForm
    )

class YTN(models.Model):
    # Crawled YTN article.
    keyword = models.CharField(max_length=130, null=True)
    nickname = models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container=title,
        model_form_class=titleForm
    )

class dailyEconomy(models.Model):
    # Crawled Maeil Business (매일경제) article.
    keyword = models.CharField(max_length=130, null=True)
    nickname = models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container=title,
        model_form_class=titleForm
    )

class moneyToday(models.Model):
    # Crawled Money Today (머니투데이) article.
    keyword = models.CharField(max_length=130, null= True)
    nickname=models.CharField(max_length=130,null=True)
    sub_body= models.EmbeddedModelField(
        model_container=title,
        model_form_class=titleForm
    )

class eDaily(models.Model):
    # Crawled eDaily (이데일리) article.
    keyword = models.CharField(max_length=130, null=True)
    nickname=models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container=title,
        model_form_class=titleForm
    )

class seoulEconomy(models.Model):
    # Crawled Seoul Economic Daily (서울경제) article.
    keyword = models.CharField(max_length=130, null=True)
    nickname=models.CharField(max_length=130,null=True)
    sub_body = models.EmbeddedModelField(
        model_container=title,
        model_form_class=titleForm
    )

class koreaEconomy(models.Model):
    # Crawled Korea Economic Daily (한국경제) article.
    keyword = models.CharField(max_length=130, null=True)
    nickname=models.CharField(max_length=130,null=True)
    sub_body = models.EmbeddedModelField(
        model_container=title,
        model_form_class=titleForm
    )

class naver(models.Model):
    # Crawled Naver blog post; also records the post's source URL.
    keyword = models.CharField(max_length=130, null=True)
    nickname = models.CharField(max_length=130, null=True)
    sub_body = models.EmbeddedModelField(
        model_container = title,
        model_form_class=titleForm
    )
    main_url = models.CharField(max_length=130, null=True)

class daum(models.Model):
    # Crawled Daum result; unlike the other outlet models it records no
    # nickname, so rows are not attributable to a user.
    keyword = models.CharField(max_length=130)
    sub_body = models.EmbeddedModelField(
        model_container = title,
        model_form_class=titleForm
    )
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,880
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/news.py
|
from bs4 import BeautifulSoup
import requests, threading
import time
from .models import title, state1, KBS,SBS,MBC,JTBC,YTN, dailyEconomy, moneyToday, eDaily, seoulEconomy, koreaEconomy,Emoticon,news_count
class news_crawler(threading.Thread):
    """Background thread that crawls Daum news search results.

    Searches `keyword` between `sd` and `ed` (YYYYMMDD strings), visits
    each article, and saves it under the Django model matching its news
    outlet. Progress is written to the caller's `state1` row and the
    final per-outlet tallies to one `news_count` row.
    """

    # outlet label (as printed in Daum's result snippet) ->
    # (model class, self.media flag key, tally index in `count`)
    OUTLETS = {
        "KBS": (KBS, 'kbs', 0),
        "MBC": (MBC, 'mbc', 1),
        "SBS": (SBS, 'sbs', 2),
        "JTBC": (JTBC, 'jtbc', 3),
        "YTN": (YTN, 'ytn', 4),
        "매일경제": (dailyEconomy, 'daily', 5),
        "머니투데이": (moneyToday, 'money', 6),
        "이데일리": (eDaily, 'eDaily', 7),
        "서울경제": (seoulEconomy, 'seoul', 8),
        "한국경제": (koreaEconomy, 'korea', 9),
    }

    def __init__(self, keyword, sd, ed, ID, media, t, b, d, k, e, c, l, number):
        threading.Thread.__init__(self)
        self.keyword = keyword
        self.sd = sd          # search start date, YYYYMMDD
        self.ed = ed          # search end date, YYYYMMDD
        self.ID = ID          # login id of the requesting user
        self.t = t            # "t" keeps the article title, else blanked
        self.b = b            # "b" keeps the article body, else blanked
        self.d = d            # "d" keeps the article datetime, else blanked
        self.k = k            # "k" keeps the keyword, else blanked
        self.e = e            # unused here; retained for caller compatibility
        self.c = c            # unused here; retained for caller compatibility
        self.l = l            # unused here; retained for caller compatibility
        self.media = media    # dict outlet-flag -> bool (which outlets to keep)
        self.number = number  # id of the state1 progress row to update

    def get_bs_obj(self, keyword, sd, ed, page):
        """Fetch one Daum news search result page as BeautifulSoup."""
        url = "https://search.daum.net/search?nil_suggest=btn&w=news&DA=STC&cluster=y&q="+keyword+"&p="+page+"&sd="+sd+"000000&ed="+ed+"235959&period=u"
        result = requests.get(url)
        return BeautifulSoup(result.content, "html.parser")

    def get_data_date(self, keyword, sd, ed, page):
        """Return the total hit count reported in the result header."""
        bs_obj = self.get_bs_obj(keyword, sd, ed, page)
        total_num = bs_obj.find("span", {"class": "txt_info"})
        return self.get_total_num(total_num)

    def get_total_num(self, total_num):
        """Parse the '... N건' hit-count text into an int (commas stripped)."""
        split = total_num.text.split()
        # The count token's position depends on how Daum phrases the header.
        token = split[3] if len(split) == 4 else split[2]
        return int(token.replace(",", "").replace("건", ""))

    def get_bs_incontent(self, url):
        """Fetch an individual article page as BeautifulSoup."""
        result = requests.get(url)
        return BeautifulSoup(result.content, "html.parser")

    def run(self):
        total = self.get_data_date(self.keyword, self.sd, self.ed, "1")
        # NOTE(review): floor-dividing by 10 results/page drops any partial
        # last page, and pages are requested from 0 — confirm Daum paging.
        pages = int(total / 10)
        cnt = 0
        count = [0] * 10  # per-outlet tallies, indexed per OUTLETS
        News = news_count()
        News.login_id = self.ID
        for i in range(0, pages):
            bs_obj = self.get_bs_obj(self.keyword, self.sd, self.ed, str(i))
            for li in bs_obj.findAll("div", {"class": "wrap_cont"}):
                time.sleep(2)  # throttle: at most one article every 2 s
                span_split = li.find("span", {"class": "f_nb date"}).text.split()
                if len(span_split) == 3:
                    continue
                elif len(span_split) == 5:
                    a_url = li.find("a", {"class": "f_nb"})
                    article = self.get_bs_incontent(a_url['href'])
                    Title = article.find("h3", {"class": "tit_view"}).text
                    body = article.find("div", {"id": "mArticle"}).text
                    times = article.find("span", {"class": "txt_info"}).text
                    # Blank out every component the caller did not request.
                    if self.k != "k":
                        self.keyword = ""
                    if self.b != "b":
                        body = ""
                    if self.d != "d":
                        times = ""
                    if self.t != "t":
                        Title = ""
                    outlet = span_split[2]
                    # FIX: media was hard-coded to "서울경제" for every
                    # outlet; record the outlet actually parsed instead.
                    contents = title(main_title=Title, main_body=body,
                                     datetime=times, media=outlet, count=1)
                    entry = self.OUTLETS.get(outlet)
                    if entry is not None and self.media[entry[1]]:
                        model_cls, _flag, idx = entry
                        row = model_cls()
                        # FIX: the 서울경제 branch previously assigned
                        # `self.nickname` instead of the row's nickname,
                        # leaving those rows unattributed.
                        row.nickname = self.ID
                        row.keyword = self.keyword
                        row.sub_body = contents
                        row.save()
                        cnt = cnt + 1
                        count[idx] = count[idx] + 1
            # Publish running progress after each result page.
            progress = state1.objects.filter(id=self.number, type_state=0).first()
            progress.state = cnt
            progress.save()
        # FIX: edaily/korea/money/seouleconomy previously read each other's
        # tally slots; every field now reads its own outlet's index.
        News.kbs_count = int(count[0])
        News.mbc_count = int(count[1])
        News.sbs_count = int(count[2])
        News.jtbc_count = int(count[3])
        News.ytn_count = int(count[4])
        News.dailyeconomy_count = int(count[5])
        News.money_count = int(count[6])
        News.edaily_count = int(count[7])
        News.seouleconomy_count = int(count[8])
        News.korea_count = int(count[9])
        News.save()
        print("됨")
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,881
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/example_python/gensim.py
|
"""Tokenize output.txt with KoNLPy and train a Word2Vec model on it."""
# FIX: the original imported the `Word2Vec` class but then called
# `word2vec.LineSentence` / `word2vec.Word2Vec`, which raised NameError
# because the `word2vec` module was never imported.
from gensim.models import word2vec
from konlpy.tag import Twitter

# Read the raw corpus (CRLF-delimited lines); the original leaked the
# file handle — use a context manager instead.
with open("output.txt", "r", encoding="utf-8") as file:
    lines = file.read().split("\r\n")

twitter = Twitter()
results = []
for line in lines:
    # Keep only content morphemes: drop particles, endings, punctuation.
    kept = [word for (word, pumsa) in twitter.pos(line, norm=True, stem=True)
            if pumsa not in ("Josa", "Eomi", "Punctuation")]
    results.append(" ".join(kept).strip())

output = " ".join(results).strip()
with open("toji.wakati", "w", encoding="utf-8") as fp:
    fp.write(output)

# Train: 200-dim vectors, window 10, hierarchical softmax, skip-gram.
data = word2vec.LineSentence("toji.wakati")
model = word2vec.Word2Vec(data, size=200, window=10, hs=1, min_count=2, sg=1)
model.save("toji.model")
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,882
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/migrations/0015_blog_count_news_count_twitter_count.py
|
# Generated by Django 2.1.2 on 2019-02-13 04:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the blog_count, news_count and twitter_count tables."""

    dependencies = [
        ('agri_crawler', '0014_remove_state1_total_count'),
    ]

    operations = [
        # Per-user blog crawl tallies (Naver + Daum).
        migrations.CreateModel(
            name='blog_count',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('login_id', models.CharField(max_length=200, null=True)),
                ('naver_count', models.CharField(max_length=200, null=True)),
                ('daum_count', models.CharField(max_length=200, null=True)),
            ],
        ),
        # Per-user news crawl tallies, one column per outlet.
        migrations.CreateModel(
            name='news_count',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('login_id', models.CharField(max_length=200, null=True)),
                ('kbs_count', models.CharField(max_length=200, null=True)),
                ('mbc_count', models.CharField(max_length=200, null=True)),
                ('sbs_count', models.CharField(max_length=200, null=True)),
                ('jtbc_count', models.CharField(max_length=200, null=True)),
                ('ytn_count', models.CharField(max_length=200, null=True)),
                ('money_count', models.CharField(max_length=200, null=True)),
                ('edaily_count', models.CharField(max_length=200, null=True)),
                ('korea_count', models.CharField(max_length=200, null=True)),
                ('dailyeconomy_count', models.CharField(max_length=200, null=True)),
                ('seouleconomy_count', models.CharField(max_length=200, null=True)),
            ],
        ),
        # Per-user Twitter crawl tally.
        migrations.CreateModel(
            name='twitter_count',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('login_id', models.CharField(max_length=200, null=True)),
                ('twitter_count', models.CharField(max_length=200, null=True)),
            ],
        ),
    ]
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,883
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/GL_ModelCreator.py
|
'''
๋ชจ๋ธ ์์ฑ ๋ชจ๋
Model Creating Module
created by Good_Learning
date : 2018-08-21
๋ชจ๋ธ์ ์์ฑํ๋ ๋ถ๋ถ์ ๋งก๋๋ค.
RNN ์ค LSTM์ ์ ๋ฐ์ ์ธ ๊ณ์ธต๊ด๊ณ์ ๊ตฌ์กฐ, ํ์ต๊ณผ์ ์ ์ฌ๊ธฐ์ ๊ฒฐ์ ํ๋ค.
'''
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, Dropout
from keras.callbacks import EarlyStopping
import keras
import math
import numpy as np
class ModelsCreator:
    """Builds and drives a small LSTM regressor for time-series data.

    Network: LSTM(32) -> Dense(1), both ReLU, input shape (1, look_back).
    """

    look_back = 15  # number of past time steps fed per sample

    def __init__(self):
        # FIX: `model` used to be a *class* attribute, so every instance
        # shared one Sequential and each __init__ stacked two more layers
        # onto it. Build a fresh model per instance instead.
        self.model = Sequential()
        self.model.add(LSTM(32, input_shape=(1, self.look_back), activation='relu'))
        self.model.add(Dense(1, activation='relu'))

    def settingLearningEnvironment(self, loss='mean_squared_error', optimizer='adam'):
        """Compile the model with the given loss and optimizer."""
        self.model.compile(loss=loss, optimizer=optimizer)

    def training(self, trainX, trainY, valid_x, valid_y):
        """Fit on (trainX, trainY); early-stops on validation loss.

        Returns the Keras History object.
        """
        early_stopping = EarlyStopping(monitor='val_loss', min_delta=0,
                                       patience=2, verbose=2, mode='auto')
        hist = self.model.fit(trainX, trainY,
                              validation_data=(valid_x, valid_y),
                              epochs=10, batch_size=1, shuffle=False,
                              verbose=2, callbacks=[early_stopping])
        return hist

    def tester(self, test_x, test_y, nptf, scaler):
        """Predict on test_x and report RMSE in the original scale.

        `nptf` is unused here; it fed the commented-out last-value
        prediction below and is kept for caller compatibility. Returns
        (predictions, true values), both inverse-transformed by `scaler`.
        """
        test_predict = self.model.predict(test_x)
        test_predict = scaler.inverse_transform(test_predict)
        test_y = scaler.inverse_transform(test_y)
        test_score = math.sqrt(mean_squared_error(test_y, test_predict))
        # FIX: message said "Train Score" although this is the test set.
        print('Test Score: %.2f RMSE' % test_score)
        # predict last value (or tomorrow?)
        #last_x = nptf[-1]
        #last_x = np.reshape(last_x, (1, 1, 1))
        #last_y = self.model.predict(last_x)
        #last_y = scaler.inverse_transform(last_y)
        #print('Predict the Close value of final day: %d' % last_y)
        return test_predict, test_y
# ์์ดํ
, ์ํ์ํ๋์ง - ๋ฒ์ฉ CSV ์๊ณ์ด ๋ฐ์ดํฐ ๋ถ์๊ธฐ
#๋ถ์ํ ๋ ๋ฐ์ดํฐ ์ด๋ป๊ฒ ์ป๊ณ - ์๊ณ์ด ๋ฐ์ดํฐ๋ฅผ ์ฌ์ฉํ๋ค.
#์ด๋ฐ ๋ฐ์ดํฐ๋ฅผ ์ผ๋๋ฐ ์ด๋ฐ ์ ํธ๋ฆฌ๋ทฐํธ๊ฐ ์ ์ผ ๋ง๊ณ , ์ค์ํ๊ณ ๊ทธ๋ํ๋ฅผ ํตํด์ ๋ณด์ฌ์ฃผ๋ฉด ์ค์ํ ๊ฑฐ๊ฐ์์
#๋ถ์ํ๊ธฐ ์ํด์ ์ด๋ค ๋ฐฉ๋ฒ์ ์ผ๋์ง ์ ํ๋๊ฐ ์ด๋ค์ง
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,884
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/Analysis.py
|
import math, sys
from konlpy.tag import Twitter
class BayesianFilter:
    """Naive Bayesian text filter: per-category word occurrence counting."""

    def __init__(self):
        self.words = set()        # every word ever seen
        self.word_dict = {}       # category -> {word: count}
        self.category_dict = {}   # category -> number of samples seen
        self.word_count = {}      # global word -> count

    def split(self, text):
        """Morph-analyze `text`, dropping particles/endings/punctuation."""
        tagger = Twitter()
        return [
            surface
            for surface, tag in tagger.pos(text, norm=True, stem=True)
            if tag not in ("Josa", "Eomi", "Punctuation")
        ]

    def all_count(self, text):
        """Accumulate each token of `text` into the global word counts."""
        for token in text:
            self.word_count[token] = self.word_count.get(token, 0) + 1
        return self.word_count

    def inc_word(self, word, category):
        """Count one occurrence of `word` under `category`."""
        bucket = self.word_dict.setdefault(category, {})
        bucket[word] = bucket.get(word, 0) + 1
        self.words.add(word)

    def inc_category(self, category):
        """Count one sample seen for `category`."""
        self.category_dict[category] = self.category_dict.get(category, 0) + 1

    def fit(self, text, category):
        """Learn one text sample under the given category."""
        word_list = self.split(text)
        print(word_list)
        for token in word_list:
            self.inc_word(token, category)
        self.inc_category(category)
        print(self.category_dict)
        print(self.word_dict)
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,885
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/example_python/practice_bayes.py
|
import math,sys
from konlpy.tag import Twitter
class Bayes:
    """Skeleton Bayesian classifier: tokenization plus count stores."""

    def __init__(self):
        self.words = set()         # vocabulary seen so far
        self.word_dict = {}        # category -> {word: count}
        self.category_list = {}    # category -> occurrence count

    # Morphological analysis
    def split(self, text):
        """Return the content morphemes of `text` (drops Josa/Eomi/Punctuation)."""
        tagger = Twitter()
        tokens = []
        for surface, tag in tagger.pos(text, norm=True, stem=True):
            if tag not in ("Josa", "Eomi", "Punctuation"):
                tokens.append(surface)
        return tokens
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,886
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/daum_comment.py
|
# Automates a Daum login via Selenium: opens the login form and submits
# credentials, presumably as a precursor to comment crawling.
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.common.keys import Keys
# Machine-specific Windows chromedriver path — breaks on any other host;
# should come from configuration.
driver = webdriver.Chrome('C:/Users/thdwlsgus0/Desktop/chromedriver_win32/chromedriver.exe')
#driver = webdriver.PhantomJS('C:/Users/thdwlsgus0/Desktop/phantomjs-2.1.1-windows/phantomjs-2.1.1-windows/bin/phantomjs.exe')
driver.implicitly_wait(3)  # wait up to 3 s for elements to appear
driver.get('https://logins.daum.net/accounts/loginform.do?')
# SECURITY: a real account id and password are committed in source below —
# they should be moved to environment variables / a secrets store and the
# password rotated.
driver.find_element_by_name('id').send_keys('thdwlsgus10')
driver.find_element_by_name('pw').send_keys('operwhe123!')
driver.find_element_by_xpath("//button[@class='btn_comm']").click()
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,887
|
thdwlsgus0/vegetable_crawler
|
refs/heads/master
|
/agriculture/agriculture/agri_crawler/signup.py
|
# ์๋น์ด ์์ค ํ์๊ฐ์
๋ถ๋ถ
from .models import Signup
from django.shortcuts import render
class signUp():
    """Signup helper: renders the signup page and persists new accounts."""

    def __init__(self):
        self.result = 0  # status placeholder (never read in this class)

    def get(self, request=None):
        # FIX: `request` was referenced but never defined (NameError on
        # every call). Accept it as a parameter; the default preserves
        # the old zero-argument call signature.
        return render(request, 'vegetable/signup.html', {})

    def post(self, ID, password, email):
        """Store a new Signup record.

        NOTE(review): the password is persisted as-is — it should be
        hashed before saving.
        """
        person_info = Signup()
        person_info.ID = ID
        person_info.Email = email
        person_info.password = password
        person_info.save()
'''if user_id is None or user_pw is None or user_email is None:
return render(request, 'vegetable/signup.html', {})
else:
connection= models.Mongo()
val = connection.Find_id_Mongo(user_id)
if val ==1:
return render(request, 'vegetable/signup.html',{})
else:
connection.Insert_info_Mongo(user_id, user_pw, user_name, user_email)
return render(request, 'vegetable/login.html')
'''
'''
class logIn(View):
def get(self, request, *args, **kwargs):
return render(request, 'dblab/login_html',{})
def post(self,request, *args, **kwargs):
user_id =request.POST['login_id']
user_pw = request.POST['login_pw']
if(user_id is None or user_pw is None):
return render(request, 'dblab/login.html',{})
else:
connection = models.Mongo()
val1 = connection.Verify_id_Mongo(user_id)
val2 = connection.Verify_id_pw_Mongo(user_id,user_pw)
if val1 == 1:
# ์์ด๋์ ๋น๋ฐ๋ฒํธ ๋ชจ๋ ์ผ์นํ๋ค๋ฉด
if val2 == 1:
# ์ฑ๊ณต ์ถ๋ ฅ ํ ๋ก๊ทธ์ธ
return HttpResponse("๋ก๊ทธ์ธ์ฑ๊ณต")
else:
return render(request, 'dblab/login.html', {})
# ์
๋ ฅํ ์์ด๋๊ฐ ์กด์ฌํ์ง ์๋๋ค๋ฉด
else:
# ์คํจ ์ถ๋ ฅ ํ ๋๋์๊ฐ๊ธฐ
'''
|
{"/agriculture/agriculture/agri_crawler/daum_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/forms.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/naver_blog.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/blogview.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/views.py": ["/agriculture/agriculture/agri_crawler/models.py", "/agriculture/agriculture/agri_crawler/forms.py", "/agriculture/agriculture/agri_crawler/signup.py", "/agriculture/agriculture/agri_crawler/blogview.py", "/agriculture/agriculture/agri_crawler/daum_blog.py", "/agriculture/agriculture/agri_crawler/naver_blog.py", "/agriculture/agriculture/agri_crawler/news.py", "/agriculture/agriculture/agri_crawler/Analysis.py"], "/agriculture/agriculture/agri_crawler/news.py": ["/agriculture/agriculture/agri_crawler/models.py"], "/agriculture/agriculture/agri_crawler/signup.py": ["/agriculture/agriculture/agri_crawler/models.py"]}
|
21,900
|
Krupali0609/SSW567_HW04-1
|
refs/heads/main
|
/HW_04_test.py
|
import unittest
from HW_04 import get_repo
class TestgetRepo(unittest.TestCase):
    """Checks get_repo() against a known snapshot of the HeliPatel98 account."""

    def test_repo(self):
        repo_commits = [
            ("helloworld", 2),
            ("SSW-567", 2),
            ("SSW-695_COOKIT", 1),
            ("SSW567_HW04", 13),
            ("Student_Repository", 23),
            ("Triangle567", 17),
        ]
        expected = ['User: HeliPatel98'] + [
            'Repository: {} Number of commits: {}'.format(name, commits)
            for name, commits in repo_commits
        ]
        self.assertEqual(get_repo(), expected)


if __name__ == '__main__':
    unittest.main()
|
{"/HW_04_test.py": ["/HW_04.py"]}
|
21,901
|
Krupali0609/SSW567_HW04-1
|
refs/heads/main
|
/HW_04.py
|
import requests
import json
def get_repo(user_name = 'HeliPatel98'):
    """Return a list of strings: the user line followed by one
    'Repository: <name> Number of commits: <n>' line per repository.

    Returns an error string instead of a list when GitHub data cannot be
    fetched or parsed (interface preserved from the original).

    NOTE(review): GitHub paginates /commits, so len() counts at most the
    first page (30) — confirm that is acceptable for grading purposes.
    """
    output = []
    url = 'https://api.github.com/users/{}/repos'.format(user_name)
    # FIX: add a timeout — requests.get without one can block forever
    resq = requests.get(url, timeout=10)
    repos = json.loads(resq.text)
    output.append('User: {}'.format(user_name))
    try:
        repos[0]['name']
    except(TypeError, KeyError, IndexError):
        # API returned an error object or an empty list
        return 'unable to fetch repository'
    try:
        for repo in repos:
            repo_name = repo['name']
            repo_url = 'https://api.github.com/repos/{}/{}/commits'.format(user_name, repo_name)
            repo_info = requests.get(repo_url, timeout=10)
            repo_info_json = json.loads(repo_info.text)
            output.append('Repository: {} Number of commits: {}'.format(repo_name, len(repo_info_json)))
    except(TypeError, KeyError, IndexError):
        return 'unable to fetch commits'
    return output


if __name__ == '__main__':
    for ex in get_repo():
        print(ex)
|
{"/HW_04_test.py": ["/HW_04.py"]}
|
21,902
|
andriisoroka/restapi
|
refs/heads/master
|
/app/api/users.py
|
from flask_restful import Resource,reqparse
from app.jsoongia import Serializer, relationships
from flask import request
class UserSerializer(Serializer):
    """Serializer configuration for user objects.

    Assumes the project's `Serializer` base reads these class attributes
    (ref field, resource type, attribute whitelist) — JSON:API style.
    """
    ref = 'id'
    type = 'user'
    # password is serialized too — presumably for the demo only; verify
    attributes = ['name','email','password']
# In-memory stand-in for a users table (no database is wired up yet).
mass = [
    {"id":1,"name":"Andrii Soroka","email":'andrii_soroka@ukr.net',"password":'12121dasdsdcd'},
    {"id":2,"name":"Uliana Soroka","email":"starosta_7@mail.ru","password":'dfhjk4389034kl'}
]
# Parser expecting a JSON:API-style top-level "data" object in the body.
parse_data_model = reqparse.RequestParser()
parse_data_model.add_argument('data',type=dict)
class User(Resource):
    """REST resource for a single user (/api/users/<id>)."""

    def get(self, id):
        """Serialize the user whose id matches; 404 when absent.

        FIX: the original always returned mass[0], ignoring `id`.
        """
        serializer = UserSerializer()
        user = next((u for u in mass if u["id"] == id), None)
        if user is None:
            return {}, 404
        return serializer.serialize(user, {})

    def put(self, id):
        # not implemented yet — returns an empty payload
        return []

    def delete(self, id):
        # not implemented yet — returns an empty payload
        return []
class UserList(Resource):
    """REST resource for the user collection (/api/users)."""

    def get(self):
        """Serialize every user in the in-memory store."""
        serializer = UserSerializer()
        res = serializer.serialize(mass, {})
        return res

    def post(self):
        """Append a user from a JSON:API-shaped body and echo it back.

        Returns None (as before) when the payload is malformed, but now
        only catches the specific parsing errors instead of everything.
        """
        try:
            new_id = mass[-1]['id'] + 1
            newUser = request.get_json(force=True)
            newUser['data']['id'] = new_id
            attrs = newUser['data']['attributes']
            mass.append({"id": new_id, "name": attrs['name'], "email": attrs['email'], "password": attrs['password']})
            return newUser
        except (KeyError, IndexError, TypeError) as e:
            # FIX: was a blanket `except Exception` that hid every bug
            print(e)
|
{"/app/api/users.py": ["/app/jsoongia/__init__.py"], "/app/router.py": ["/app/__init__.py", "/app/api/users.py"]}
|
21,903
|
andriisoroka/restapi
|
refs/heads/master
|
/app/__init__.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_restful import reqparse, abort, Api, Resource
app = Flask(__name__)
# FIX: configuration must be loaded BEFORE SQLAlchemy(app) is constructed,
# otherwise SQLALCHEMY_DATABASE_URI from config.py is never picked up.
app.config.from_object('config')
db = SQLAlchemy(app)
CORS(app)
api = Api(app)


class User(db.Model):
    """ORM model for the users table."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    email = db.Column(db.String(100), unique=True)
    password = db.Column(db.String(250))


# NOTE(review): removed the import-time debug query (`User().query.all()`),
# which hit the database on every import and crashed when the DB was down.

# Imported at the bottom intentionally to avoid circular imports.
from app import views
from app import router
|
{"/app/api/users.py": ["/app/jsoongia/__init__.py"], "/app/router.py": ["/app/__init__.py", "/app/api/users.py"]}
|
21,904
|
andriisoroka/restapi
|
refs/heads/master
|
/app/jsoongia/__init__.py
|
from .import relationships
from .serializers import Serializer
|
{"/app/api/users.py": ["/app/jsoongia/__init__.py"], "/app/router.py": ["/app/__init__.py", "/app/api/users.py"]}
|
21,905
|
andriisoroka/restapi
|
refs/heads/master
|
/app/router.py
|
# Register the REST endpoints on the shared flask_restful Api instance.
from app import api
from app.api.users import User,UserList
# single-user endpoint, keyed by integer id
api.add_resource(User,'/api/users/<int:id>')
# collection endpoint (list / create)
api.add_resource(UserList,'/api/users')
|
{"/app/api/users.py": ["/app/jsoongia/__init__.py"], "/app/router.py": ["/app/__init__.py", "/app/api/users.py"]}
|
21,906
|
andriisoroka/restapi
|
refs/heads/master
|
/config.py
|
SQLALCHEMY_DATABASE_URI = "mysql://root:123456@homepc/library"
|
{"/app/api/users.py": ["/app/jsoongia/__init__.py"], "/app/router.py": ["/app/__init__.py", "/app/api/users.py"]}
|
21,914
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/Elements/bag.py
|
from __future__ import annotations
from typing import List, Dict
import random
class Bag:
    """Multiset of tiles: maps tile type -> remaining count."""

    def __init__(self, tile_types: List[int], tile_count: List[int]):
        """Pair up the parallel lists of tile types and their counts."""
        self.tiles: Dict[int, int] = {}
        for (i, j) in zip(tile_types, tile_count):
            self.tiles[i] = j

    def is_empty(self):
        """True when every tile count is zero."""
        return 0 == sum(self.tiles.values())

    def count(self):
        """Total number of tiles remaining."""
        return sum(self.tiles.values())

    def add_tile(self, tile_type):
        """Return one tile to the bag; tolerates previously-unseen types.

        FIX: was `self.tiles[tile_type] += 1`, which raised KeyError for a
        tile type the bag had not been initialized with.
        """
        self.tiles[tile_type] = self.tiles.get(tile_type, 0) + 1

    def add_bag(self, bag: "Bag"):
        """Dump every tile from `bag` into this bag, zeroing `bag`.

        FIX: the original iterated `bag.tiles` directly (yielding keys) and
        tried to unpack each key as a pair — a ValueError at runtime.
        """
        for tile_type, tile_count in bag.tiles.items():
            if tile_type in self.tiles.keys():
                self.tiles[tile_type] += tile_count
            else:
                self.tiles[tile_type] = tile_count
            # reset dumped bag to 0
            bag.tiles[tile_type] = 0

    def draw_tile(self) -> int:
        """Draw one tile at random, weighted by the remaining counts."""
        tile: int = random.choices(list(self.tiles.keys()), list(self.tiles.values()), k=1)[0]
        self.tiles[tile] -= 1
        return tile

    def draw_tiles(self, n) -> List[int]:
        """Draw `n` tiles, one at a time."""
        return [self.draw_tile() for _ in range(0, n)]
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,915
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/Elements/factory.py
|
from typing import List
from Engine.Elements.bag import Bag
from Engine.Elements.center import Center
class Factory:
    """A factory display holding up to four tiles; leftovers go to the center."""

    def __init__(self, center: "Center"):
        self.center = center
        self.tiles: List[int] = []

    def is_empty(self) -> bool:
        """True when no tiles remain on this factory."""
        return self.tiles == []

    def fill_factory(self, bag: "Bag"):
        """Refill with four tiles drawn from `bag` (caller ensures supply)."""
        # assume there are 4 tiles to draw
        self.tiles: List[int] = bag.draw_tiles(4)

    def claim_tile(self, color):
        """Take every tile of `color`; push the rest to the center.

        Returns (True, claimed_tiles) on success, (False, []) otherwise.
        FIX: the failure branch returned a bare `False`, but callers
        (Player.make_choice) unpack a 2-tuple — and Center.claim_tile
        already returns (False, []); now consistent.
        """
        drawn = []
        if color in self.tiles:
            for tile in reversed(self.tiles):
                if tile == color:
                    drawn.append(self.tiles.pop())
                else:
                    self.center.add_tile(self.tiles.pop())
            return True, drawn
        else:
            return False, []
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,916
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/GameLoop.py
|
from Engine.Player.player import Player
from Engine.Elements.bag import Bag
from Engine.Elements.board import Board
from Engine.Elements.center import Center
from Engine.Elements.discard import Discard
from Engine.Elements.factory import Factory
PlayerCount = int
default_bag = {
0: 20,
1: 20,
2: 20,
3: 20,
4: 20
}
class Game:
    """Drives an Azul match: setup, factory refills and per-player turns."""

    # index of the player whose turn it currently is
    i = 0

    def __init__(self, n: PlayerCount):
        """Create bag, discard, center, factories and players for `n` players.

        Raises ValueError for player counts outside 2..4.
        """
        self.num_players = n
        self.bag: Bag
        self.discard: Discard
        self.factories: list[Factory]
        self.players: list[Player]
        # factory count per player count (Azul rule: 2n + 1)
        if n == 2:
            num_factories = 5
        elif n == 3:
            num_factories = 7
        elif n == 4:
            num_factories = 9
        else:
            raise ValueError
        self.bag = Bag(list(default_bag.keys()), list(default_bag.values()))
        self.discard = Discard(self.bag)
        self.center = Center()
        self.factories = []
        for _ in range(0, num_factories):
            # FIX: was `self.factories += Factory(...)` — a Factory is not
            # iterable, so += raised TypeError; append the instance instead.
            self.factories.append(Factory(self.center))
        self.players = []
        for i in range(0, n):
            # FIX: Player.__init__ takes (id, board, center, discard,
            # factories); the original passed factories in the center slot
            # and omitted discard/factories entirely.
            player = Player(i, Board(), self.center, self.discard, self.factories)
            self.players.append(player)
        for i in range(0, n):
            opponents: list[Player] = self.players.copy()
            opponents.pop(i)
            self.players[i].set_opponents(opponents)
        self.starting_player: Player = self.players[0]

    def fill_factories(self):
        """Refill every factory from the bag and reset the starting marker."""
        self.center.has_starting_tile = True
        for factory in self.factories:
            self.check_bag()
            factory.fill_factory(self.bag)
        for player in self.players:
            if player.has_starting_marker:
                self.starting_player = player
                player.has_starting_marker = False

    def check_bag(self):
        """Top the bag back up from the discard when it runs low."""
        # FIX: was `self.bag.count == 0`, which compared the bound method
        # object to 0 (always False) — call it.
        if self.bag.count() == 0:
            self.bag.add_bag(self.discard)
        if self.bag.count() < 4:
            tiles = self.discard.draw_tiles(4 - self.bag.count())
            for tile in tiles:
                self.bag.add_tile(tile)

    def set_starting_player(self):
        """Rotate the player list so the starting player is at index 0."""
        idx = self.players.index(self.starting_player)
        for _ in range(0, idx):
            self.players.append(self.players.pop(0))
        self.i = 0
        return

    def player_request(self):
        """Provide the current player's index and observable game state."""
        return self.i, self.players[self.i].state()

    def player_action(self, args):
        """Apply the current player's choice; False if the choice was invalid.

        NOTE(review): the choice is currently hard-coded
        (`Factory(Center()), 0, 0`) — `args` still needs to be parsed.
        """
        success = self.players[self.i].make_choice(Factory(Center()), 0, 0)
        if not success:
            return False
        self.i = (self.i + 1) % self.num_players
        if self.no_tiles_remain():
            # round is over: score boards, pass the starting marker, refill
            for player in self.players:
                player.end_turn_reset()
                if player.has_starting_marker:
                    self.starting_player = player
                    player.has_starting_marker = False
            self.center.has_starting_tile = True
            self.fill_factories()
            self.set_starting_player()
        state = self.players[self.i].state()
        score = self.players[self.i].score
        end_game = self.end_game_cond_met()
        # return True, new state, current score estimate, end game condition met
        return True, state, score, end_game

    def end_game_cond_met(self):
        """True once any player has completed a wall row."""
        return any([player.end_game_condition_met() for player in self.players])

    def no_tiles_remain(self):
        """True when every factory and the center are exhausted."""
        for factory in self.factories:
            if not factory.is_empty():
                return False
        if self.center.is_empty():
            return True
        return False
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,917
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/Elements/center.py
|
class Center:
    """The shared center pool where factory leftovers accumulate."""

    # the starting-player marker sits here at the top of each round
    has_starting_tile = True

    def __init__(self):
        self.tiles = []

    def is_empty(self):
        """True when the center holds no tiles."""
        return not self.tiles

    def add_tile(self, tile_type: int):
        """Drop a single tile into the center pool."""
        self.tiles.append(tile_type)

    def claim_tile(self, color):
        """Remove every tile matching `color`.

        Returns (True, claimed_tiles) when at least one matched,
        otherwise (False, []).
        """
        claimed = [tile for tile in reversed(self.tiles) if tile == color]
        leftover = [tile for tile in reversed(self.tiles) if tile != color]
        self.tiles = leftover
        if not claimed:
            return False, []
        return True, claimed
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,918
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/Elements/board.py
|
from typing import List, Dict
from Engine.Elements.bag import Bag
# 0: Blue
# 1: Yellow
# 2: Red
# 3: Black
# 4: Cyan
def bag_from_dict(tile_dict: Dict[int, int]):
    """Convert a {tile_type: count} mapping into a Bag."""
    types = [tile_type for tile_type in tile_dict]
    counts = [tile_dict[tile_type] for tile_type in types]
    return Bag(types, counts)
class Board:
    """One player's board: pattern rows, the 5x5 wall, floor line and score.

    Tile colors: 0 Blue, 1 Yellow, 2 Red, 3 Black, 4 Cyan.
    """

    # penalty per occupied floor-line slot (first 7 slots, Azul rules)
    floor_penalty = [1, 1, 2, 2, 2, 3, 3]

    def __init__(self):
        # FIX: these were mutable *class* attributes shared by every Board
        # instance (all boards mutated the same lists); now per-instance.
        self.end_game_condition_met = False
        self.rows: List[int] = [0, 0, 0, 0, 0]
        self.row_color: List[int] = [None for _ in range(0, 5)]
        self.row_is_filled: List[bool] = [False for _ in range(0, 5)]
        self.wall_colors_filled: List[List[bool]] = [[False for _ in range(0, 5)] for _ in range(0, 5)]
        self.wall: List[List[bool]] = [[False for _ in range(0, 5)] for _ in range(0, 5)]
        # colors dropped to the floor; -1 denotes the starting marker
        self.floor: List[int] = []
        self.score = 0

    def end_turn_reset_rows(self):
        """Move each completed pattern row onto the wall.

        Returns a Bag with the surplus tiles (one tile per completed row
        goes to the wall; the remaining capacity-1 tiles are discarded).
        NOTE(review): row_color/row_is_filled are not reset here — confirm
        a completed row is meant to stay locked to its color.
        """
        ret_tiles = {
            0: 0,
            1: 0,
            2: 0,
            3: 0,
            4: 0
        }
        for i, row in enumerate(self.rows):
            row_capacity = i + 1
            color = self.row_color[i]
            if row_capacity == self.rows[i]:
                self.fill_wall(i, color)
                self.rows[i] = 0
                ret_tiles[color] += row_capacity - 1
        return bag_from_dict(ret_tiles)

    def reset_floor(self):
        """Empty the floor line.

        Returns (score deduction, Bag of discarded tiles); the starting
        marker (-1) still incurs its penalty but is not discarded as a tile.
        """
        ret_tiles = {
            0: 0,
            1: 0,
            2: 0,
            3: 0,
            4: 0
        }
        deduction = 0
        for i, color in enumerate(self.floor):
            if color != -1:
                ret_tiles[color] += 1
            deduction += self.floor_penalty[i]
        self.floor = []
        return deduction, bag_from_dict(ret_tiles)

    def fill_row(self, row: int, color: int, n: int):
        """Add `n` tiles of `color` to pattern row `row`; overflow drops
        to the floor line. Returns False when the placement is illegal."""
        if self.row_color[row] is None:
            # starting to add a color to row
            self.row_color[row] = color
        elif self.row_color[row] != color:
            # trying to add a different color to the tile row
            return False
        elif self.wall_colors_filled[row][color]:
            # trying to add a color that's already present in the wall.
            # FIX: was `color in self.wall_colors_filled[row]`, which tested
            # the int against a list of bools instead of indexing by color.
            return False
        if self.row_is_filled[row]:
            return False
        row_capacity = row + 1
        tiles_in_row = self.rows[row]
        if tiles_in_row + n < row_capacity:
            self.rows[row] = tiles_in_row + n
        elif tiles_in_row + n == row_capacity:
            self.rows[row] = tiles_in_row + n
            self.row_is_filled[row] = True
        else:
            # row overflows: cap it and send the excess to the floor
            self.rows[row] = row_capacity
            self.row_is_filled[row] = True
            self.floor += [color] * (tiles_in_row + n - row_capacity)
        return True

    def fill_wall(self, i: int, color: int):
        """Place `color` on wall row `i` (column = color right-rotated by i)
        and score the placement."""
        col = (i + color) % 5
        self.wall[i][col] = True
        self.wall_colors_filled[i][color] = True
        self.score_tile(i, col)

    def score_tile(self, row, col):
        """Add the Azul score for a newly placed tile (updates self.score)."""
        horizontal = self.count_connected_horizontal(row, col)
        vertical = self.count_connected_vertical(row, col)
        if horizontal == 0 and vertical == 0:
            # isolated tile scores 1
            self.score += 1
        else:
            self.score += horizontal + vertical

    def remove_tile(self, row, col):
        """Undo a placement: subtract its score and clear the wall slot.
        Returns the updated score."""
        horizontal = self.count_connected_horizontal(row, col)
        vertical = self.count_connected_vertical(row, col)
        if horizontal == 0 and vertical == 0:
            self.score -= 1
        else:
            self.score -= horizontal + vertical
        self.wall[row][col] = False
        self.wall_colors_filled[row][(col - row) % 5] = False
        return self.score

    def count_connected_vertical(self, row, col):
        """Length of the contiguous vertical run through (row, col),
        including the tile itself; 0 when it has no vertical neighbors."""
        link_remains = True
        length = 0
        for i in range(row + 1, 5):
            if link_remains and self.wall[i][col]:
                length += 1
            else:
                link_remains = False
        link_remains = True
        for i in range(row - 1, -1, -1):
            if link_remains and self.wall[i][col]:
                length += 1
            else:
                link_remains = False
        if length != 0:
            return length + 1
        return 0

    def count_connected_horizontal(self, row, col):
        """Length of the contiguous horizontal run through (row, col);
        also flags the end-game condition on a completed row of 5."""
        link_remains = True
        length = 0
        for i in range(col + 1, 5):
            if link_remains and self.wall[row][i]:
                length += 1
            else:
                link_remains = False
        link_remains = True
        for i in range(col - 1, -1, -1):
            if link_remains and self.wall[row][i]:
                length += 1
            else:
                link_remains = False
        if length != 0:
            length += 1
            if length == 5:
                self.end_game_condition_met = True
            return length
        return 0

    def score_bonus(self):
        # end-of-game bonuses not implemented yet
        pass
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,919
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/Player/ScoringApp.py
|
import wx
from Engine.Elements.board import Board
class AzulScoringApp(wx.Frame):
    """wx GUI: a 5x5 button grid mirroring the Azul wall; clicking toggles
    a tile and live-updates the computed score."""

    # NOTE(review): class-level Board is shared if multiple frames are
    # created; fine for this single-window tool.
    board = Board()
    score = 0

    def __init__(self, parent, title):
        wx.Frame.__init__(self, parent, title=title, size=(400, 300))
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.score_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.quote = wx.StaticText(self, label="Player Score: " + str(self.score), pos=(20, 30))
        self.score_sizer.Add(self.quote)
        self.wall_sizer = wx.GridSizer(5, gap=(1, 1))
        self.buttons = []
        for i in range(0, 25):
            # button id encodes the wall position: row = id // 5, col = id % 5
            self.buttons.append(wx.Button(self, id=i, label=""))
            self.wall_sizer.Add(self.buttons[i], 1, wx.EXPAND)
            self.Bind(wx.EVT_BUTTON, self.toggleButton, source=self.buttons[i])
        self.SetSizer(self.wall_sizer)
        self.SetAutoLayout(1)
        self.wall_sizer.Fit(self)
        self.main_sizer.Add(self.score_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL)
        self.main_sizer.Add(self.wall_sizer, 0, wx.CENTER)
        self.Show()

    def toggleButton(self, event: wx.Button):
        """Toggle the clicked wall slot and refresh the score label."""
        id = event.Id
        row = id // 5
        col = id % 5
        score: int
        if event.EventObject.Label == "":
            event.EventObject.Label = "X"
            # FIX: Board has no add_tile(); place the tile directly and
            # score it, mirroring what Board.remove_tile undoes.
            self.board.wall[row][col] = True
            self.board.wall_colors_filled[row][(col - row) % 5] = True
            self.board.score_tile(row, col)
            score = self.board.score
        else:
            event.EventObject.Label = ""
            score = self.board.remove_tile(row, col)
        self.quote.Label = "Player Score: " + str(score)
        self.quote.LabelText = "Player Score: " + str(score)
# Launch the scoring GUI when this module is executed.
app = wx.App(False)
frame = AzulScoringApp(None, "Azul Scoring App")
frame.Show(True)
app.MainLoop()
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,920
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/Elements/discard.py
|
from Engine.Elements.bag import Bag
class Discard(Bag):
    """A Bag that starts empty but tracks the same tile types as `bag`."""

    def __init__(self, bag: Bag):
        zero_counts = [0 for _ in bag.tiles]
        super(Discard, self).__init__(list(bag.tiles), zero_counts)
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,921
|
CallThemHunter/AzulAI
|
refs/heads/master
|
/Engine/Player/player.py
|
from __future__ import annotations
from Engine.Elements.board import Board
from Engine.Elements.center import Center
from Engine.Elements.discard import Discard
from Engine.Elements.factory import Factory
from typing import List, Union
class Player:
    """One seat at the table: owns a board, shares center/discard/factories."""

    # set when this player takes the starting-player marker from the center
    has_starting_marker = False

    def __init__(self, player_id: int, board: Board, center: Center, discard: Discard, factories: List[Factory]):
        self.id = player_id
        self.score = 0
        self._board = board
        self._center = center
        self._discard = discard
        self._factories = factories
        self._opponents: List[Player] = []

    def set_opponents(self, opponents: List[Player]):
        # injected after construction so all players can reference each other
        self._opponents = opponents

    def end_game_condition_met(self):
        # delegated: the board flags this when a wall row is completed
        return self._board.end_game_condition_met

    def end_turn_reset(self):
        """Score the round: move rows to the wall, apply floor penalties,
        and return surplus/floor tiles to the shared discard."""
        self._board.end_turn_reset_rows()
        deduction, discard_tiles = self._board.reset_floor()
        self._board.score -= deduction
        self.score = self._board.score
        self._discard.add_bag(discard_tiles)

    def state(self):
        """Observable game state for the agent: own rows/wall, each
        opponent's rows/wall, starting-tile flag, center and factory tiles."""
        start_tile = self._center.has_starting_tile
        rows = self._board.rows
        wall = self._board.wall
        opponent_rows = [player._board.rows for player in self._opponents]
        opponent_wall = [player._board.wall for player in self._opponents]
        center_tiles = [self._center.tiles]
        factory_tiles = [factory.tiles for factory in self._factories]
        return rows, wall, opponent_rows, opponent_wall, start_tile, center_tiles, factory_tiles

    # interface for AI to make choices
    def make_choice(self, source: Union[Center, Factory], color: int, row: int):
        """Claim all `color` tiles from `source` and place them on pattern
        row `row`. Returns True for a valid move, False otherwise.

        NOTE(review): if fill_row rejects the placement, the tiles already
        claimed from `source` are dropped — confirm this is intended.
        """
        # return True if valid choice
        # return False if invalid choice
        if isinstance(source, Factory):
            success, tiles = source.claim_tile(color)
            if not success:
                return False
        elif isinstance(source, Center):
            success, tiles = source.claim_tile(color)
            if not success:
                return False
            if source.has_starting_tile:
                # first claim from the center also takes the starting marker
                self.has_starting_marker = True
                source.has_starting_tile = False
                # add starting tile to board.
                self._board.floor += [-1]
        else:
            return False
        # guaranteed to have 1 tile at least
        # return False if wrong color, color already on wall, or row filled
        success = self._board.fill_row(row, color, len(tiles))
        if not success:
            return False
        return True
|
{"/Engine/Elements/factory.py": ["/Engine/Elements/bag.py", "/Engine/Elements/center.py"], "/Engine/GameLoop.py": ["/Engine/Player/player.py", "/Engine/Elements/bag.py", "/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"], "/Engine/Elements/board.py": ["/Engine/Elements/bag.py"], "/Engine/Player/ScoringApp.py": ["/Engine/Elements/board.py"], "/Engine/Elements/discard.py": ["/Engine/Elements/bag.py"], "/Engine/Player/player.py": ["/Engine/Elements/board.py", "/Engine/Elements/center.py", "/Engine/Elements/discard.py", "/Engine/Elements/factory.py"]}
|
21,923
|
Parseluni/tree-practice
|
refs/heads/master
|
/tests/test_binary_search_tree.py
|
import pytest
from binary_search_tree.tree import Tree
@pytest.fixture()
def empty_tree() -> Tree:
    """A brand-new, empty Tree.

    FIX: the return annotation was `-> Tree()`, which instantiated a Tree
    at decoration time and annotated with an instance, not the class.
    """
    return Tree()


@pytest.fixture()
def tree_with_nodes(empty_tree) -> Tree:
    """A Tree preloaded with six known key/value pairs."""
    empty_tree.add(5, "Peter")
    empty_tree.add(3, "Paul")
    empty_tree.add(1, "Mary")
    empty_tree.add(10, "Karla")
    empty_tree.add(15, "Ada")
    empty_tree.add(25, "Kari")
    return empty_tree
def test_add_and_find(tree_with_nodes):
    """Values inserted via add() are retrievable by key."""
    for key, expected in ((5, "Peter"), (15, "Ada"), (3, "Paul")):
        assert tree_with_nodes.find(key) == expected


def test_find_returns_none_for_empty_tree(empty_tree):
    assert empty_tree.find(5) is None


def test_find_returns_value_in_tree(tree_with_nodes):
    assert tree_with_nodes.find(25) == "Kari"


def test_find_returns_none_for_values_not_in_tree(tree_with_nodes):
    assert tree_with_nodes.find(6) is None
def test_inorder_with_empty_tree(empty_tree):
    assert empty_tree.inorder() == []


def test_inorder_with_nodes(tree_with_nodes):
    """Inorder yields the entries sorted by key."""
    pairs = [(1, "Mary"), (3, "Paul"), (5, "Peter"),
             (10, "Karla"), (15, "Ada"), (25, "Kari")]
    expected_answer = [{"key": k, "value": v} for k, v in pairs]
    assert tree_with_nodes.inorder() == expected_answer
def test_preorder_on_empty_tree(empty_tree):
    assert empty_tree.preorder() == []


def test_preorder_on_tree_with_nodes(tree_with_nodes):
    """Preorder visits root, then left subtree, then right subtree."""
    pairs = [(5, "Peter"), (3, "Paul"), (1, "Mary"),
             (10, "Karla"), (15, "Ada"), (25, "Kari")]
    expected_answer = [{"key": k, "value": v} for k, v in pairs]
    assert tree_with_nodes.preorder() == expected_answer
def test_postorder_on_empty_tree(empty_tree):
    assert empty_tree.postorder() == []


def test_postorder_on_tree_with_nodes(tree_with_nodes):
    """Postorder visits children before their parent; root comes last."""
    pairs = [(1, "Mary"), (3, "Paul"), (25, "Kari"),
             (15, "Ada"), (10, "Karla"), (5, "Peter")]
    expected_answer = [{"key": k, "value": v} for k, v in pairs]
    assert tree_with_nodes.postorder() == expected_answer
def test_height_of_empty_tree_is_zero(empty_tree):
    assert empty_tree.height() == 0


def test_height_of_one_node_tree(empty_tree):
    empty_tree.add(5, "pasta")
    assert empty_tree.height() == 1


def test_height_of_many_node_tree(tree_with_nodes):
    """Height grows as nodes extend the deepest path."""
    assert tree_with_nodes.height() == 4
    for key, value in ((2, "pasta"), (2.5, "bread")):
        tree_with_nodes.add(key, value)
    assert tree_with_nodes.height() == 5
def test_bfs_with_empty_tree(empty_tree):
    assert empty_tree.bfs() == []


def test_bfs_with_tree_with_nodes(tree_with_nodes):
    """BFS yields entries level by level, left to right."""
    pairs = [(5, "Peter"), (3, "Paul"), (10, "Karla"),
             (1, "Mary"), (15, "Ada"), (25, "Kari")]
    expected_answer = [{"key": k, "value": v} for k, v in pairs]
    assert tree_with_nodes.bfs() == expected_answer
|
{"/tests/test_binary_search_tree.py": ["/binary_search_tree/tree.py"]}
|
21,924
|
Parseluni/tree-practice
|
refs/heads/master
|
/binary_search_tree/tree.py
|
class TreeNode:
    """A single BST node holding a key, a value, and left/right children."""

    def __init__(self, key, val = None):
        # default the value to the key when none is supplied
        if val == None:
            val = key
        self.key = key
        self.value = val
        self.left = None
        self.right = None


class Tree:
    """A binary search tree keyed by comparable keys (duplicates go right)."""

    def __init__(self):
        self.root = None

    # Time Complexity: O(log n) *if balanced
    # Space Complexity: O(1)
    def add(self, key, value = None):
        """Insert (key, value) iteratively."""
        # edge case: if tree is empty, add node at root
        if self.root == None:
            self.root = TreeNode(key, value)
            return None
        # find the parent node
        else:
            parent = None
            curr_node = self.root
            while curr_node != None:
                parent = curr_node
                if key < curr_node.key:
                    curr_node = curr_node.left
                else:
                    curr_node = curr_node.right
            # determine on which side of the node to create the node
            if key < parent.key:
                parent.left = TreeNode(key, value)
            else:
                parent.right = TreeNode(key, value)

    def add_helper(self, curr_node, key, value):
        """Recursive insert; returns the (possibly new) subtree root."""
        if curr_node == None:
            return TreeNode(key, value)
        if key < curr_node.key:
            curr_node.left = self.add_helper(curr_node.left, key, value)
        else:
            curr_node.right = self.add_helper(curr_node.right, key, value)
        return curr_node

    # Time Complexity: O(log n) *if balanced
    # Space Complexity: O(log n) *if balanced
    def add_recursive(self, key, value = None):
        """Insert (key, value) recursively."""
        if self.root == None:
            self.root = TreeNode(key, value)
        else:
            self.add_helper(self.root, key, value)

    # Time Complexity: O(log n) *if balanced
    # Space Complexity: O(1)
    def find(self, key):
        """Return the value stored under `key`, or None when absent."""
        if self.root == None:
            return None
        curr_node = self.root
        while curr_node != None:
            if curr_node.key == key:
                return curr_node.value
            elif key < curr_node.key:
                curr_node = curr_node.left
            else:
                curr_node = curr_node.right
        return None

    def find_helper(self, curr_node, key, value=None):
        """Recursive lookup; returns the value for `key` or None.

        FIX: the original *assigned* its recursive results to the node's
        children (silently rewriting the tree) and never returned the
        found value. `value` is kept (now defaulted) for signature
        compatibility.
        """
        if curr_node == None:
            return None
        if key == curr_node.key:
            return curr_node.value
        if key < curr_node.key:
            return self.find_helper(curr_node.left, key, value)
        return self.find_helper(curr_node.right, key, value)

    # Time Complexity: O(log n) *if balanced
    # Space Complexity: O(log n) *if balanced
    def find_recursive(self, key, value = None):
        """Recursive counterpart of find().

        FIX: the original never returned the helper's result (always None).
        """
        if self.root == None:
            return None
        return self.find_helper(self.root, key, value)

    def preorder_helper(self, curr_node, trav_list):
        # root, then left subtree, then right subtree
        if curr_node == None:
            return
        else:
            trav_list.append({"key": curr_node.key, "value": curr_node.value})
            self.preorder_helper(curr_node.left, trav_list)
            self.preorder_helper(curr_node.right, trav_list)

    # Time Complexity: O(n)
    # Space Complexity: O(n)
    def preorder(self):
        """Return [{'key':…, 'value':…}, …] in preorder."""
        if self.root == None:
            return []
        traversal_list = []
        self.preorder_helper(self.root, traversal_list)
        return traversal_list

    def inorder_helper(self, curr_node, trav_list):
        # left subtree, then root, then right subtree (sorted order)
        if curr_node == None:
            return
        else:
            self.inorder_helper(curr_node.left, trav_list)
            trav_list.append({"key": curr_node.key, "value": curr_node.value})
            self.inorder_helper(curr_node.right, trav_list)

    # Time Complexity: O(n)
    # Space Complexity: O(n)
    def inorder(self):
        """Return [{'key':…, 'value':…}, …] sorted by key."""
        if self.root == None:
            return []
        traversal_list = []
        self.inorder_helper(self.root, traversal_list)
        return traversal_list

    def postorder_helper(self, curr_node, trav_list):
        # children first, root last
        if curr_node == None:
            return
        else:
            self.postorder_helper(curr_node.left, trav_list)
            self.postorder_helper(curr_node.right, trav_list)
            trav_list.append({"key": curr_node.key, "value": curr_node.value})

    # Time Complexity: O(n)
    # Space Complexity: O(n)
    def postorder(self):
        """Return [{'key':…, 'value':…}, …] in postorder."""
        if self.root == None:
            return []
        traversal_list = []
        self.postorder_helper(self.root, traversal_list)
        return traversal_list

    def height_helper(self, curr_node):
        # height = longest root-to-leaf path, counting nodes
        if curr_node == None:
            return 0
        left_height = self.height_helper(curr_node.left)
        right_height = self.height_helper(curr_node.right)
        return max(left_height, right_height) + 1

    # Time Complexity: O(n)
    # Space Complexity: O(n)
    def height(self):
        """Number of nodes on the longest root-to-leaf path (0 if empty)."""
        if self.root == None:
            return 0
        return self.height_helper(self.root)

    # Time Complexity: O(n)
    # Space Complexity: O(n)
    def bfs(self):
        """Return entries level by level, left to right."""
        if self.root == None:
            return []
        queue = [self.root]
        bfs_list = []
        while len(queue) > 0:
            curr_node = queue.pop(0)
            if curr_node.left:
                queue.append(curr_node.left)
            if curr_node.right:
                queue.append(curr_node.right)
            bfs_list.append({"key": curr_node.key, "value": curr_node.value})
        return bfs_list

    # Useful for printing
    def to_s(self):
        return f"{self.inorder()}"
|
{"/tests/test_binary_search_tree.py": ["/binary_search_tree/tree.py"]}
|
21,977
|
zyh88/PMU
|
refs/heads/master
|
/GAN_MULTI_LSTM_PMU.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
#%%
def load_data(start,SampleNum,N):
    """Load PMU measurements from CompleteOneDay.pkl and slice them into
    N overlapping windows of SampleNum samples (50% overlap).

    Returns (train_data, select, selected_data):
      - train_data: array of shape (N, 12, SampleNum), each window mean-removed
      - select: the standardized (12, total) feature matrix
      - selected_data: the raw unpickled dict
    NOTE(review): assumes the pickle holds a dict keyed by PMU id '1224'
    whose entries convert to DataFrames with the listed columns — confirm.
    """
    #read a pickle file
    pkl_file = open('CompleteOneDay.pkl', 'rb')
    selected_data = pickle.load(pkl_file)
    pkl_file.close()
    for pmu in ['1224']:
        selected_data[pmu]=pd.DataFrame.from_dict(selected_data[pmu])
    # voltage/current magnitudes plus per-phase P and Q
    features=['L1MAG','L2MAG', 'L3MAG','C1MAG',
              'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    select=[]
    for f in features:
        select.append(selected_data['1224'][f].iloc[0:int(N*SampleNum/2)+20].values)
    select=np.array(select)
    # z-score each feature series over time
    select=preprocessing.scale(select,axis=1)
    # selected_data=0
    end=start+SampleNum
    pmu='1224'
    # windows advance by half a window -> 50% overlap
    shift=int(SampleNum/2)
    train_data=np.zeros((N,12,SampleNum))
    # reduced_mean=np.zeros((12,20))
    for i in range(N):
        if i% 1000==0:
            print('iter num: %i', i)
        temp=select[:,start+i*shift:end+i*shift]
        temp=(temp-temp.mean(axis=1).reshape(-1,1)) ## reduced mean
        # temp = preprocessing.scale(temp,axis=1) ## standardized
        # reduced_mean=np.concatenate((reduced_mean,temp[:,0:20]),axis=1)
        train_data[i,:]=temp
    # convert shape of x_train from (60000, 28, 28) to (60000, 784)
    # 784 columns per row
    return train_data,select,selected_data#,select_proc,reduced_mean
#X_train=load_data()
#print(X_train.shape)
#%%
def adam_optimizer():
    """Return an Adam optimizer configured for GAN training (lr=2e-4, beta1=0.5)."""
    optimizer = adam(lr=0.0002, beta_1=0.5)
    return optimizer
#%%
def create_generator():
    """Build the generator: (100, 1) noise -> LSTM/Dense stack -> 480 outputs.

    The final Dense layer emits 12*40 values, later reshaped to a
    (40, 12) window of 12 PMU channels by the combined GAN model.
    """
    generator=Sequential()
    # CuDNNLSTM requires a CUDA GPU; consumes the 100-step noise sequence
    generator.add(CuDNNLSTM(units=256,input_shape=(100,1)))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=512))
    generator.add(LeakyReLU(0.2))
#
#    generator.add(LSTM(units=1024))
#    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=12*40))
    # compile lets the generator be trained standalone; inside the GAN it is
    # trained through the chained model instead
    generator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return generator
g=create_generator()
g.summary()
#%%
def create_discriminator():
    """Build the discriminator: a (40, 12) window -> real/fake probability.

    Input is one mean-centered window of 40 timesteps x 12 PMU channels;
    output is a single sigmoid score.
    """
    discriminator=Sequential()
    # CuDNNLSTM requires a CUDA GPU
    discriminator.add(CuDNNLSTM(units=256,input_shape=(40,12)))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
#
    discriminator.add(Dense(units=512))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
#
#    discriminator.add(LSTM(units=256))
#    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dense(units=1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return discriminator
d =create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
    """Chain generator -> discriminator into the combined GAN model.

    The discriminator is frozen here so that training the combined model
    only updates the generator's weights.
    """
    discriminator.trainable=False
    gan_input = Input(shape=(100,1))
    x = generator(gan_input)
    # generator emits a flat 480-vector; reshape to the discriminator's (40, 12)
    x = Reshape((40,12), input_shape=(12*40,1))(x)
    gan_output= discriminator(x)
    gan= Model(inputs=gan_input, outputs=gan_output)
    gan.compile(loss='binary_crossentropy', optimizer='adam')
    return gan
gan = create_gan(d,g)
gan.summary()
#%%
def plot_generated_images(epoch, generator, examples=100, dim=(10,10), figsize=(10,10)):
    """Sample `examples` fakes from the generator and save a grid plot.

    NOTE(review): the reshape below hard-codes (100, 40, 1) and ignores
    `examples`; it also assumes 40 values per sample while the generator in
    this file emits 12*40=480 — confirm before re-enabling the call sites
    (currently commented out in the training loop).
    """
    scale=1
    noise= scale*np.random.normal(loc=0, scale=1, size=[examples, 100])
    generated_images = generator.predict(noise)
    generated_images = generated_images.reshape(100,40,1)
    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i+1)
        plt.plot(generated_images[i])
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('gan_generated_image %d.png' %epoch)
    return generated_images
#%%
# Hyper-parameters for the adversarial training run.
batch_size=200
epochnum=100
#%%
# 40-sample windows, 100k of them, starting at sample 0.
start,SampleNum,N=(0,40,100000)
#X_train = load_data(start,SampleNum,N)
X_train, selected, selected_data = load_data(start,SampleNum,N)
# NOTE(review): batch_count is computed but never used below — confirm
# whether the inner training loop was meant to iterate over it.
batch_count = X_train.shape[0] / batch_size
#%%
# Reorder each (12, 40) window to (40, 12): time on axis 1 for the LSTM.
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%%
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
def training(generator,discriminator,gan,epochs, batch_size):
    """Alternating GAN training loop (interface and update rule unchanged).

    Reads the module-level globals X_train and SampleNum.
    NOTE(review): the inner loop runs `batch_size` iterations per epoch —
    it looks like `batch_count` was intended; confirm before changing.
    """
    scale=1
    for e in range(1,epochs+1 ):
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed intervals.
        tik=time.perf_counter()
        print("Epoch %d" %e)
        for _ in tqdm(range(batch_size)):
            #generate random noise as an input to initialize the generator
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            # Generate fake windows from the noised input
            generated_images = generator.predict(noise)
            generated_images = generated_images.reshape(batch_size,SampleNum,12)
            # Get a random batch of real windows
            image_batch =X_train[np.random.randint(low=0,high=X_train.shape[0],size=batch_size)]
            #Construct one batch holding both real and fake data
            X= np.concatenate([image_batch, generated_images])
            # Labels: one-sided label smoothing — real windows get 0.9, fakes 0.0
            y_dis=np.zeros(2*batch_size)
            y_dis[:batch_size]=0.9
            # Train the discriminator on the mixed batch
            discriminator.trainable=True
            discriminator.train_on_batch(X, y_dis)
            # Present fresh noise as "real" to train the generator
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            y_gen = np.ones(batch_size)
            # Freeze the discriminator's weights while the chained GAN trains
            discriminator.trainable=False
            gan.train_on_batch(noise, y_gen)
        toc = time.perf_counter()
        print(toc-tik)  # seconds spent on this epoch
#batch_size=0
# time.clock() was removed in Python 3.8; perf_counter() measures intervals.
tic = time.perf_counter()
training(generator,discriminator,gan,epochnum,batch_size)
toc = time.perf_counter()
print(toc-tic)  # wall-clock seconds for the whole training run
#%%
#
#gan.save('GPU_gan_mul_LSTM_N100000_e100_b200.h5')
#generator.save('GPU_generator_mul_LSTM_N100000_e100_b200.h5')
#discriminator.save('GPU_discriminator_mul_LSTM_N100000_e100_b200.h5')
#%%
# Reload previously trained models from disk and score the real windows.
gan=load_model('GPU_gan_mul_LSTM_N100000_e100_b200.h5')
generator=load_model('GPU_generator_mul_LSTM_N100000_e100_b200.h5')
discriminator=load_model('GPU_discriminator_mul_LSTM_N100000_e100_b200.h5')
#%%
start,SampleNum,N=(0,40,100000)
X_train,selected ,selected_data= load_data(start,SampleNum,N)
#batch_count = X_train.shape[0] / batch_size
#%%
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%%
# NOTE(review): this 'a' is overwritten by the centered scores below.
a=discriminator.predict_on_batch(X_train)
#%%
# Score in rate-1 chunks to bound GPU memory.
# NOTE(review): range(rate-1) leaves the final N/rate windows unscored — confirm.
rate=100
shift=N/rate
scores=[]
for i in range(rate-1):
    temp=discriminator.predict_on_batch(X_train[int(i*shift):int((i+1)*shift)])
    scores.append(temp)
    print(i)
scores=np.array(scores)
scores=scores.ravel()
#%%
# Center the discriminator scores around their mean.
probability_mean=np.mean(scores)
a=scores-probability_mean
#%%
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 8
fig_size[1] = 6
plt.plot(a.ravel())
plt.show()
#%%
# Fit a Gaussian to the centered scores and flag windows outside +/- 4 sigma.
data = a
# Fit a normal distribution to the data:
mu, std = norm.fit(data)
# Plot the histogram.
plt.hist(data, bins=25, density=True, alpha=0.6, color='g')
# Plot the PDF.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
plt.show()
#%%
# Anomalies are window indices whose centered score falls outside 4 sigma.
high=mu+4*std
low=mu-4*std
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 8
fig_size[1] = 6
anoms=np.union1d(np.where(a>=high)[0], np.where(a<=low)[0])
print(np.union1d(np.where(a>=high)[0], np.where(a<=low)[0]).shape)
tt=X_train.reshape(N,12*SampleNum)
tt=X_train.reshape(N,12,SampleNum)
#%%
# Plot the first 100 anomalous windows, all 12 channels overlaid.
normal=np.arange(100,110)
for i in anoms[0:100] :
    print(i*int(SampleNum/2))
    for j in range(12):
        plt.plot(tt[i][j])
    plt.legend(('vol', 'curr', 'p','q'),shadow=True, loc=(0.01, 0.48), handlelength=1.5, fontsize=16)
    plt.show()
#%%
# Plot the full day with anomalous spans shaded red.
selected=pd.DataFrame(selected)
selected=selected.T
#%%
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
start=0
dur=int(N*20)
end=start+dur
#selected['color']='b'
#for i in anoms:
#    print(i)
##    print(i)
#    selected['color'].iloc[i*int(SampleNum/2):((i+1)*int(SampleNum/2)+40)]='r'
#
#markers_on=np.where(selected['color'].iloc[start:end]=='r')
#plt.plot(selected[0].iloc[start:end], markevery=list(markers_on),marker='X',mec='r',mew=np.log(np.log(dur))
#         ,ms=2*np.log(np.log(dur)),mfcalt='r')
#for i in range(5):
#    plt.plot(selected[i].iloc[start:end])
#    plt.show()
# Channels 0/3/6/9: one representative per quantity (V, I, P, Q).
for j in [0,3,6,9]:
    plt.plot(selected[j][start:end])
#    plt.xlabel('timeslots',fontsize=28)
#    plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
# Shade each anomalous window; window i covers samples [i*shift, (i+1)*shift+40).
for i in anoms:
#    print(i)
    if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
        plt.axvspan(i*int(SampleNum/2), ((i+1)*int(SampleNum/2)+40), color='red', alpha=0.5)
# NOTE(review): 'j' here is the leaked loop variable (9) — filenames reuse it.
plt.savefig('day %d.pdf' %j, format='pdf', dpi=1200)
plt.savefig('day %d.png' %j)
plt.show()
#plt.savefig('long.pdf', format='pdf', dpi=1200)
#plt.savefig('long %d.png' %dur)
#%%
# Zoomed per-event plots of channel 2 around each anomaly in range.
dur_anoms=[]
for i in anoms:
    if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
        dur_anoms.append([i*int(SampleNum/2),((i+1)*int(SampleNum/2)+20)])
        plt.plot(selected[2].iloc[i*int(SampleNum/2)-20:((i+1)*int(SampleNum/2)+40)].values)
        plt.xlabel('timeslots',fontsize=28)
        plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
#        plt.savefig('figures/event %d.png' %i)
#        plt.savefig('figures/event %d.pdf' %i, format='pdf', dpi=1200)
        plt.show()
print(dur_anoms)
print(len(dur_anoms))
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,978
|
zyh88/PMU
|
refs/heads/master
|
/loading_data.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.io import loadmat
from natsort import natsorted
from scipy.fftpack import fft, ifft
from dtw import dtw
from fastdtw import fastdtw
import time
from scipy.spatial.distance import euclidean
from tslearn.clustering import GlobalAlignmentKernelKMeans
# =============================================================================
# =============================================================================
# # standardized data extraxtion
# =============================================================================
# =============================================================================
#filename='data/Armin_Data/July_03/pkl/jul3.pkl'
def load_standardized_data(filename):
    """Load PMU '1224' measurements from a pickle and z-score each channel.

    Parameters: filename -- path to a pickle of {pmu_id: {feature: series}}.
    Returns an ndarray of shape (12, T) standardized along time (axis=1).
    """
    pmu='1224'
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    selected_data=pd.DataFrame(selected_data)
    # forward-fill sensor dropouts; .ffill() has identical semantics to the
    # deprecated fillna(method='ffill') form
    selected_data=selected_data.ffill()
    print(selected_data.keys())
    data=selected_data[pmu]
    features=['L3MAG','L2MAG','L1MAG', 'C1MAG',
           'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QC', 'QB','QA']
    select=[list(data[f]) for f in features]
    select=np.array(select)
    print(select.shape)
    select=preprocessing.scale(select,axis=1)  # z-score each channel over time
    return select
# =============================================================================
# =============================================================================
# # real data extraxtion
# =============================================================================
# =============================================================================
#filename='data/Armin_Data/July_03/pkl/jul3.pkl'
def load_real_data(filename):
    """Load the raw (unscaled) 12-channel measurement matrix for PMU '1224'.

    Parameters: filename -- path to a pickle of {pmu_id: {feature: series}}.
    Returns an ndarray of shape (12, T): V/I magnitudes plus per-phase P, Q.
    """
    pmu='1224'
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    selected_data=pd.DataFrame(selected_data)
    # forward-fill sensor dropouts; .ffill() replaces deprecated fillna(method=)
    selected_data=selected_data.ffill()
    print(selected_data.keys())
    data=selected_data[pmu]
    features=['L3MAG','L2MAG','L1MAG', 'C1MAG',
           'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QC', 'QB','QA']
    select=[list(data[f]) for f in features]
    select=np.array(select)
    return select
def load_train_data(start,SampleNum,N,filename):
    """Build N half-overlapping, per-window mean-centered training windows.

    Parameters:
        start -- index of the first sample of the first window
        SampleNum -- window length in samples
        N -- number of windows
        filename -- path to a pickle of {pmu_id: {feature: series}}
    Returns an ndarray of shape (N, len(features), SampleNum); channels are
    z-scored over the full day, then each window is mean-centered.
    """
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    for pmu in ['1224']:
        selected_data[pmu]=pd.DataFrame.from_dict(selected_data[pmu])
        features=['L3MAG','L2MAG','L1MAG', 'C1MAG',
               'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QC', 'QB','QA']
    print(selected_data.keys())
    select=[]
    for f in features:
        select.append(selected_data[pmu][f])
    selected_data=0  # drop the large dict early to release memory
    select=np.array(select)
    print(select.shape)
    select=preprocessing.scale(select,axis=1)  # z-score each channel over time
    end=start+SampleNum
    shift=int(SampleNum/2)  # 50% overlap between consecutive windows
    # len(features) (=12) instead of a hard-coded channel count
    train_data=np.zeros((N,len(features),SampleNum))
    for i in range(N):
        if i% 1000==0:
            # was print('iter num: %i', i), which printed the raw tuple unformatted
            print('iter num: %d' % i)
        temp=select[:,start+i*shift:end+i*shift]
        temp=(temp-temp.mean(axis=1).reshape(-1,1)) ## reduced mean
        train_data[i,:]=temp
    return train_data
#X_train=load_data()
#print(X_train.shape)
def load_train_data_V(start,SampleNum,N,filename):
    """Voltage-only variant of load_train_data.

    Returns an ndarray of shape (N, len(features), SampleNum) built from the
    three voltage-magnitude channels of PMU '1224', z-scored over the day and
    mean-centered per window (50% overlap).
    """
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    for pmu in ['1224']:
        selected_data[pmu]=pd.DataFrame.from_dict(selected_data[pmu])
        features=['L1MAG','L2MAG', 'L3MAG']
    print(selected_data.keys())
    select=[]
    for f in features:
        select.append(selected_data[pmu][f])
    selected_data=0  # drop the large dict early to release memory
    select=np.array(select)
    print(select.shape)
    select=preprocessing.scale(select,axis=1)  # z-score each channel over time
    end=start+SampleNum
    shift=int(SampleNum/2)  # 50% overlap between consecutive windows
    # len(features) (=3) instead of a hard-coded channel count
    train_data=np.zeros((N,len(features),SampleNum))
    for i in range(N):
        if i% 1000==0:
            # was print('iter num: %i', i), which printed the raw tuple unformatted
            print('iter num: %d' % i)
        temp=select[:,start+i*shift:end+i*shift]
        temp=(temp-temp.mean(axis=1).reshape(-1,1)) ## reduced mean
        train_data[i,:]=temp
    return train_data
#X_train=load_data()
#print(X_train.shape)
def load_data_with_features(filename,features):
    """Load the requested raw (unscaled) channels for PMU '1224'.

    Parameters:
        filename -- path to a pickle of {pmu_id: {feature: series}}
        features -- list of channel names to extract, in output order
    Returns an ndarray of shape (len(features), T).
    """
    pmu='1224'
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    selected_data=pd.DataFrame(selected_data)
    # forward-fill sensor dropouts; .ffill() replaces deprecated fillna(method=)
    selected_data=selected_data.ffill()
    print(selected_data.keys())
    data=selected_data[pmu]
    select=[list(data[f]) for f in features]
    select=np.array(select)
    print(select.shape)
#    select=preprocessing.scale(select,axis=1)
    return select
def load_standardized_data_with_features(filename,features):
    """Load the requested channels for PMU '1224' and z-score each over time.

    Parameters:
        filename -- path to a pickle of {pmu_id: {feature: series}}
        features -- list of channel names to extract, in output order
    Returns an ndarray of shape (len(features), T), standardized along axis 1.
    """
    pmu='1224'
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    selected_data=pd.DataFrame(selected_data)
    # forward-fill sensor dropouts; .ffill() replaces deprecated fillna(method=)
    selected_data=selected_data.ffill()
    print(selected_data.keys())
    data=selected_data[pmu]
    select=[list(data[f]) for f in features]
    select=np.array(select)
    print(select.shape)
    select=preprocessing.scale(select,axis=1)  # z-score each channel over time
    return select
def load_train_vitheta_data_1225(start,SampleNum,N,filename,features):
    """Windowed training data from a flat (non-nested) pickle for PMU 1225.

    Parameters mirror load_train_data, plus an explicit `features` list.
    Returns an ndarray of shape (N, len(features), SampleNum): channels are
    z-scored over the day, each window mean-centered, 50% overlap.
    (The original hard-coded 9 channels; the shape now follows `features`.)
    """
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    selected_data=pd.DataFrame.from_dict(selected_data)
    print(selected_data.keys())
    select=[]
    for f in features:
        select.append(selected_data[f])
    selected_data=0  # drop the large frame early to release memory
    select=np.array(select)
    print(select.shape)
    select=preprocessing.scale(select,axis=1)  # z-score each channel over time
    end=start+SampleNum
    shift=int(SampleNum/2)  # 50% overlap between consecutive windows
    train_data=np.zeros((N,len(features),SampleNum))
    for i in range(N):
        if i% 1000==0:
            # was print('iter num: %i', i), which printed the raw tuple unformatted
            print('iter num: %d' % i)
        temp=select[:,start+i*shift:end+i*shift]
        temp=(temp-temp.mean(axis=1).reshape(-1,1)) ## reduced mean
        train_data[i,:]=temp
    return train_data
def load_train_vitheta_data_V(start,SampleNum,N,filename,features):
    """Windowed training data for PMU '1224' with a caller-supplied feature list.

    Same windowing as load_train_data (z-score per channel, per-window mean
    centering, 50% overlap). Returns (N, len(features), SampleNum).
    (The original hard-coded 9 channels; the shape now follows `features`.)
    """
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    for pmu in ['1224']:
        selected_data[pmu]=pd.DataFrame.from_dict(selected_data[pmu])
    print(selected_data.keys())
    select=[]
    for f in features:
        select.append(selected_data[pmu][f])
    selected_data=0  # drop the large dict early to release memory
    select=np.array(select)
    print(select.shape)
    select=preprocessing.scale(select,axis=1)  # z-score each channel over time
    end=start+SampleNum
    shift=int(SampleNum/2)  # 50% overlap between consecutive windows
    train_data=np.zeros((N,len(features),SampleNum))
    for i in range(N):
        if i% 1000==0:
            # was print('iter num: %i', i), which printed the raw tuple unformatted
            print('iter num: %d' % i)
        temp=select[:,start+i*shift:end+i*shift]
        temp=(temp-temp.mean(axis=1).reshape(-1,1)) ## reduced mean
        train_data[i,:]=temp
    return train_data
#X_train=load_data()
#print(X_train.shape)
#here we Import raw data for March 9th for all three PMUs and saved each pmu separately
# =============================================================================
# ###Import raw data for MArch 9th for all three PMUs
# def all_4_PMU_data():
# whole_data={}
# dir = 'Raw_data/'
# files = os.listdir(dir)
# files = natsorted(files)
# PMU=['1086','1224','1200','1225']
# for p in PMU:
# whole_data[p]={}
#
# for f in files:
# print(f)
# #print(dir+f)
# temp_data=pd.read_csv(dir+f)
# k=temp_data.keys()
#
# for key in k:
# # print(key)
# for p in PMU:
# # print(p)
# if (p in key.split('/')) :
#
# # print(key.split('/'))
# # print(key.split('/')[2].split(' ')[0])
# if (key.split('/')[2].split(' ')[1]=='(Mean)') and (key.split('/')[2].split(' ')[0]!='LSTATE'):
# # print(p)
# col=key.split('/')[2].split(' ')[0]
# # print(col)
# if col in whole_data[p]:
#
# whole_data[p][col]=np.append(whole_data[p][col],temp_data[key].values)
# # whole_data[p][col].append(list(temp_data[key].values))
# print(len(whole_data[p][col]))
# else:
# print(col)
# whole_data[p][col]=temp_data[key].values
#
#
# return whole_data
#
# #%%
# PMU=['1086','1224','1200','1225']
# for p in PMU:
# dir = 'Raw_data/'
# os.mkdir(dir+p)
# output = open(dir+p+'/data', 'wb')
# pkl.dump(whole[p], output)
# output.close()
# #%%
#
# =============================================================================
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,979
|
zyh88/PMU
|
refs/heads/master
|
/model event detection accuracy.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.fftpack import fft, ifft
from dtw import dtw
from fastdtw import fastdtw
import time
from scipy.spatial.distance import euclidean
from tslearn.clustering import GlobalAlignmentKernelKMeans
import xlrd
#%%
# =============================================================================
# =============================================================================
# # Read the event files for each model
# =============================================================================
# =============================================================================
# Read per-day event files for each detector and build confusion-matrix stats.
dir='figures/all_events/'  # NOTE(review): shadows the builtin dir()
event_points={}
events_acc_detail={}
for i in range(4):
    file=dir+'July_0'+str(i+3)
    GAN_events_file=file+'/GAN/anoms_july_0'+str(i+3)+'.csv'
    GAN_voltage_events_file=file+'/GAN_voltage/anoms_voltage_july_0'+str(i+3)+'.csv'
    Window_events_file=file+'/window/anoms_july_0'+str(i+3)+'.csv'
    GAN=pd.read_csv(GAN_events_file,header=None)[0].values
    GANV=pd.read_csv(GAN_voltage_events_file,header=None)[0].values
    window=pd.read_csv(Window_events_file,header=None)[0].values
    # spreadsheets of manually-confirmed NON-events for each detector
    GAN_events_file=file+'/no_event'+'.xlsx'
    GAN_voltage_events_file=file+'/no_event_v'+'.xlsx'
    GANN=pd.read_excel(GAN_events_file)
    GANVN=pd.read_excel(GAN_voltage_events_file)
    GANVN=GANVN['GAN voltage'].values
    windowN=GANN['window'].values
    GANN=GANN['GAN'].values
    GANN = GANN[~np.isnan(GANN)]
    GANVN = GANVN[~np.isnan(GANVN)]
    windowN = windowN[~np.isnan(windowN)]
    event_points[i+3]={}
    # true detections = detected points minus confirmed false alarms
    event_points[i+3]['GAN_event']=np.setdiff1d(GAN,GANN)
    event_points[i+3]['GANV_event']=np.setdiff1d(GANV,GANVN)
    event_points[i+3]['GANV_total']=np.union1d(GAN,GANV)
    event_points[i+3]['GAN_total_events']=np.union1d(event_points[i+3]['GAN_event'],event_points[i+3]['GANV_event'])
    event_points[i+3]['window_event']=np.setdiff1d(window,windowN)
    # +/-240-sample tolerance band around each GAN event (window index * 20)
    all_event_points=[]
    for event in event_points[i+3]['GAN_total_events']:
        low=event*20-240
        high=event*20+240
        rng=np.arange(low,high)
        all_event_points.append(rng)
    all_event_points =np.array(all_event_points)
    # window-detector events that fall inside any GAN tolerance band
    mutual_GAN_window=[]
    for j in event_points[i+3]['window_event']:
        if j in all_event_points:
            mutual_GAN_window.append(j)
    mutual_GAN_window=np.array(mutual_GAN_window)
    event_points[i+3]['mutual_GAN_window']=mutual_GAN_window
    whole_event_number=event_points[i+3]['GAN_total_events'].shape[0]+event_points[i+3]['window_event'].shape[0]-mutual_GAN_window.shape[0]
    events_acc_detail[i+3]={}
    events_acc_detail[i+3]['whole_detected_number']=event_points[i+3]['GANV_total'].shape[0]+window.shape[0]-mutual_GAN_window.shape[0]
    events_acc_detail[i+3]['whole_event_number']=whole_event_number
    TP=events_acc_detail[i+3]['GAN_TP']=event_points[i+3]['GAN_total_events'].shape[0]
    FP=events_acc_detail[i+3]['GAN_FP']=event_points[i+3]['GANV_total'].shape[0]-event_points[i+3]['GAN_total_events'].shape[0]
    FN=events_acc_detail[i+3]['GAN_FN']=whole_event_number-event_points[i+3]['GAN_total_events'].shape[0]
    TN=events_acc_detail[i+3]['GAN_TN']=events_acc_detail[i+3]['whole_detected_number']-(events_acc_detail[i+3]['GAN_TP']+events_acc_detail[i+3]['GAN_FP']+events_acc_detail[i+3]['GAN_FN'])
    events_acc_detail[i+3]['GAN_accuracy']=(TP+TN)/(TP+TN+FP+FN)
    events_acc_detail[i+3]['GAN_F1score']=(2*TP)/(2*TP+FP+FN)
    # BUG FIX: the MCC denominator is sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN));
    # the original multiplied TN*FP and TN*FN instead of adding.
    events_acc_detail[i+3]['GAN_MCC']=((TP*TN)-(FP*FN))/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
    TP=events_acc_detail[i+3]['W_TP']=event_points[i+3]['window_event'].shape[0]
    FP=events_acc_detail[i+3]['W_FP']=windowN.shape[0]
    FN=events_acc_detail[i+3]['W_FN']=whole_event_number-event_points[i+3]['window_event'].shape[0]
    TN=events_acc_detail[i+3]['W_TN']=events_acc_detail[i+3]['whole_detected_number']-(events_acc_detail[i+3]['W_TP']+events_acc_detail[i+3]['W_FP']+events_acc_detail[i+3]['W_FN'])
    events_acc_detail[i+3]['W_accuracy']=(TP+TN)/(TP+TN+FP+FN)
    events_acc_detail[i+3]['W_F1score']=(2*TP)/(2*TP+FP+FN)
    # BUG FIX: same MCC denominator correction as above
    events_acc_detail[i+3]['W_MCC']=((TP*TN)-(FP*FN))/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
    print(i)
#%%
# Aggregate the per-day confusion-matrix counts into whole-period metrics.
G_TP=0
G_FP=0
G_FN=0
G_TN=0
W_TP=0
W_FP=0
W_FN=0
W_TN=0
for day in events_acc_detail:
    G_TP+=events_acc_detail[day]['GAN_TP']
    G_FP+=events_acc_detail[day]['GAN_FP']
    G_FN+=events_acc_detail[day]['GAN_FN']
    G_TN+=events_acc_detail[day]['GAN_TN']
    W_TP+=events_acc_detail[day]['W_TP']
    W_FP+=events_acc_detail[day]['W_FP']
    W_FN+=events_acc_detail[day]['W_FN']
    W_TN+=events_acc_detail[day]['W_TN']
    print(day)
events_acc_detail['all']={}
TP=G_TP
FP=G_FP
FN=G_FN
TN=G_TN
events_acc_detail['all']['GAN_whole_Days_accuracy']=(TP+TN)/(TP+TN+FP+FN)
events_acc_detail['all']['GAN_whole_Days_F1score']=(2*TP)/(2*TP+FP+FN)
# BUG FIX: MCC denominator uses sums (TN+FP)(TN+FN), not products TN*FP, TN*FN.
events_acc_detail['all']['GAN_whole_Days_MCC']=((TP*TN)-(FP*FN))/math.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
TP=W_TP
FP=W_FP
FN=W_FN
TN=W_TN
events_acc_detail['all']['W_whole_Days_accuracy']=(TP+TN)/(TP+TN+FP+FN)
events_acc_detail['all']['W_whole_Days_F1score']=(2*TP)/(2*TP+FP+FN)
# BUG FIX: same MCC denominator correction as above
events_acc_detail['all']['W_whole_Days_MCC']=((TP*TN)-(FP*FN))/math.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
#%%
# =============================================================================
# =============================================================================
# # the ones are in the window but GAN did not extracted
# =============================================================================
# =============================================================================
# NOTE(review): mutual_GAN_window here is the value leaked from the LAST loop
# iteration above (day index 6), matching event_points[6] — confirm intended.
WyGANn=np.setdiff1d(event_points[6]['window_event'],mutual_GAN_window)
#mutual_shifts=[]
#for u in mutual_GAN_window:
#    u=int(u)
#    low=u-240
#    high=u+240
#    rng=np.arange(low,high)
#    mutual_shifts.append(rng)
#GANyWn=np.setdiff1d(all_event_points,mutual_GAN_window)
##%%
#GANyWn=np.unique(np.floor(GANyWn/20))
#
#%%
def load_real_data(filename):
    """Load the raw (unscaled) 12-channel measurement matrix for PMU '1224'.

    Parameters: filename -- path to a pickle of {pmu_id: {feature: series}}.
    Returns an ndarray of shape (12, T): V/I magnitudes plus per-phase P, Q.
    """
    pmu='1224'
    # context manager closes the handle even if unpickling raises
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    selected_data=pd.DataFrame(selected_data)
    # forward-fill sensor dropouts; .ffill() replaces deprecated fillna(method=)
    selected_data=selected_data.ffill()
    print(selected_data.keys())
    data=selected_data[pmu]
    features=['L1MAG','L2MAG', 'L3MAG','C1MAG',
           'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    select=[list(data[f]) for f in features]
    select=np.array(select)
    return select
#%%
filename='data/Armin_Data/July_06/pkl/J6.pkl'
select_1224=load_real_data(filename)
#%%
# Plot a 2x2 grid (V, I, P, Q) of +/-120 samples around each event the
# window detector found but GAN missed.
start,SampleNum,N=(0,40,500000)
for point in WyGANn:
    print(point)
    point=int(point)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select_1224[i][point-120:point+120])
    # NOTE(review): 'A' 'B' 'C' concatenates to 'ABC'; legend() iterates the
    # string and happens to label the three phases A/B/C — confirm intended.
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1224[i][point-120:point+120])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1224[i][point-120:point+120])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select_1224[i][point-120:point+120])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    plt.show()
#
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,980
|
zyh88/PMU
|
refs/heads/master
|
/plot paper figures.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.fftpack import fft, ifft
from dtw import dtw
from fastdtw import fastdtw
import time
from scipy.spatial.distance import euclidean
from tslearn.clustering import GlobalAlignmentKernelKMeans
import loading_data
from loading_data import load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_data_with_features
from sklearn.ensemble import IsolationForest
#%%
filename='data/Armin_Data/July_03/pkl/J3.pkl'
start,SampleNum,N,filename=(0,40,500000,filename)
select_1224=load_real_data(filename)
#%%
filename='data/Armin_Data/July_03/pkl/rawdata3.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','L1Ang','L2Ang','L3Ang','C1Ang','C2Ang','C3Ang']
#%%
# BUG FIX: load_standardized_data_with_features is not in the explicit
# from-import list above, so the bare name raised NameError at runtime;
# qualify it through the already-imported loading_data module.
dds=loading_data.load_standardized_data_with_features(filename,k)
#%%
dd=load_data_with_features(filename,k)
#%%
# Window indices to plot (manually picked events).
anom_select=[60613]
#anom_select=[350,351,3182,4743,7419,49465,57881,67737,69018,88255,254519,127594,144417,12901,254742,12914,13130,26959,30703,496291]
#anom_select=[36687, 37490, 41092, 54565, 66277, 84418, 85595, 322135, 338446, 425659, 354777,339351, 252725]
scale=8    # half-width of the plotted span, in windows of 40 samples
shift=0
k=0
select_1224=dd
# 2x2 grid: V magnitude / V angle / I magnitude / I angle around each event.
for anom in anom_select:
    k+=1
    print(anom)
    anom=int(anom)
    plt.subplot(221)
    for i in [2,1,0]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift)])
#    plt.legend('A' 'B' 'C',fontsize= 20,loc=6)
    plt.yticks(fontsize=15)
#    plt.ylim([7100,7230])
#    plt.figtext(.5,.9,'Temperature', fontsize=100, ha='center')
    plt.title('V (magnitude)',fontsize= 30)
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift)])
#    plt.legend('A' 'B' 'C')
    plt.title('V (Angle)',fontsize= 30)
    plt.yticks(fontsize=15)
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift)]/1000)
#    plt.legend('A' 'B' 'C')
    plt.title('I (Magnitude)',fontsize= 30)
    plt.xlabel('Timeslots',fontsize= 30)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.subplot(224)
    for i in [11,10,9]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift)]/1000)
#    plt.legend('A' 'B' 'C')
    plt.title('I (Angle)',fontsize= 30)
    plt.xlabel('Timeslots',fontsize= 30)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
#    plt.savefig('event.pdf', format='pdf')
#    figname='figures/paper/huge_osc.pdf'
#    plt.savefig(figname)
    plt.show()#%%
#%%
# =============================================================================
# just GAN scores
# =============================================================================
# NOTE(review): whole_features is not defined anywhere in this file —
# presumably built by another script/cell; confirm before running standalone.
plt.scatter(whole_features['scores_V'], whole_features['scores'],color=whole_features['color'])
#plt.legend('Noraml' 'Events',fontsize= 20,loc=6)
plt.yticks(fontsize=15)
#    plt.figtext(.5,.9,'Temperature', fontsize=100, ha='center')
plt.xlabel('MPM',fontsize= 30)
plt.ylabel('MV',fontsize= 30)
#%%
# =============================================================================
# =============================================================================
# # all proposed model
# =============================================================================
# =============================================================================
#%%
# Flag indices outside +/- zp sigma of a fitted Gaussian, per feature stream.
# NOTE(review): `data` is not defined in this file — expected to be an
# iterable of the four score arrays named below; confirm the producing cell.
zp=3.1
anoms31={}
names=['scores','scores_V','maxvar','maxmaxmin']
for i,d in enumerate(data):
    dt = d
    # Fit a normal distribution to the data:
    mu, std = norm.fit(dt)
    high=mu+zp*std
    low=mu-zp*std
    anoms_1224=np.union1d(np.where(dt>=high)[0], np.where(dt<=low)[0])
    print(anoms_1224.shape)
    anoms31[names[i]]=anoms_1224
#%%
# Union of all four detectors' anomaly indices.
t1=np.union1d(anoms31['scores'],anoms31['scores_V'])
t2=np.union1d(anoms31['maxvar'],anoms31['maxmaxmin'])
total_events=np.union1d(t1,t2)
#%%
# Mark the union of detected events as a 0/1 column on whole_features.
whole_features['new_anoms']=np.zeros((N,1))
for i in total_events:
    i=int(float(i))
    whole_features['new_anoms'][i]=1
#%%
# Build a per-window color array: red for events, blue for normal windows.
an=0
whole_features['new_color']=[]
for i in whole_features['new_anoms']:
#    print(i)
    if int(i) == 0:
        whole_features['new_color'].append('b')
    else:
        an+=1
        whole_features['new_color'].append('r')
whole_features['new_color']=np.array(whole_features['new_color'])
print(an)  # number of flagged event windows
#%%
plt.scatter(whole_features['maxmaxmin'], whole_features['maxvar'],color=whole_features['new_color'])
#plt.legend('Noraml' 'Events',fontsize= 20,loc=6)
plt.yticks(fontsize=15)
#    plt.figtext(.5,.9,'Temperature', fontsize=100, ha='center')
plt.xlabel('MPM',fontsize= 30)
plt.ylabel('MV',fontsize= 30)
#%%
# =============================================================================
# =============================================================================
# # proposed model: accuracy / F1 / MCC from hand-tallied confusion counts
# =============================================================================
# =============================================================================
total=3152   # total labelled windows (context only, not used below)
t_ev=2621    # total true events (context only, not used below)
TP,TN,FP,FN=[2321,60,60,200]
acc=(TP+TN)/(TP+TN+FP+FN)
f1=(2*TP)/(2*TP+FP+FN)
# Fix: the MCC denominator is sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
# The original code multiplied the last two factors as (TN*FP)*(TN*FN).
mcc=((TP*TN)-(FP*FN))/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
print(acc,f1,mcc)
#%%
# =============================================================================
# =============================================================================
# # GAN empty variant: same metrics with shifted confusion counts
# =============================================================================
# =============================================================================
total=3152   # context only
t_ev=2621    # context only
TP,TN,FP,FN=[2321-300,160,120,200+300]
acc=(TP+TN)/(TP+TN+FP+FN)
f1=(2*TP)/(2*TP+FP+FN)
# Fix: correct MCC denominator (was (TN*FP)*(TN*FN) in the original).
mcc=((TP*TN)-(FP*FN))/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
print(acc,f1,mcc)
#%%
# =============================================================================
#
# =============================================================================
# =============================================================================
# benchmark: same metrics for the baseline detector
# =============================================================================
total=3152   # context only
t_ev=2621    # context only
TP,TN,FP,FN=[450,460,90,1500]
acc=(TP+TN)/(TP+TN+FP+FN)
f1=(2*TP)/(2*TP+FP+FN)
# Fix: correct MCC denominator (was (TN*FP)*(TN*FN) in the original).
mcc=((TP*TN)-(FP*FN))/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
print(acc,f1,mcc)
#%%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # correlation plot for ivpq: per-day 12x12 channel correlation matrices
# =============================================================================
# =============================================================================
# =============================================================================
corr={}
days=np.arange(3,18)
for d in days:
    cr=np.zeros((12,12))
    # Day files are zero-padded below 10 (July_03 ... July_09).
    if d<10:
        filename='data/Armin_Data/July_0'+str(d)+'/pkl/J'+str(d)+'.pkl'
    else:
        filename='data/Armin_Data/July_'+str(d)+'/pkl/J'+str(d)+'.pkl'
    data=load_real_data(filename)
    # Fill the lower triangle with Pearson correlations and mirror it.
    for i in range(12):
        print(i)
        for j in range(12):
            if i >=j:
                cr[i,j]=np.corrcoef(data[i],data[j])[0,1]
                cr[j,i]=cr[i,j]
    sns.heatmap(cr)
    corr[d]=cr
#%%
# One heatmap per day.
for d in corr:
    print(d)
    sns.heatmap(corr[d])
    plt.show()
#%%
sns.heatmap(corr[15])
#%%
# Zoomed 2x2 plot (V/I/P/Q) around one selected anomaly window.
# NOTE(review): the second assignment overrides the first candidate list.
anom_select=[30855, 35292, 46381, 49019, 49998, 74174]
anom_select=[322691]
scale=1100
shift=1283000
for anom in anom_select:
    print(anom)
    anom=int(anom)
    plt.subplot(221)
    for i in [2]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift-20000)])
    plt.legend('A' 'B' 'C',fontsize= 20,loc=6)
    plt.yticks(fontsize=15)
    plt.ylim([7120,7200])
# plt.figtext(.5,.9,'Temperature', fontsize=100, ha='center')
    plt.title('V (Volts)',fontsize= 30)
# plt.xlabel('Timeslots',fontsize= 30)
# plt.xticks(fontsize=15)
# plt.yticks(fontsize=15)
#
#
    plt.subplot(222)
    for i in [3]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift-20000)])
# plt.legend('A' 'B' 'C')
    plt.title('I (Amps)',fontsize= 30)
    plt.yticks(fontsize=15)
    plt.ylim([100,150])
    plt.subplot(223)
    for i in [6]:
        # Power channels are divided by 1000 -> kW / kVAR.
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift-20000)]/1000)
# plt.legend('A' 'B' 'C')
    plt.title('P (kW)',fontsize= 30)
    plt.xlabel('Timeslots',fontsize= 30)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.ylim([900,1040])
    plt.subplot(224)
    for i in [11]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-40*scale+shift:(anom*int(SampleNum/2)+40*scale+shift-20000)]/1000)
# plt.legend('A' 'B' 'C')
    plt.title('Q (kVAR)',fontsize= 30)
    plt.xlabel('Timeslots',fontsize= 30)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
# figname='figures/paper/huge_osc.pdf'
# plt.savefig(figname)
plt.show()#%%
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,981
|
zyh88/PMU
|
refs/heads/master
|
/GAN_LSTM_PMU.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import keras
from keras.layers import Dense, Dropout, Input,Embedding, Flatten
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import os
import pickle
import operator
import math
#%%
def load_data(start,SampleNum,N):
    """Load PMU current-magnitude training windows from 'CompleteOneDay.pkl'.

    Builds N half-overlapping windows of length SampleNum from the '1224'
    PMU's C1MAG channel, each centred by subtracting its own mean.

    Returns:
        (x_train, select): x_train is an (N, SampleNum) ndarray of centred
        windows; select is the raw first N*SampleNum/2 samples of C1MAG.
    """
    with open('CompleteOneDay.pkl', 'rb') as pkl_file:
        selected_data = pickle.load(pkl_file)
    # Promote every PMU's dict to a DataFrame so channels index by name.
    for pmu in selected_data:
        selected_data[pmu] = pd.DataFrame.from_dict(selected_data[pmu])
    series = selected_data['1224']['C1MAG']
    select = series.iloc[0:int(N * SampleNum / 2)].values
    shift = int(SampleNum / 2)   # 50% overlap between consecutive windows
    end = start + SampleNum
    windows = []
    for k in range(N):
        window = series[start + k * shift:end + k * shift]
        windows.append(window - np.mean(window))
    x_train = np.array(windows)
    return x_train, select
#X_train=load_data()
#print(X_train.shape)
#%%
def adam_optimizer():
    """Return the Adam optimizer shared by generator and discriminator."""
    return adam(beta_1=0.5, lr=0.0002)
#%%
def create_generator():
    """Build and compile the GAN generator.

    Maps a 100-dim noise vector to a 40-sample synthetic PMU window.

    NOTE(review): the preceding Dense layers emit 2-D tensors while LSTM
    expects a 3-D (batch, timesteps, features) input, so a Reshape /
    RepeatVector is presumably missing before the first LSTM — confirm
    against the saved model files before retraining.
    """
    # Fix: LSTM is referenced below but is not in this file's top-level
    # `from keras.layers import ...` list, so bring it into scope locally.
    from keras.layers import LSTM
    generator=Sequential()
    generator.add(Embedding(input_dim=100,output_dim=1,input_length=10))
    generator.add(Flatten())
    generator.add(Dense(units=256,input_dim=100))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=512))
    generator.add(LeakyReLU(0.2))
    generator.add(LSTM(units=512,return_sequences=True))
    generator.add(LeakyReLU(0.2))
    generator.add(LSTM(units=512,return_sequences=False))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=40))
    generator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return generator
g=create_generator()
g.summary()
#%%
def create_discriminator():
    """Build and compile the GAN discriminator.

    Scores a 40-sample window with a sigmoid real/fake probability.

    Fixes: the original body contained two abandoned, broken lines —
    `discriminator.add(Embedding(, 1, input_length=40))` (syntax error)
    and `discriminator.add(F)` (undefined name) — which are removed here.
    NOTE(review): as in the generator, the 2-D Dense output feeding an
    LSTM presumably needs an explicit Reshape — confirm.
    """
    # Fix: LSTM is not in this file's top-level keras.layers import list.
    from keras.layers import LSTM
    discriminator=Sequential()
    discriminator.add(Dense(units=1024,input_dim=40))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(units=512))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(LSTM(units=512,return_sequences=True))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(LSTM(units=512,return_sequences=False))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dense(units=256))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dense(units=1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return discriminator
d =create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
    """Chain the generator and a frozen discriminator into one GAN model."""
    # Freeze D so that training the combined model only updates G.
    discriminator.trainable=False
    gan_input = Input(shape=(100,))
    fake_window = generator(gan_input)
    validity = discriminator(fake_window)
    gan = Model(inputs=gan_input, outputs=validity)
    gan.compile(loss='binary_crossentropy', optimizer='adam')
    return gan
gan = create_gan(d,g)
gan.summary()
#%%
def plot_generated_images(epoch, generator, examples=100, dim=(10,10), figsize=(10,10)):
    """Sample `examples` windows from the generator, plot them in a
    dim[0] x dim[1] grid, and save as 'gan_generated_image <epoch>.png'.

    Returns the generated samples reshaped to (100, 40, 1).
    """
    scale = 1
    noise = scale * np.random.normal(loc=0, scale=1, size=[examples, 100])
    samples = generator.predict(noise)
    samples = samples.reshape(100, 40, 1)
    plt.figure(figsize=figsize)
    rows, cols = dim
    for idx, window in enumerate(samples):
        plt.subplot(rows, cols, idx + 1)
        plt.plot(window)
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('gan_generated_image %d.png' %epoch)
    return samples
#%%
# Load 5000 half-overlapping 40-sample windows and build the three models.
batch_size=100
start,SampleNum,N=(0,40,5000)
X_train, selected = load_data(start,SampleNum,N)
# NOTE(review): batch_count is computed but never used below.
batch_count = X_train.shape[0] / batch_size
#%%
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
#%%
def training(generator,discriminator,gan,epochs, batch_size=100):
    """Alternating GAN training loop.

    Each epoch runs `batch_size` alternating steps: first the discriminator
    is trained on a mixed real/fake batch, then the frozen-D combined model
    trains the generator on fresh noise. Reads the module-level X_train.
    """
    scale=1
    for e in range(1,epochs+1 ):
        print("Epoch %d" %e)
        for _ in tqdm(range(batch_size)):
            # Random noise input for the generator.
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            # Generate fake PMU windows from the noised input.
            generated_images = generator.predict(noise)
            # Get a random batch of real windows.
            image_batch =X_train[np.random.randint(low=0,high=X_train.shape[0],size=batch_size)]
            # Stack real and fake into one discriminator batch.
            X= np.concatenate([image_batch, generated_images])
            # Labels: 0.9 for real (one-sided label smoothing), 0 for fake.
            y_dis=np.zeros(2*batch_size)
            y_dis[:batch_size]=0.9
            # Train the discriminator on the mixed batch.
            discriminator.trainable=True
            discriminator.train_on_batch(X, y_dis)
            # Present fresh noise as "real" to train the generator.
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            y_gen = np.ones(batch_size)
            # Freeze the discriminator while the chained GAN trains the
            # generator (D's weights must stay fixed in this step).
            discriminator.trainable=False
            gan.train_on_batch(noise, y_gen)
#        if e == 1 or e % 5 == 0:
#
#            plot_generated_images(e, generator)
batch_size=200
epochnum=20
training(generator,discriminator,gan,epochnum,batch_size)
#%%
# Undo the 50% overlap: keep every other window and flatten for plotting.
reducedmean=[]
count=0
for i in X_train:
    if count%2==0:
        reducedmean.append(i)
    count+=1
reducedmean=np.array(reducedmean)
reducedmean=reducedmean.ravel()
plt.plot(reducedmean)
plt.savefig('reduced.png')
reducedmean=pd.DataFrame(reducedmean)
#%%
# Score every training window with the trained discriminator.
a=[]
count=0
for i in range(N):
    a.append(discriminator.predict(X_train[i].reshape(1,SampleNum)))
a=np.array(a)
plt.plot(a.ravel())
plt.show()
#%%
# Windows with extreme discriminator scores (near 0 or 1) are anomalies.
high=.99
low=0.01
anoms=np.union1d(np.where(a>high)[0], np.where(a<low)[0])
print(np.union1d(np.where(a>high)[0], np.where(a<low)[0]).shape)
for i in anoms :
#    print(i)
    plt.plot(X_train[i])
    plt.show()
#%%
selected=pd.DataFrame(selected)
#%%
# Mark anomalous sample spans red on the full-day trace and save it.
fig_size = plt.rcParams["figure.figsize"]
# Widen the default figure for the long time series.
fig_size[0] = 60
fig_size[1] = 30
plt.rcParams["figure.figsize"] = fig_size
start=0
dur=1000000
end=start+dur
selected['color']='b'
for i in anoms:
#    print(i)
    selected['color'].iloc[i*int(SampleNum/2):((i+1)*int(SampleNum/2)+40)]='r'
markers_on=np.where(selected['color'].iloc[start:end]=='r')
#plt.plot(selected[0].iloc[start:end], markevery=list(markers_on),marker='X',mec='r',mew=np.log(np.log(dur))
#         ,ms=2*np.log(np.log(dur)),mfcalt='r')
plt.plot(selected[0].iloc[start:end])
plt.xlabel('timeslots',fontsize=28)
# NOTE(review): label says pmu="1024" but the loaded channel is PMU '1224'.
plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
for i in anoms:
    if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
        plt.axvspan(i*int(SampleNum/2), ((i+1)*int(SampleNum/2)+40), color='red', alpha=0.5)
plt.savefig('long.pdf', format='pdf', dpi=1200)
plt.savefig('long %d.png' %dur)
#%%
# Save one zoomed figure per detected event and collect its sample span.
dur_anoms=[]
for i in anoms:
    if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
        dur_anoms.append([i*int(SampleNum/2),((i+1)*int(SampleNum/2)+20)])
        plt.plot(selected[0].iloc[i*int(SampleNum/2)-20:((i+1)*int(SampleNum/2)+40)].values)
        plt.xlabel('timeslots',fontsize=28)
        plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
        plt.savefig('figures/event %d.png' %i)
        plt.savefig('figures/event %d.pdf' %i, format='pdf', dpi=1200)
        plt.show()
print(dur_anoms)
print(len(dur_anoms))
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,982
|
zyh88/PMU
|
refs/heads/master
|
/Threshold.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.fftpack import fft, ifft
from dtw import dtw
from fastdtw import fastdtw
import time
from scipy.spatial.distance import euclidean
from tslearn.clustering import GlobalAlignmentKernelKMeans
import loading_data
from loading_data import load_real_data, load_standardized_data,load_train_data,load_train_data_V
from scipy import stats
from sklearn.ensemble import IsolationForest
#%%
# =============================================================================
# =============================================================================
# # select the desired day standardized data
# =============================================================================
# =============================================================================
filename='data/Armin_Data/July_07/pkl/J7.pkl'
#%%
selected=load_standardized_data(filename)
#%%
# =============================================================================
# =============================================================================
# # load the best GAN model (pre-trained on all 12 channels)
# =============================================================================
# =============================================================================
gan=load_model('GPU_gan_mul_LSTM_twolayer_N500000_e1000_b100.h5')
generator=load_model('GPU_generator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
discriminator=load_model('GPU_discriminator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
#%%
# =============================================================================
# =============================================================================
# # Load training data: 500000 windows of 40 samples x 12 channels
# =============================================================================
# =============================================================================
start,SampleNum,N,filename=(0,40,500000,filename)
#%%
X_train= load_train_data(start,SampleNum,N,filename)
#%%
# Reshape to (N, SampleNum, 12) for the LSTM discriminator, then score all
# windows in `rate` equal batches to bound memory use.
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
rate=1000
shift=N/rate
scores=[]
for i in range(rate):
    temp=discriminator.predict_on_batch(X_train[int(i*shift):int((i+1)*shift)])
    scores.append(temp)
    print(i)
scores=np.array(scores)
scores=scores.ravel()
# Center the scores: `a` is each window's deviation from the mean score.
probability_mean=np.mean(scores)
a=scores-probability_mean
#%%
# Voltage-only GAN models.
# NOTE(review): the gan/generator files say e100 while the discriminator
# says e1000 — confirm these belong to the same training run.
ganV=load_model('GPU_gan_voltage_N500000_e100_b10_1225.h5')
generatorV=load_model('GPU_generator_voltage_N500000_e100_b10_1225.h5')
discriminatorV=load_model('GPU_discriminator_voltage_N500000_e1000_b10_1225.h5')
#%%
start,SampleNum,N,filename=(0,40,500000,filename)
#%%
X_train_V= load_train_data_V(start,SampleNum,N,filename)
#%%
# Same batched scoring as the 12-channel model, but on the 3 voltage channels.
X_train_V=X_train_V.reshape(N,3*SampleNum)
X_train_V=X_train_V.reshape(N,SampleNum,3)
rate=1000
shift=N/rate
scoresV=[]
for i in range(rate):
    temp=discriminatorV.predict_on_batch(X_train_V[int(i*shift):int((i+1)*shift)])
    scoresV.append(temp)
    print(i)
scoresV=np.array(scoresV)
scoresV=scoresV.ravel()
probability_meanV=np.mean(scoresV)
aV=scoresV-probability_meanV
#%%
# Hand-crafted per-window features: per-channel peak-to-peak and variance,
# plus the centred GAN score.
whole_features={}
whole_features['scores']=[]
whole_features['maxmin']=[]
whole_features['var']=[]
for i in range(N):
    maxmin=[]
    var=[]
    for j in range(12):
        maxmin.append(np.max(X_train[i][:,j])-np.min(X_train[i][:,j]))
        var.append(np.var(X_train[i][:,j]))
    whole_features['scores'].append(a[i])
    whole_features['maxmin'].append(maxmin)
    whole_features['var'].append(var)
    if i% 10000==0:
        print('iter num: %i', i)
#%%
whole_features['scores']=np.array(whole_features['scores'])
whole_features['maxmin']=np.array(whole_features['maxmin'])
whole_features['var']=np.array(whole_features['var'])
#%%
whole_features['scores_V']=scoresV
#%%
whole_features['scores_scale_V']=preprocessing.scale(aV)
#%%
# MPM / MV: maximum over the 12 channels of peak-to-peak and variance.
whole_features['maxmaxmin']=np.max(whole_features['maxmin'],axis=1)
whole_features['maxvar']=np.max(whole_features['var'],axis=1)
#%%
whole_features['scores_scale']=preprocessing.scale(whole_features['scores'])
#%%
# =============================================================================
# mark the anomalies (reference labels exported earlier to CSV)
# =============================================================================
# NOTE(review): this reads July_03 labels while `filename` above points at
# July_07 — confirm the intended day.
excel_file='figures/all_events/July_03/GAN/anoms_July_03.csv'
anomalies=pd.read_csv(excel_file,header=None)[0]
#%%
# =============================================================================
# =============================================================================
# # event_points come from "model event detection accuracy .py"
# # NOTE(review): event_points is not defined anywhere in this file; it must
# # exist in the interactive session before this cell runs.
# =============================================================================
# =============================================================================
event_points[3]['GAN_total_events']
#%%
# Indicator vector: 1 for windows listed as GAN-detected events.
whole_features['anoms']=np.zeros((N,1))
for i in event_points[3]['GAN_total_events']:
    i=int(float(i))
    whole_features['anoms'][i]=1
#%%
# Map indicators to plot colors and count events.
an=0
whole_features['color']=[]
for i in whole_features['anoms']:
#    print(i)
    if int(i) == 0:
        whole_features['color'].append('b')
    else:
        an+=1
        whole_features['color'].append('r')
whole_features['color']=np.array(whole_features['color'])
print(an)
#%%
# Persist the feature dict, then reload it (round-trip checkpoint).
output = open('data/Armin_data/oneday_3d_events.pkl', 'wb')
pkl.dump(whole_features, output)
output.close()
#%%
pkl_file = open('data/Armin_data/oneday_3d_events.pkl', 'rb')
whole_features = pkl.load(pkl_file)
pkl_file.close()
#%%
# 3-D scatter of (MPM, MV, scaled GAN score) colored by event labels.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(whole_features['maxmaxmin'], whole_features['maxvar'], whole_features['scores_scale'],color=whole_features['color'])
ax.set_xlabel('MPM')
ax.set_ylabel('MV')
ax.set_zlabel('Scaled GAN scores')
#%%
# Indices (among the first 10000 windows) with MV in [0.04, 0.05].
blue_index=[np.where((0.04 <= whole_features['maxvar'][0:10000]) & (whole_features['maxvar'][0:10000] <= 0.05))]
#%%
# Feature matrix for IsolationForest: scaled GAN, MPM, MV, scaled GAN-V.
X=np.zeros((N,4))
X[:,0]=whole_features['scores_scale']
X[:,3]=whole_features['scores_scale_V']
X[:,1]=whole_features['maxmaxmin']
X[:,2]=whole_features['maxvar']
#%%
rng = np.random.RandomState(42)
clf = IsolationForest(behaviour='new', max_samples=1000,
                    random_state=rng, contamination='auto')
clf.fit(X)
y_pred_train = clf.predict(X)
#%%
# Plot V/I/P/Q (2x2) around each window selected by blue_index.
for anom in blue_index[0][0]:
    print(anom)
    anom=int(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(selected[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(selected[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(selected[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(selected[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    plt.show()
#%%
whole_features['maxmaxmin_scale']=preprocessing.scale(whole_features['maxmaxmin'])
whole_features['maxvar_scale']=preprocessing.scale(whole_features['maxvar'])
#%%
# Histogram of log(MV) with a fitted normal (exploratory).
lamb=3
data =np.log(whole_features['maxvar'])
# Fit a normal distribution to the data:
mu, std = norm.fit(data)
# Plot the histogram.
plt.hist(data, bins=1000, density=True, alpha=0.6, color='g')
# Plot the PDF.
#xmin, xmax = plt.xlim()
#
#x = np.linspace(xmin, xmax, 100)
#p = norm.pdf( mu, std)
#plt.plot(p, 'k', linewidth=2)
#title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
#plt.title(title)
plt.show()
#%%
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
#%%
# 2-component GMM on log GAN-V scores; print BIC and +/-3-sigma bands.
#X_train=np.zeros((N,2))
#X_train[:,0]=np.log(whole_features['maxmaxmin'])
#X_train[:,1]=np.log(whole_features['maxvar'])
data=np.log(whole_features['scores_V']).reshape(-1,1)
#for i in range(10):
n=2
clf = mixture.GaussianMixture(n_components=n, covariance_type='full')
clf.fit(data)
print(clf.bic(data))
for i in range(n):
    print(clf.means_[i][0]-3*clf.covariances_[i][0][0],clf.means_[i][0]+3*clf.covariances_[i][0][0])
print(clf.means_)
print(clf.covariances_)
#%%
#np.prod(clf.covariances_)
#np.mean(clf.means_)
#
# NOTE(review): this cell looks broken — stats.boxcox expects 1-D input but
# receives the (-1,1) reshape, and it returns a (xt, lmbda) tuple that is
# then passed to np.log. Kept as-is (exploratory scratch cell).
from scipy import stats
# Plot the histogram.
data=(whole_features['maxvar']).reshape(-1,1)
#data=data-np.mean(data)
data=stats.boxcox(data)
data=np.log(data)
lamb=3
data=(data-1)**lamb/lamb
#mu, std = norm.fit(data)
plt.hist(data[0], bins=1000, density=True, alpha=0.6, color='g')
#%%
# =============================================================================
# =============================================================================
# # plot the histogram of different features: normal-probability plots of
# # MV and MPM before and after the Box-Cox transformation (paper figure)
# =============================================================================
# =============================================================================
from scipy import stats
#%%
fig = plt.figure()
ax1 = fig.add_subplot(221)
data=(whole_features['maxvar'])
xt, lmbda = stats.boxcox(data)
prob = stats.probplot(data, dist=stats.norm, plot=ax1)
ax1.set_xlabel('MV before transformation')
ax1.set_title('')
#ax1.set_title('Probplot after Yeo-Johnson transformation')
ax2 = fig.add_subplot(222)
data=(whole_features['maxmaxmin'])
xt, lmbda = stats.boxcox(data)
prob = stats.probplot(data ,dist=stats.norm, plot=ax2)
ax2.set_title('')
ax2.set_xlabel('MPM before transformation')
ax3 = fig.add_subplot(223)
data=(whole_features['maxvar'])
xt, lmbda = stats.boxcox(data)
prob = stats.probplot(xt, dist=stats.norm, plot=ax3)
ax3.set_title('')
ax3.set_xlabel('MV after transformation')
#ax1.set_title('Probplot after Yeo-Johnson transformation')
ax4 = fig.add_subplot(224)
data=(whole_features['maxmaxmin'])
xt, lmbda = stats.boxcox(data)
prob = stats.probplot(xt ,dist=stats.norm, plot=ax4)
ax4.set_title('')
ax4.set_xlabel('MPM after transformation')
#fig.suptitle('Probability Plot')
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,
                    wspace=0.35)
#plt.savefig('figures/paper/before_after_transformation.',dpi=300, bbox_inches='tight')
#%%
# 4-feature stack, variant 1: raw GAN scores plus Box-Cox MV / MPM.
data=[]
#xt, lmbda = stats.boxcox((whole_features['scores'])+1)
#xt=preprocessing.scale(xt)
data.append(whole_features['scores'])
#xt, lmbda = stats.boxcox((whole_features['scores_V'])+1)
#xt=preprocessing.scale(xt)
data.append(whole_features['scores_V'])
xt, lmbda = stats.boxcox((whole_features['maxvar']))
#xt=preprocessing.scale(xt)
data.append(xt)
xt, lmbda = stats.boxcox((whole_features['maxmaxmin']))
#xt=preprocessing.scale(xt)
data.append(xt)
data=np.array(data)
#%%
mean = np.mean(data,axis=1)
cov = np.cov(data)
#%%
# 4-feature stack, variant 2: all features Box-Cox transformed and scaled,
# then a joint Gaussian is fit and each window's density is evaluated
# (low density = candidate anomaly).
# Fix: multivariate_normal was used below without ever being imported.
from scipy.stats import multivariate_normal
data=[]
xt, lmbda = stats.boxcox((whole_features['scores'])+1)
xt=preprocessing.scale(xt)
data.append(xt)
xt, lmbda = stats.boxcox((whole_features['scores_V'])+1)
xt=preprocessing.scale(xt)
data.append(xt)
xt, lmbda = stats.boxcox((whole_features['maxvar']))
xt=preprocessing.scale(xt)
data.append(xt)
xt, lmbda = stats.boxcox((whole_features['maxmaxmin']))
xt=preprocessing.scale(xt)
data.append(xt)
data=np.array(data)
#%%
# Feature-wise means and 4x4 covariance -> multivariate-normal density per window.
mean = np.mean(data,axis=1)
cov = np.cov(data)
rv=multivariate_normal(mean,cov)
x=np.transpose(data)
y=rv.pdf(x)
#%%
#%%
# =============================================================================
# =============================================================================
# # extract the anomalies wrt each feature: Gaussian z-score thresholding,
# # sweeping the threshold zp from 2.5 to 5 and recording detection counts
# =============================================================================
# =============================================================================
zp=2
names=['scores','scores_V','maxvar','maxmaxmin']
anoms={}
for i in names:
    anoms[i]=[]
for zp in np.arange(2.5,5,0.1):
    for i,d in enumerate(data):
        dt = d
        # Fit a normal distribution to the data:
        mu, std = norm.fit(dt)
        high=mu+zp*std
        low=mu-zp*std
        anoms_1224=np.union1d(np.where(dt>=high)[0], np.where(dt<=low)[0])
        print(anoms_1224.shape)
        # NOTE(review): only the count (shape tuple) is stored, not the
        # indices themselves.
        anoms[names[i]].append(anoms_1224.shape)
#%%
# =============================================================================
# =============================================================================
# # different Zp: detections-vs-threshold curve per feature
# =============================================================================
# =============================================================================
zp=np.arange(2.5,5,0.1)
for i in anoms:
    plt.plot(zp,anoms[i])
plt.yticks(fontsize=15)
plt.legend(('GAN', 'GANV', 'MV', 'MP'),fontsize= 20)
# plt.figtext(.5,.9,'Temperature', fontsize=100, ha='center')
plt.xlabel('Thresold (Zp)',fontsize= 30)
plt.ylabel('Number of detected aevents',fontsize= 30)
plt.show()
#%%
filename='data/Armin_Data/July_03/pkl/J3.pkl'
select_1224=load_real_data(filename)
#%%
# Fixed-threshold run at zp=3.1, storing the actual anomaly indices.
zp=3.1
anoms31={}
for i,d in enumerate(data):
    dt = d
    # Fit a normal distribution to the data:
    mu, std = norm.fit(dt)
    high=mu+zp*std
    low=mu-zp*std
    anoms_1224=np.union1d(np.where(dt>=high)[0], np.where(dt<=low)[0])
    print(anoms_1224.shape)
    anoms31[names[i]]=anoms_1224
#%%
# NOTE(review): `anoms` here holds shape tuples from the sweep above, not
# index arrays — this cell presumably meant `anoms31`; confirm.
temp_anom=np.union1d(anoms['scores'],anoms['scores_V'])
maxs=np.union1d(anoms['maxvar'],anoms['maxmaxmin'])
temp_anom=np.setdiff1d(temp_anom,maxs)
#temp_anom=np.setdiff1d(temp_anom,anoms['maxmaxmin'])
temp_anom.shape
#%%
# NOTE(review): `anoms32` is not defined in this file — presumably a zp=3.2
# run from the interactive session.
temp_anom=np.union1d(anoms32['scores'],anoms32['scores_V'])
maxs=np.union1d(anoms32['maxvar'],anoms32['maxmaxmin'])
tt=np.setdiff1d(maxs,temp_anom)
s=np.setdiff1d(temp_anom,maxs)
total=np.union1d(temp_anom,maxs)
# Drop max-based detections that sit within 3 windows of a GAN detection
# (back-to-back duplicates of the same physical event).
backtoback=[]
for i in tt:
    if np.min(np.abs(temp_anom- i)) < 3:
        backtoback.append(i)
print(len(backtoback))
tt=np.setdiff1d(tt,backtoback)
print(tt.shape)
#%%
def rep_check(inp):
    """Collapse runs of back-to-back anomaly window indices.

    An element is kept only when it is at least 3 away from its successor,
    so a run of overlapping/consecutive indices reduces to its last member.

    Fix: the original loop stopped at the second-to-last element and never
    appended ``inp[-1]``, so the final event of the array was always lost;
    the last element has no successor and can never be a repeat, so it is
    now always kept.  Empty input now returns an empty array instead of
    relying on the loop body never running.

    Parameters:
        inp: 1-D numpy array of (sorted) window indices.

    Returns:
        numpy array of de-duplicated indices.
    """
    output=[]
    for i in range(inp.shape[0]-1):
        # Gap to the next index decides whether inp[i] ends a distinct event.
        if not np.min(np.abs(inp[i+1]- inp[i])) < 3:
            output.append(inp[i])
    if inp.shape[0] > 0:
        # Final element: no successor, always a distinct event.
        output.append(inp[-1])
    output=np.array(output)
    return output
#%%
# NOTE(review): `anoms3` is not defined in this file — presumably a zp=3.0
# run from the interactive session (cf. anoms31 above); confirm.
riz=np.setdiff1d(rep_check(anoms3['maxvar']),rep_check(anoms31['maxvar']))
#%%
# 2x2 V/I/P/Q plots for a fixed range of candidate windows.
for anom in np.arange(145,166):
    print(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-80:(anom*int(SampleNum/2)+80)])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-80:(anom*int(SampleNum/2)+80)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-80:(anom*int(SampleNum/2)+80)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-80:(anom*int(SampleNum/2)+80)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    plt.show()
#%%
# 2x2 histograms of the four features BEFORE transformation (paper figure).
plt.show()
plt.subplot(221)
data=(whole_features['scores_V']+1).reshape(-1,1)
#data=np.log(data)
xt, lmbda = stats.boxcox((whole_features['scores_V'])+1)
plt.hist(xt, bins=1000, density=True, alpha=0.6, color='g')
#plt.xlim(-4, -0.5)
#plt.ylim(0, 0.03)
#plt.axis('off')
plt.title('GAN_V')
#plt.xlabel('a')
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.subplot(222)
data=(whole_features['scores_scale']+1).reshape(-1,1)
#data=np.log(data)
xt, lmbda = stats.boxcox((whole_features['scores'])+1)
plt.hist(data, bins=1000, density=True, alpha=0.6, color='g')
plt.xlim(-1.5, 2.5)
plt.title('GAN')
#plt.xlabel('b')
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.subplot(223)
data=(whole_features['maxmaxmin']).reshape(-1,1)
data=np.log(data)
plt.xlim(-3, -1)
plt.hist(data, bins=1000, density=True, alpha=0.6, color='g')
plt.title('maxmin')
#plt.xlabel('c')
plt.gca().axes.get_xaxis().set_ticklabels([])
#plt.gca().axes.get_xaxis().set_visible(False)
plt.subplot(224)
data=(whole_features['maxvar']).reshape(-1,1)
data=np.log(data)
plt.xlim(-10, -5)
plt.hist(data, bins=1000, density=True, alpha=0.6, color='g')
plt.title('maxvar')
#plt.xlabel('d')
plt.gca().axes.get_xaxis().set_ticklabels([])
#plt.savefig('figures/paper/before_transformation.pdf')
plt.show()
#%%
# 2x2 histograms AFTER transformation: MV and MPM are log-transformed,
# re-centred on the GMM mean, and raised to the 3rd power / 3.
plt.show()
plt.subplot(221)
data=(whole_features['scores_scale_V']).reshape(-1,1)
data=np.log(data)
plt.hist(data, bins=1000, density=True, alpha=0.6, color='g')
plt.xlim(-4, -0.5)
#plt.ylim(0, 0.03)
#plt.axis('off')
plt.title('GAN_V')
#plt.xlabel('a')
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.subplot(222)
data=(whole_features['scores_scale']).reshape(-1,1)
#data=np.log(data)
plt.hist(data, bins=1000, density=True, alpha=0.6, color='g')
plt.xlim(-2, 2)
plt.title('GAN')
#plt.xlabel('b')
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.subplot(223)
data=(whole_features['maxmaxmin']).reshape(-1,1)
data=np.log(data)
#data=np.log(data)
n=2
clf = mixture.GaussianMixture(n_components=n, covariance_type='full')
clf.fit(data)
lamb=3
data=(data-np.mean(clf.means_))**lamb/lamb
plt.xlim(-0.1, 0.1)
plt.hist(data, bins=10000, density=True, alpha=0.6, color='g')
plt.title('maxmin')
#plt.xlabel('c')
plt.gca().axes.get_xaxis().set_ticklabels([])
#plt.gca().axes.get_xaxis().set_visible(False)
plt.subplot(224)
data=(whole_features['maxvar']).reshape(-1,1)
data=np.log(data)
n=2
clf = mixture.GaussianMixture(n_components=n, covariance_type='full')
clf.fit(data)
#data=np.log(data)
lamb=3
data=(data-np.mean(clf.means_))**lamb/lamb
plt.xlim(-0.5, 0.75)
plt.hist(data, bins=10000, density=True, alpha=0.6, color='g')
plt.title('maxvar')
#plt.xlabel('d')
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.savefig('figures/paper/after_transformation_GMMmean.pdf')
plt.show()
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# # # # # # # # find the main accuracy in the following code
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# # selected data features for final detection
# =============================================================================
# =============================================================================
# Assemble the final detection feature matrix from two Box-Cox-transformed
# statistics ('maxvar', 'maxmaxmin'); the GAN score features are left disabled.
data=[]
#xt, lmbda = stats.boxcox((whole_features['scores'])+1)
#xt=preprocessing.scale(xt)
#data.append(whole_features['scores'])
#
##xt, lmbda = stats.boxcox((whole_features['scores_V'])+1)
##xt=preprocessing.scale(xt)
#data.append(whole_features['scores_V'])
# Box-Cox normalises the heavy-tailed feature; it requires strictly positive
# input -- assumes whole_features['maxvar'] > 0 everywhere (TODO confirm).
xt, lmbda = stats.boxcox((whole_features['maxvar']))
#xt=preprocessing.scale(xt)
data.append(xt)
xt, lmbda = stats.boxcox((whole_features['maxmaxmin']))
#xt=preprocessing.scale(xt)
data.append(xt)
# Final shape: (2 features, n_windows).
data=np.array(data)
#%%
# Load one full day (July 03) of raw PMU channels for later event plots.
filename='data/Armin_Data/July_03/pkl/J3.pkl'
select_1224=load_real_data(filename)
#%%
# =============================================================================
# =============================================================================
# # Baseline anomaly detection: flag feature windows outside mu +/- zp*std
# # with zp = 3.
# =============================================================================
# =============================================================================
zp=3
names=['maxvar','maxmaxmin']
basic_anoms={}
for i,d in enumerate(data):
    dt = d
    # Fit a normal distribution to the feature values:
    mu, std = norm.fit(dt)
    high=mu+zp*std
    low=mu-zp*std
    # Window indices falling in either tail of the fitted Gaussian.
    anoms_1224=np.union1d(np.where(dt>=high)[0], np.where(dt<=low)[0])
    print(anoms_1224.shape)
    basic_anoms[names[i]]=anoms_1224
#%%
# =============================================================================
# =============================================================================
# # Same tail test with a slightly tighter threshold (zp = 3.1); the
# # difference against the zp=3 set is inspected below.
# =============================================================================
# =============================================================================
zp=3.1
names=['maxvar','maxmaxmin']
detected_anoms={}
for i,d in enumerate(data):
    dt = d
    # Fit a normal distribution to the feature values:
    mu, std = norm.fit(dt)
    high=mu+zp*std
    low=mu-zp*std
    anoms_1224=np.union1d(np.where(dt>=high)[0], np.where(dt<=low)[0])
    print(anoms_1224.shape)
    detected_anoms[names[i]]=anoms_1224
#%%
# =============================================================================
# =============================================================================
# # Union of baseline-model anomalies across both features.
# =============================================================================
# =============================================================================
basic_union=np.array([])
for f in basic_anoms:
    basic_union=np.union1d(basic_anoms[f],basic_union)
# Collapse runs of near-duplicate indices (closer than 3 windows apart).
basic_union_unique=rep_check(basic_union)
#%%
# =============================================================================
# =============================================================================
# # Union of detected-model (zp=3.1) anomalies across both features.
# =============================================================================
# =============================================================================
detected_union=np.array([])
for f in detected_anoms:
    detected_union=np.union1d(detected_anoms[f],detected_union)
detected_union_unique=rep_check(detected_union)
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # Anomalies flagged by the baseline but not by the detector.
# =============================================================================
# =============================================================================
# =============================================================================
diff_basic_detected=np.setdiff1d(basic_union,detected_union)
diff_basic_detected_unique=rep_check(diff_basic_detected)
#%%
# Save a 4-panel snapshot (V, I, P, Q) around each anomaly that only the
# baseline flagged. Each panel plots 120 samples either side of the anomaly
# centre; windows are SampleNum/2 raw samples apart.
dst='figures/all_events/July_03/acc/diff'
for anom in diff_basic_detected_unique:
    print(anom)
    anom=int(anom)
    plt.subplot(221)
    # Rows 0-2 of select_1224: the three phases plotted under title 'V'.
    for i in [0,1,2]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    # NOTE(review): 'A' 'B' 'C' concatenates to the single string 'ABC', so
    # matplotlib treats it as three one-char labels by accident -- verify.
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    # Rows 3-5: plotted under title 'I'.
    for i in [3,4,5]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    # Rows 6-8: plotted under title 'P'.
    for i in [6,7,8]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    # Rows 9-11: plotted under title 'Q'.
    for i in [9,10,11]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    figname=dst+"/"+str(anom)
    plt.savefig(figname)
    plt.show()
#%%%
# =============================================================================
# =============================================================================
# # Save the same 4-panel (V, I, P, Q) figure for every detected event.
# =============================================================================
# =============================================================================
dst='figures/all_events/July_03/acc/detected'
for anom in detected_union_unique:
    print(anom)
    anom=int(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    # One figure per event, named after its window index.
    figname=dst+"/"+str(anom)
    plt.savefig(figname)
    plt.show()
#%%
# =============================================================================
# =============================================================================
# # Scatter plot of the two GAN scores, coloured by event/normal label,
# # with a zoomed inset around the dense region.
# =============================================================================
# =============================================================================
import matplotlib
#matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.usetex'] = False
fig, ax = plt.subplots() # create a new figure with a default 111 subplot
ax.scatter(whole_features['scores_scale'],whole_features['scores_scale_V'],c=whole_features['color'],label=whole_features['color'])
#ax.legend(['r','b'], ['event', 'normal'], loc="lower left")
# Hide tick labels on both axes (paper figure).
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.gca().axes.get_yaxis().set_ticklabels([])
#'\\textit{Velocity (\N{DEGREE SIGN}/sec)}
plt.xlabel('Score from main GAN_{i,p,q}',fontsize=25)
plt.ylabel('Score from GAN_{v}',fontsize=25)
#plt.label('Normal', 'Event',fontsize=25)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
axins = zoomed_inset_axes(ax, 8, loc=4) # zoom-factor: 8, location: lower-right (loc=4)
axins.scatter(whole_features['scores_scale'],whole_features['scores_scale_V'],c=whole_features['color'])
x1, x2, y1, y2 = -6, 6, -10, 3 # specify the limits of the inset view
axins.set_xlim(x1, x2) # apply the x-limits
axins.set_ylim(y1, y2) # apply the y-limits
plt.yticks(visible=False)
plt.xticks(visible=False)
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
# Draw connector lines between the inset and the zoomed region.
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
#plt.savefig('figures\paper\GAN_GANV.png',dpi=10000)
#plt.show()
#%%
# =============================================================================
# =============================================================================
# # Ad-hoc set comparisons between GAN-score anomalies and max-feature
# # anomalies (exploratory cells).
# =============================================================================
# =============================================================================
#%%
# GAN-only anomalies: union of both score sets minus the max-feature sets.
temp_anom=np.union1d(anoms['scores'],anoms['scores_V'])
maxs=np.union1d(anoms['maxvar'],anoms['maxmaxmin'])
temp_anom=np.setdiff1d(temp_anom,maxs)
#temp_anom=np.setdiff1d(temp_anom,anoms['maxmaxmin'])
temp_anom.shape
#%%
# Same comparison on the anoms32 set; `tt` = max-feature-only anomalies.
temp_anom=np.union1d(anoms32['scores'],anoms32['scores_V'])
maxs=np.union1d(anoms32['maxvar'],anoms32['maxmaxmin'])
tt=np.setdiff1d(maxs,temp_anom)
s=np.setdiff1d(temp_anom,maxs)
total=np.union1d(temp_anom,maxs)
# Drop max-only anomalies that sit within 3 windows of a GAN anomaly.
backtoback=[]
for i in tt:
    if np.min(np.abs(temp_anom- i)) < 3:
        backtoback.append(i)
print(len(backtoback))
tt=np.setdiff1d(tt,backtoback)
print(tt.shape)
#%%
def rep_check(inp):
    """Collapse near-duplicate entries of the (sorted) index array *inp*.

    ``inp[i]`` is kept only when the next entry is at least 3 away, so a
    run of close-together indices reduces to its last member.
    NOTE(review): the final element of *inp* is never emitted -- confirm
    that is intentional before relying on this elsewhere.
    """
    kept = [current for current, following in zip(inp[:-1], inp[1:])
            if np.abs(following - current) >= 3]
    return np.array(kept)
#%%
# 'maxvar' windows flagged by the anoms3 run but not by the anoms31 run.
riz=np.setdiff1d(rep_check(anoms3['maxvar']),rep_check(anoms31['maxvar']))
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,983
|
zyh88/PMU
|
refs/heads/master
|
/GAN and AED.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Activation,Embedding, LSTM, Reshape, CuDNNLSTM, UpSampling2D,Conv2D,Flatten,MaxPooling2D
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
#%%
# Load the hand-standardized event windows.
# Presumably a nested dict: day -> {event index -> 12x240 array} -- TODO confirm.
event_file="data/Armin_Data/event_hand_standardized.pkl"
pkl_file = open(event_file, 'rb')
events = pkl.load(pkl_file)
pkl_file.close()
#%%
xtr=[]
ytr=[]
#day='July_03'
# Flatten every event from every day into one training set.
for day in events:
    for anom in events[day]:
#        for i in range(120):
        xtr.append(events[day][anom])
#    ytr.appe
# Reshape for Conv2D: (n_events, 1 channel, 12 signals, 240 samples).
xtr=np.array(xtr)
xtr=xtr.reshape(-1,1,12,240)
#s=xtr.shape
#xtr=xtr.reshape(s[0],s[1],1)
#%%
def adam_optimizer():
    """Return the Adam optimiser used for these models (lr=2e-4, beta1=0.5)."""
    configured = adam(lr=0.0002, beta_1=0.5)
    return configured
#%%
# Fully-connected autoencoder: 2880 -> 1028 -> 512 -> 256 -> 32 -> mirror.
# NOTE(review): each Dense already applies ReLU, so the LeakyReLU that follows
# acts on already-rectified values -- likely an unintentional double activation.
autoencoder = Sequential()
# Encoder Layers
autoencoder.add(Dense(1028,activation='relu', input_dim=12*240))
autoencoder.add(LeakyReLU(0.2))
autoencoder.add(Dense(512,activation='relu'))
autoencoder.add(LeakyReLU(0.2))
autoencoder.add(Dense(256,activation='relu'))
autoencoder.add(LeakyReLU(0.2))
# 32-dim bottleneck; named so the encoder sub-model can be extracted later.
autoencoder.add(Dense(32,activation='relu', name="latent_space"))
autoencoder.add(LeakyReLU(0.2))
# Decoder Layers
autoencoder.add(Dense(256,activation='relu'))
autoencoder.add(LeakyReLU(0.2))
autoencoder.add(Dense(512,activation='relu'))
autoencoder.add(LeakyReLU(0.2))
autoencoder.add(Dense(1028,activation='relu'))
autoencoder.add(LeakyReLU(0.2))
autoencoder.add(Dense(12*240,activation='relu'))
autoencoder.add(LeakyReLU(0.2))
autoencoder.summary()
#%%
"""
Convolutional autoencoder for 1x12x240 event windows: two conv/pool stages,
a dense 32-dim latent space, then a mirrored upsampling decoder.
This cell rebinds `autoencoder`, replacing the dense model above.
"""
# Input
input_img = Input(shape=(1, 12, 240))
# Encoder
x = Conv2D(16,(3,3),
           activation='relu',
           padding='same',
           data_format='channels_first')(input_img)
x = Conv2D(16,(3,3),
           activation='relu',
           padding='same',
           data_format='channels_first')(x)
x = MaxPooling2D((2,2),
                 padding='same',
                 data_format='channels_first')(x) # -> 16 x 6 x 120
x = Conv2D(32,(3,3),
           activation='relu',
           padding='same',
           data_format='channels_first')(x)
x = Conv2D(32,(3,3),
           activation='relu',
           padding='same',
           data_format='channels_first')(x)
x = MaxPooling2D((2,2),
                 padding='same',
                 data_format='channels_first')(x) # -> 32 x 3 x 60
x = Flatten()(x)
x = Dense(256)(x)
# 32-dim bottleneck, named for encoder extraction below.
code= Dense(32,name='latent_space')(x)
# Decoder: dense back up to 16*3*60=2880, then two upsampling stages.
x = Dense(256)(code)
x = Dense(2880)(x)
x = Reshape((16,3,60))(x)
x = UpSampling2D((2, 2),
                 data_format='channels_first')(x)
x = Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           data_format='channels_first')(x)
x = Conv2D(32, (3, 3),
           activation='relu',
           padding='same',
           data_format='channels_first')(x)
x = UpSampling2D((2, 2),
                 data_format='channels_first')(x)
x = Conv2D(16, (3, 3),
           activation='relu',
           padding='same',
           data_format='channels_first')(x)
decoded = Conv2D(1, (3, 3),
                 activation='relu',
                 padding='same',
                 data_format='channels_first')(x)
autoencoder = Model(input_img, decoded)
#%%
autoencoder.summary()
#%%
# Encoder sub-model sharing the trained weights up to the latent layer.
encoder = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('latent_space').output)
encoder.summary()
#%%
autoencoder.compile(optimizer='adam', loss='msle')
autoencoder.fit(xtr, xtr,
                epochs=100,
                batch_size=10,
                )
#%%
## Persist the trained models for reuse.
encoder.save('encoder_CNN_161632232_256_32dense_100_10.h5')
autoencoder.save('autoencoder_CNN_161632232_256_32dense_100_10.h5')
#%%
#encoder=load_model('encoder_dense102851225632.h5')
#autoencoder=load_model('autoencoder_dense102851225632.h5')
#%%
# Visual sanity check: original vs latent vs reconstruction for 10 random events.
num_images = 10
#np.random.seed(42)
random_test_images = np.random.randint(xtr.shape[0], size=num_images)
encoded_imgs = encoder.predict(xtr)
decoded_imgs = autoencoder.predict(xtr)
#plt.figure(figsize=(18, 4))
for i, image_idx in enumerate(random_test_images):
    # plot original signal (row 3 of the 12x240 window only)
    ax = plt.subplot(3, num_images, i + 1)
    plt.plot(xtr[image_idx].reshape(12, 240)[3])
#    plt.imshow(xtr[image_idx].reshape(12, 240))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # plot the 32-dim latent code as an 8x4 image
    ax = plt.subplot(3, num_images, num_images + i + 1)
    plt.imshow(encoded_imgs[image_idx].reshape(8, 4))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # plot reconstructed signal (same row 3)
    ax = plt.subplot(3, num_images, 2*num_images + i + 1)
    plt.plot(decoded_imgs[image_idx].reshape(12, 240)[3])
#    plt.imshow(decoded_imgs[image_idx].reshape(12, 240))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
#%%
# =============================================================================
# =============================================================================
# # Classification: cluster the learned latent codes with KMeans.
# =============================================================================
# =============================================================================
# =============================================================================
# calculate the latent space for each event
# =============================================================================
encoded_imgs = encoder.predict(xtr)
#%%
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2, random_state=0).fit(encoded_imgs)
#kmeans.labels_
# Sweep cluster counts 16..24 and report silhouette score for each.
for n_clusters in np.arange(16,25):
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(encoded_imgs)
    silhouette_avg = silhouette_score(encoded_imgs, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
#%%
# Final clustering used for inspection below.
kmeans = KMeans(n_clusters=10, random_state=0).fit(encoded_imgs)
kmeans.labels_
#%%
# Plot row 3 of every event assigned to cluster 4.
for num,k in enumerate(kmeans.labels_):
#    print(k)
    if k == 4:
        print(k)
        plt.plot(xtr[num].reshape(12,240)[3])
        plt.show()
#%%
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,984
|
zyh88/PMU
|
refs/heads/master
|
/1225_event_extraction_9_features.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import random
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from natsort import natsorted
from scipy import stats
from seaborn import heatmap
import loading_data
from loading_data import load_train_vitheta_data_1225,load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_train_vitheta_data_V,load_data_with_features,load_standardized_data_with_features
#%%
#%%
# =============================================================================
# =============================================================================
# # Build and pickle the 1225 feature set: 6 magnitude channels plus the
# # cosine of the voltage-current angle difference per phase (power factor
# # angle term).
# =============================================================================
# =============================================================================
filename='Raw_data/1225/data'
#os.listdir(filename)
#
pkl_file = open(filename, 'rb')
selected_data = pkl.load(pkl_file)
pkl_file.close()
cosin={}
# Reacive={}
# keys={}
# pf={}
# Angles are stored in degrees; convert to radians before cos().
cosin['TA']=np.cos((selected_data['L1ANG']-selected_data['C1ANG'])*(np.pi/180))
cosin['TB']=np.cos((selected_data['L2ANG']-selected_data['C2ANG'])*(np.pi/180))
cosin['TC']=np.cos((selected_data['L3ANG']-selected_data['C3ANG'])*(np.pi/180))
# Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
# Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
# Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
#
#pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
#pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
#pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
selected_data['TA']=cosin['TA']
selected_data['TB']=cosin['TB']
selected_data['TC']=cosin['TC']
# Keep only the 9 channels of interest and pickle them as one dict.
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
day_data={}
for key in k:
    day_data[key]=selected_data[key]
dir='Raw_data/1225/VIT.pkl'
output = open(dir, 'wb')
pkl.dump(day_data, output)
output.close()
#%%
# =============================================================================
# =============================================================================
# # train data preparation (disabled alternatives kept for reference)
# =============================================================================
# =============================================================================
#start,SampleNum,N=(0,40,500000)
#filename='Raw_data/1225/VIT.pkl'
#k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
##%%
#dds=load_standardized_data_with_features(filename,k)
##%%
#dd=load_data_with_features(filename,k)
#%%
# =============================================================================
# =============================================================================
# # Reload the pickled 1225 V/I/theta dict for plotting and training.
# =============================================================================
# =============================================================================
filename='Raw_data/1225/VIT.pkl'
pkl_file = open(filename, 'rb')
selected_data_1225_normal = pkl.load(pkl_file)
pkl_file.close()
#%%
# =============================================================================
# =============================================================================
# # Flatten the dict into a positional list ordered by `k` (defined in the
# # cell above) for index-based plotting.
# =============================================================================
# =============================================================================
selected_data_1225=[]
for f in k:
    selected_data_1225.append(selected_data_1225_normal[f])
#%%
# Build the windowed training tensor: N windows of SampleNum samples.
start,SampleNum,N=(0,40,500000)
filename='Raw_data/1225/VIT.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
tt=load_train_vitheta_data_1225(start,SampleNum,N,filename,k)
#%%
# Score every window with the trained discriminator and flag 3-sigma outliers
# of the mean-centred score distribution.
# NOTE(review): `discriminator` is not loaded here (the load_model call is
# commented out) -- this cell assumes it already exists in the session.
X_train = tt
scores={}
probability_mean={}
anomalies={}
# Only the first channel (L1MAG) is scored here.
kkk=k[0:1]
for idx,key in enumerate(kkk):
    print(key)
    X_train_temp=X_train[:,idx]
    #X_train.reshape(N,3*SampleNum)
    X_train_temp=X_train_temp.reshape(N,SampleNum,1)
    id=int(np.floor(idx/3))
    mode=k[id*3]
#    dis_name='dis_sep_onelearn_'+mode+'.h5'
#    print(dis_name)
#
#    discriminator=load_model(dis_name)
    # Score in 1000 batches; the final partial batch is skipped (rate-1).
    rate=1000
    shift=N/rate
    scores[key]=[]
    for i in range(rate-1):
        temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
        scores[key].append(temp)
#        print(i)
    scores[key]=np.array(scores[key])
    scores[key]=scores[key].ravel()
    probability_mean[key]=np.mean(scores[key])
    data=scores[key]-probability_mean[key]
    # Gaussian tail test on the centred scores, zp = 3.
    mu, std = norm.fit(data)
    zp=3
    high=mu+zp*std
    low=mu-zp*std
    anomalies[key]=np.union1d(np.where(data>=high)[0], np.where(data<=low)[0])
    print(anomalies[key].shape)
#%%
# =============================================================================
# =============================================================================
# # plot 1225
# =============================================================================
# =============================================================================
def show_1225(events):
    """Plot a 3-panel window (V, I, T) around each event index in *events*.

    Reads the module-level ``selected_data_1225`` list: rows 0-2 are plotted
    under 'V', 3-5 under 'I', 6-8 under 'T' (angle-cosine channels).
    """
    SampleNum = 40
    half_window = 240
    panels = ((221, (0, 1, 2), 'V'),
              (222, (3, 4, 5), 'I'),
              (223, (6, 7, 8), 'T'))
    for event in events:
        event = int(event)
        print(event)
        center = event * int(SampleNum / 2)
        for position, channels, label in panels:
            plt.subplot(position)
            for ch in channels:
                plt.plot(selected_data_1225[ch][center - half_window:center + half_window])
            plt.legend('A' 'B' 'C')
            plt.title(label)
        plt.show()
#%%
# Re-bind the windowed training tensor produced above for the GAN cells below.
X_train = tt
#%%
def adam_optimizer():
    """Shared Adam optimiser for the GAN sub-models (lr=2e-4, beta1=0.5)."""
    return adam(beta_1=0.5, lr=0.0002)
#%%
def create_generator():
    """Build the LSTM generator: (100,1) noise sequence -> 1*40-sample window."""
    model = Sequential()
    model.add(CuDNNLSTM(units=256, input_shape=(100, 1), return_sequences=True))
    model.add(LeakyReLU(0.2))
    model.add(CuDNNLSTM(units=512))
    model.add(LeakyReLU(0.2))
    model.add(Dense(units=512))
    model.add(LeakyReLU(0.2))
    model.add(Dense(units=1 * 40))
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return model
g = create_generator()
g.summary()
#%%
def create_discriminator():
    """Build the LSTM discriminator: (40,1) window -> real/fake probability."""
    model = Sequential()
    model.add(CuDNNLSTM(units=256, input_shape=(40, 1), return_sequences=True))
    model.add(LeakyReLU(0.2))
    model.add(CuDNNLSTM(units=512))
    model.add(LeakyReLU(0.2))
    model.add(Dense(units=512))
    model.add(LeakyReLU(0.2))
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return model
d = create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
    """Chain generator -> (frozen) discriminator into the combined GAN model."""
    discriminator.trainable = False
    noise_input = Input(shape=(100, 1))
    fake_window = generator(noise_input)
    fake_window = Reshape((40, 1), input_shape=(1 * 40, 1))(fake_window)
    validity = discriminator(fake_window)
    combined = Model(inputs=noise_input, outputs=validity)
    combined.compile(loss='binary_crossentropy', optimizer='adam')
    return combined
gan = create_gan(d, g)
gan.summary()
#%%
batch_size=5
epochnum=2
#%%
start,SampleNum,N=(0,40,500000)
#X_train = load_data(start,SampleNum,N)
#filename=
X_train = tt
# NOTE(review): batch_count is computed but never used below.
batch_count = X_train.shape[0] / batch_size
##%%
#X_train=X_train.reshape(N,3*SampleNum)
#X_train=X_train.reshape(N,SampleNum,3)
#%%
# Pre-draw one fixed set of real-window indices per epoch (keyed 0..epochnum-1).
rnd={}
for i in range(epochnum):
    rnd[i]=np.random.randint(low=0,high=N,size=batch_size)
#    show(rnd[i])
#%%
# Fresh models for this training run.
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
#%%
all_scores=[]
def training(generator,discriminator,gan,epochs, batch_size,all_scores):
    """Adversarial training loop.

    Alternates discriminator and generator updates for *epochs* epochs, then
    after each epoch scores all windows in the module-level ``X_train_temp``
    with the discriminator and appends the score vector to *all_scores*
    (mutated in place). Reads globals: X_train_temp, rnd, N, SampleNum.
    NOTE(review): the inner loop runs range(batch_size) (5 steps), not the
    number of batches per epoch -- confirm this is intentional.
    NOTE(review): time.clock() was removed in Python 3.8; needs time.perf_counter().
    """
#    all_scores=[]
    scale=1
    for e in range(1,epochs+1 ):
        all_score_temp=[]
        tik=time.clock()
        print("Epoch %d" %e)
        for _ in tqdm(range(batch_size)):
            #generate random noise as an input to initialize the generator
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            # Generate fake signal windows from the noise input
            generated_images = generator.predict(noise)
            generated_images = generated_images.reshape(batch_size,SampleNum,1)
#            print(generated_images.shape)
            # Real windows: the fixed per-epoch index set drawn in `rnd`
            # (requires epochs <= len(rnd)).
#            random.seed(0)
            image_batch =X_train_temp[rnd[e-1]]
#            print(image_batch.shape)
            #Construct different batches of real and fake data
            X= np.concatenate([image_batch, generated_images])
            # Labels: real windows (first half) get the smoothed label 0.9,
            # generated windows get 0.
            y_dis=np.zeros(2*batch_size)
            y_dis[:batch_size]=0.9
            #Pre train discriminator on fake and real data before starting the gan.
            discriminator.trainable=True
            discriminator.train_on_batch(X, y_dis)
            #Tricking the noised input of the Generator as real data
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            y_gen = np.ones(batch_size)
            # During the training of gan,
            # the weights of discriminator should be fixed.
            #We can enforce that by setting the trainable flag
            discriminator.trainable=False
            #training the GAN by alternating the training of the Discriminator
            #and training the chained GAN model with Discriminator's weights freezed.
            gan.train_on_batch(noise, y_gen)
        # Epoch-end evaluation: score all N windows in 1000 batches
        # (the final partial batch is skipped: range(rate-1)).
        rate=1000
        shift=N/rate
        all_score_temp=[]
        for i in range(rate-1):
            temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
            all_score_temp.append(temp)
#            print(i)
        all_score_temp=np.array(all_score_temp)
        all_score_temp=all_score_temp.ravel()
        all_scores.append(all_score_temp)
        toc = time.clock()
        print(toc-tik)
#%%
# Train one GAN per selected channel (currently only L1MAG) and time each run.
kk=['L1MAG']
for idx,key in enumerate(kk):
    # X_train_temp is read as a global by training() above.
    X_train_temp=X_train[:,(idx)]
    #X_train.reshape(N,3*SampleNum)
    X_train_temp=X_train_temp.reshape(N,SampleNum,1)
    tic = time.clock()
    training(generator,discriminator,gan,epochnum,batch_size,all_scores)
    toc = time.clock()
    print(toc-tic)
#
#    gan_name='gan_sep_onelearn_good_09_'+key+'.h5'
#    gen_name='gen_sep_onelearn_good_09_'+key+'.h5'
#    dis_name='dis_sep_onelearn_good_09_'+key+'.h5'
#    print(dis_name)
#    gan.save(gan_name)
#    generator.save(gen_name)
#    discriminator.save(dis_name)
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,985
|
zyh88/PMU
|
refs/heads/master
|
/testPMU1224datasorting.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 12:26:15 2019
@author: hamed
"""
import numpy as np
import tensorflow as tf
import pandas as pd
import os
import pickle
import matplotlib.pyplot as plt
import operator
import math
import natsort
from scipy.io import loadmat
from math import ceil
#%%
# =============================================================================
# =============================================================================
# # read one file of the PMU data , each file is for 10 minutes
# =============================================================================
# =============================================================================
#%%
# importing data from a file function
def OneFileImport(filename,dir):
    """Read one 10-minute PMU CSV (*filename* inside directory *dir*) into a DataFrame."""
    full_path = os.path.join(dir, filename)
    return pd.read_csv(full_path)
#%%
# Pythono3 code to rename multiple
# files in a directory or folder
# =============================================================================
# =============================================================================
# =============================================================================
# # # Reanme the file names in a folder
# =============================================================================
# =============================================================================
# =============================================================================
# Process July 04-17: rename the day's hourly CSV dumps to 0.csv..23.csv,
# compute per-phase active/reactive power, stack all hours into one matrix,
# and pickle a dict of 12 channel arrays per day.
for n in np.arange(4,18):
    # Day folders are zero-padded below 10 (July_04 ... July_09).
    if n<10:
        dir="data/Armin_Data/July_0"+str(n)+"/"
    else:
        dir="data/Armin_Data/July_"+str(n)+"/"
    # Function to rename multiple files
    def main():
        """Rename entries 24..47 of the day folder listing to 0.csv..23.csv."""
        i = 0
        for filename in os.listdir(dir)[24:48]:
            dst =str(i) + ".csv"
            src =dir+ filename
            dst =dir+ dst
            # rename() renames each file in place
            os.rename(src, dst)
            i += 1
    # Driver Code
    if __name__ == '__main__':
        main()
    # whole data filenames in the data directory
    if n<10:
        dir="data/Armin_Data/July_0"+str(n)
    else:
        dir="data/Armin_Data/July_"+str(n)
    foldernames=os.listdir(dir)
    filenames1224=foldernames[0:24]
    # Natural sort so 2.csv sorts before 10.csv.
    filenames1224=natsort.natsorted(filenames1224)
    # Active and reactive power per phase: P = V*I*cos(dtheta), Q = V*I*sin(dtheta),
    # with angles stored in degrees.
    whole_data=[]
    for count,i in enumerate(filenames1224):
        Active={}
        Reacive={}
        keys={}
        pf={}
        selected_data=OneFileImport(i,dir)
        Active['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.cos((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
        Active['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.cos((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
        Active['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.cos((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
        Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
        Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
        Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
        selected_data['PA']=Active['A']
        selected_data['PB']=Active['B']
        selected_data['PC']=Active['C']
        selected_data['QA']=Reacive['A']
        selected_data['QB']=Reacive['B']
        selected_data['QC']=Reacive['C']
        selected_data=selected_data.drop(columns=['Unnamed: 0','L1Ang','L2Ang','L3Ang','C1Ang','C2Ang','C3Ang'])
        if count==0:
            whole_data=selected_data.values
        else:
            # BUG FIX: original read ``np.(whole_data, ...)`` (a syntax error);
            # stack this hour's rows below the accumulated day matrix.
            whole_data=np.append(whole_data,selected_data.values,axis=0)
        print(i)
    # NOTE(review): the positional mapping below assumes the DataFrame column
    # order after drop() matches this key list -- confirm against the CSVs.
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG',
       'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    day_data={}
    day_data['1224']={}
    c=0
    for key in k:
        day_data['1224'][key]=whole_data[:,c]
        c+=1
    # Create the day's pkl/ folder and write the dict.
    if n<10:
        dir="data/Armin_Data/July_0"+str(n)+"/pkl"
    else:
        dir="data/Armin_Data/July_"+str(n)+"/pkl"
    dir_name=dir
    os.mkdir(dir_name)
    # write python dict to a file
    if n<10:
        dir="data/Armin_Data/July_0"+str(n)+"/pkl/jul" + str(n) + ".pkl"
    else:
        dir="data/Armin_Data/July_"+str(n)+"/pkl/jul" + str(n) + ".pkl"
    output = open(dir, 'wb')
    pickle.dump(day_data, output)
    output.close()
    print(n)
#%%
#read a pickle file
pkl_file = open('CompleteOneDay.pkl', 'rb')
selected_data = pickle.load(pkl_file)
pkl_file.close()
# NOTE(review): prints the leftover loop variable ``n`` from the cell above --
# probably stray debugging output.
print(n)
#%%
# =============================================================================
# =============================================================================
# #
# # find new pointer for july 03 from alireza new time file sent by email sep 3 2019
# # =============================================================================
#
# =============================================================================
time_file='data/Armin_Data/July_03/'
new_time = loadmat(time_file+'time.mat')['time']
new_time=new_time.ravel()
#%%
# =============================================================================
# =============================================================================
# # vectorize the ceiling function
# =============================================================================
# =============================================================================
def f(x):
    # Element-wise ceiling; wrapped by np.vectorize below as `ceil2`.
    return np.ceil(x)
ceil2 = np.vectorize(f)
# Quantise timestamps by a factor of 1e5 so the two sources are comparable.
new_time=ceil2(new_time/100000)
#%%
# Concatenate the per-hour timestamp columns for the whole day.
times=np.array([])
for hour in range(24):
    temp_times=pd.read_csv(time_file+str(hour)+'.csv')['Unnamed: 0']
    times=np.concatenate((times,temp_times))
#%%
times=np.array(times)
#%%
times=times.ravel()
#%%
# Apply the same quantisation to the CSV timestamps.
times=ceil2(times/100000)
#%%
#new_pointer=[]
#s=times.shape
#for point in range(s):
#    if times[point] in new_time:
#        new_pointer.append(point)
#    else:
#        print(point)
#%%
# Compare the two timestamp sources.
diff=np.setdiff1d(times,new_time)
uni=np.union1d(times,new_time)
inter=np.intersect1d(times,new_time)
#%%
# Group equal timestamps: np.unique returns (values, first-indices, counts)
# in that fixed order regardless of keyword order.
records_array = times
idx_sort = np.argsort(records_array)
sorted_records_array = records_array[idx_sort]
vals, idx_start, count = np.unique(sorted_records_array, return_counts=True,
                                return_index=True)
# sets of indices, one group per unique timestamp
res = np.split(idx_sort, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
vals = vals[count > 1]
res = filter(lambda x: x.size > 1, res)
#%%
# =============================================================================
# =============================================================================
# # time list for 1200
# =============================================================================
# =============================================================================
# Same timestamp assembly as above, but for the Bld_1200 per-hour files
# (hours are 1-based in these filenames).
times_1200=np.array([])
for hour in range(24):
    temp_times=pd.read_csv(time_file+'Bld_1200_'+str(hour+1)+'.csv')['Unnamed: 0']
    times_1200=np.concatenate((times_1200,temp_times))
#%%
times_1200=np.array(times_1200)
#%%
times_1200=times_1200.ravel()
#%%
times_1200=ceil2(times_1200/100000)
#%%
diff=np.setdiff1d(times_1200,new_time)
uni=np.union1d(times_1200,new_time)
inter=np.intersect1d(times_1200,new_time)
#%%
# Duplicate-timestamp bookkeeping, same recipe as for `times` above.
records_array = times_1200
idx_sort = np.argsort(records_array)
sorted_records_array = records_array[idx_sort]
vals, idx_start, count = np.unique(sorted_records_array, return_counts=True,
                                return_index=True)
# sets of indices
res = np.split(idx_sort, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
vals = vals[count > 1]
res = filter(lambda x: x.size > 1, res)
#%%
# Translate the old event pointers (indices into times_1200) into indices
# of the new time base by matching quantized timestamps. np.where returns
# a tuple of arrays; np.append flattens it, so unmatched points add nothing
# and multi-matches add several entries.
old_pointer=loadmat('data/pointer.mat')['pointer']['Jul_03'][0].ravel()[0].ravel()
new_pointer=np.array([])
for point in old_pointer:
    tempt=times_1200[point]
    p=np.where(new_time==tempt)
    print(p)
    new_pointer=np.append(new_pointer,p)
#%%
# =============================================================================
# =============================================================================
# # use the new pointer to extract the anomalies in the main data from alirezas method
# =============================================================================
# =============================================================================
# =============================================================================
# load real data
# =============================================================================
filename='data/Armin_Data/July_17/pkl/J17.pkl'
def load_real_data(filename, pmu='1224'):
    """Load one day of PMU measurements from a pickled dict.

    Parameters
    ----------
    filename : str
        Path to a pickle containing {pmu_id: {feature: series}}.
    pmu : str, optional
        PMU identifier to extract. Defaults to '1224', the value that
        was previously hard-coded inside the function.

    Returns
    -------
    np.ndarray of shape (12, n_samples): one row per feature, ordered
    voltage mags, current mags, P per phase, Q per phase.
    """
    # context manager guarantees the handle is closed even on error
    # (this file uses both `pickle` and the `pkl` alias; keep `pkl` here)
    with open(filename, 'rb') as pkl_file:
        selected_data = pkl.load(pkl_file)
    selected_data = pd.DataFrame(selected_data)
    # forward-fill gaps; .ffill() replaces the deprecated
    # fillna(method='ffill') spelling with identical behavior
    selected_data = selected_data.ffill()
    print(selected_data.keys())
    data = selected_data[pmu]
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    # stack the 12 feature series into a (12, n) array
    return np.array([list(data[f]) for f in features])
#%%
select_1224=load_real_data(filename)
#%%
new_pointer.sort()
# Recreate an empty figures/.../window directory for this run's plots.
dst="figures/1224_15_days/July_03/window"
# BUG FIX: the original line read `dir=` with no right-hand side — a
# SyntaxError. The only value consistent with the following two calls is
# the parent folder of dst, so that dir + '/window' == dst.
dir="figures/1224_15_days/July_03"
import shutil
# BUG FIX: os.remove() cannot delete a directory (IsADirectoryError).
# shutil.rmtree removes it together with any stale figures inside;
# ignore_errors keeps the first run (directory absent) working too.
shutil.rmtree(dir+'/window', ignore_errors=True)
os.mkdir(dir+'/window')
# =============================================================================
# save the window method event points
# =============================================================================
# For each window-method event pointer, plot +/-240 samples of V, I, P, Q
# (three phases each) from select_1224 (rows 0-11) and save the figure
# named after the event index under dst.
for anom in old_pointer:
    anom=int(anom)
    print(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    # 'A' 'B' 'C' concatenates to the string 'ABC'; matplotlib iterates
    # its characters, yielding the labels A, B, C.
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    figname=dst+"/"+str(anom)
    plt.savefig(figname)
    plt.show()
#%%%%
# Concatenate the phase-A voltage magnitude column across the 24 hourly
# Hunter_1224 CSVs (1-based hour suffix) and plot the full-day trace.
files='data/Armin_Data/July_03/Hunter_1224_'
v1=np.array([])
for hour in range(24):
    print(hour)
    v1temp=pd.read_csv(files+str(hour+1)+'.csv')['L1Mag']
    v1=np.concatenate((v1,v1temp),axis=None)
plt.plot(v1)
#%%
# importing data from a file function
def OneFileImport(filename,dir):
    """Read the CSV file *filename* from directory *dir*.

    Returns the parsed pandas DataFrame.
    """
    full_path = os.path.join(dir, filename)
    return pd.read_csv(full_path)
#%%
# For each day July 3-17: collect the 'Bld' CSVs, derive per-phase active
# and reactive power from magnitudes and angle differences, and pickle the
# result as a {pmu: {feature: column}} dict under <day>/pklBld.
for n in np.arange(3,18):
    if n<10:
        num='0'+str(n)
    else:
        num=str(n)
    dir='data/Armin_Data/July_'+num
    foldernames=os.listdir(dir)
    selected_files=np.array([])
    for f in foldernames:
        spl=f.split('_')
        if 'Bld' in spl:
            selected_files=np.append(selected_files,f)
#    filenames1224=foldernames[0:24]
    #filenames1224.sort(key=lambda f: int(filter(str.isdigit, f)))
    # natural sort puts hour files in chronological order (1,2,...,10,...)
    filenames1224=natsort.natsorted(selected_files)
    #active and reactive power consumption calculation
    whole_data=np.array([])
    #filenames1224.sort(key=lambda f: int(filter(str.isdigit, f)))
    for count,file in enumerate(filenames1224):
        print(count,file)
        Active={}
        Reacive={}
        keys={}
        pf={}
        selected_data=OneFileImport(file,dir)
        # P = V*I*cos(theta_v - theta_i), Q = V*I*sin(...), angles in degrees
        Active['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.cos((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
        Active['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.cos((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
        Active['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.cos((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
        Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
        Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
        Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
#
#pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
#pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
#pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
        selected_data['PA']=Active['A']
        selected_data['PB']=Active['B']
        selected_data['PC']=Active['C']
        selected_data['QA']=Reacive['A']
        selected_data['QB']=Reacive['B']
        selected_data['QC']=Reacive['C']
        selected_data=selected_data.drop(columns=['Unnamed: 0','L1Ang','L2Ang','L3Ang','C1Ang','C2Ang','C3Ang'])
        if count==0:
            whole_data=selected_data.values
        else:
            whole_data=np.append(whole_data,selected_data.values,axis=0)
#        whole_data.append(selected_data.values,axis=0)
#    print(i)
    # Column order of whole_data must match this key list (magnitudes
    # first, then the appended P/Q columns) — it mirrors the drop above.
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG',
       'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    # NOTE(review): the dict key is '1224' even though these are 'Bld'
    # files — presumably a copy-over from the 1224 pipeline; confirm.
    day_data={}
    day_data['1224']={}
    c=0
    for key in k:
        day_data['1224'][key]=whole_data[:,c]
        c+=1
    # =============================================================================
    # for Bld
    # =============================================================================
    # os.mkdir raises FileExistsError on reruns if pklBld already exists.
    dir=dir+'/pklBld'
    os.mkdir(dir)
    dir=dir+'/J'+str(n)+'.pkl'
    # write python dict to a file
    output = open(dir, 'wb')
    pickle.dump(day_data, output)
    output.close()
    print(n)
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,986
|
zyh88/PMU
|
refs/heads/master
|
/image.py
|
# Extract a 8600-sample window around sample 323438*20 — presumably an
# event location in the j13 series loaded elsewhere; confirm the source.
window_data=j13[0][323438*20-4000:323438*20+4600]
#%%
window_median=np.median(window_data)
#%%
def mad_find(window_size,window_data,eps):
    """Median/MAD envelope of *window_data* over fixed-size windows.

    For every non-overlapping window of *window_size* samples the median
    and the scaled median absolute deviation (MAD * 1.4826) are computed,
    and the bound median +/- eps*MAD is repeated once per sample.  A
    second, half-window-shifted pass produces the `shifted*` lists (its
    trailing window may be shorter than window_size).

    Returns (lowerbound, upperbound, shiftedlow, shiftedup) — four lists
    of per-sample bound values.
    """
    half = int(window_size / 2)
    n_windows = int(window_data.shape[0] / window_size)
    scale = 1.4826  # consistency constant: scaled MAD ~ std for Gaussian data

    def _bounds(segment):
        # (low, high) = median -/+ eps * scaled-MAD of one segment
        med = np.median(segment)
        mad = scale * np.median(np.absolute(segment - med))
        return med - eps * mad, med + eps * mad

    lowerbound, upperbound = [], []
    shiftedlow, shiftedup = [], []
    for w in range(n_windows):
        lo, hi = _bounds(window_data[w * window_size:(w + 1) * window_size])
        lowerbound.extend([lo] * window_size)
        upperbound.extend([hi] * window_size)
        # half-window-shifted pass over the same window index
        s = w * window_size + half
        lo_s, hi_s = _bounds(window_data[s:s + window_size])
        shiftedlow.extend([lo_s] * window_size)
        shiftedup.extend([hi_s] * window_size)
    return lowerbound,upperbound,shiftedlow,shiftedup
#%%
# NOTE(review): the slice bounds 2076500:207500 are reversed (start >
# stop), so ddtt is always empty — probably a typo for 2075000; confirm
# the intended range before relying on ddtt.
ddtt=dd[7][2076500:207500]
#%%
# Plot the first 8000 samples of the voltage window; the commented block
# overlays the MAD bounds from mad_find when re-enabled.
a=0
b=8000
plt.plot(window_data[a:b])
#for i in [120, 360, 600, 840, 1080]:
#    low,up,sl,su=mad_find(i,window_data,4.2)
#
#    plt.plot(low[a:b],color='r')
#
#    plt.plot(up[a:b],color='r')
#    sl=np.array(sl)
#    sl=np.roll(sl,int(i/2))
#    su=np.array(su)
#    su=np.roll(su,int(i/2))
#
#    plt.plot(sl,color='r')
#
#    plt.plot(su,color='r')
#plt.title('Q (kVAR)',fontsize= 30)
plt.legend(['Voltage','Upper and lower bound'],fontsize=30)
plt.xlabel('Timeslots',fontsize= 30)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
# BUG FIX: original read `plt.ylim([7125,7195)` — unbalanced brackets,
# a SyntaxError that prevented the whole file from being imported/run.
plt.ylim([7125,7195])
#    plt.figtext(.5,.9,'Temperature', fontsize=100, ha='center')
#plt.xlabel('MPM',fontsize= 30)
plt.ylabel('Voltage (v)',fontsize= 30)
plt.show()
#%%
#%%
# Same plot as above but over the entire window (no a:b slice and no ylim).
# NOTE(review): the legend claims two series ('Voltage' and bounds) but
# only the voltage trace is plotted while the bound overlay is commented
# out — the second legend entry is unused.
plt.plot(window_data)
#for i in [120, 360, 600, 840, 1080]:
#    low,up,sl,su=mad_find(i,window_data,4.2)
#
#    plt.plot(low,color='r')
#
#    plt.plot(up,color='r')
#    sl=np.array(sl)
#    sl=np.roll(sl,int(i/2))
#    su=np.array(su)
#    su=np.roll(su,int(i/2))
#
#    plt.plot(sl,color='r')
#
#    plt.plot(su,color='r')
#plt.title('Q (kVAR)',fontsize= 30)
plt.legend(['Voltage','Upper and lower bound'],fontsize=30)
plt.xlabel('Timeslots',fontsize= 30)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
#    plt.figtext(.5,.9,'Temperature', fontsize=100, ha='center')
#plt.xlabel('MPM',fontsize= 30)
plt.ylabel('Voltage (v)',fontsize= 30)
plt.show()
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,987
|
zyh88/PMU
|
refs/heads/master
|
/3 phase v i theta separately.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import random
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from natsort import natsorted
from scipy import stats
from seaborn import heatmap
import loading_data
from loading_data import load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_train_vitheta_data_V,load_data_with_features,load_standardized_data_with_features
#%%
#%%
# =============================================================================
# =============================================================================
# # read one file of the PMU data , each file is for 10 minutes
# =============================================================================
# =============================================================================
#%%
# importing data from a file function
def OneFileImport(filename,dir):
    """Load the CSV *filename* located in directory *dir* into a DataFrame."""
    return pd.read_csv(os.path.join(dir, filename))
#%%
# =============================================================================
# =============================================================================
# # save data with V I and theta
# =============================================================================
# =============================================================================
# For each selected day: gather the 'Hunter' CSVs, compute the power-factor
# cosine per phase from the V/I angle difference, and pickle V, I and
# cos(theta) columns as a {pmu: {feature: column}} dict.
for n in [14]:
    if n<10:
        dir="../../UCR/PMU data/Data/July_0"+str(n)+"/"
    else:
        dir="../../UCR/PMU data/Data/July_"+str(n)+"/"
    #dir='data/Armin_Data/July_03'
    #os.listdir('../../UCR/PMU data/Data')
    foldernames=os.listdir(dir)
    selected_files=np.array([])
    for f in foldernames:
        spl=f.split('_')
        if 'Hunter' in spl:
            selected_files=np.append(selected_files,f)
    # bare expressions below are notebook-cell leftovers (no effect in a script)
    selected_files
    filenames1224=natsorted(selected_files)
    filenames1224
    # local redefinition shadows the module-level OneFileImport (same body)
    def OneFileImport(filename,dir):
        dir_name=dir
        base_filename=filename
        path=os.path.join(dir_name, base_filename)
        imported_data=pd.read_csv(path)
        return imported_data
    whole_data=np.array([])
    for count,file in enumerate(filenames1224):
        print(count,file)
        cosin={}
#        Reacive={}
#        keys={}
#        pf={}
        selected_data=OneFileImport(file,dir)
        # cos of the voltage/current angle difference, angles in degrees
        cosin['TA']=np.cos((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180))
        cosin['TB']=np.cos((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180))
        cosin['TC']=np.cos((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180))
#        Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
#        Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
#        Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
#
#pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
#pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
#pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
        selected_data['TA']=cosin['TA']
        selected_data['TB']=cosin['TB']
        selected_data['TC']=cosin['TC']
        selected_data=selected_data.drop(columns=['Unnamed: 0','L1Ang', 'L2Ang', 'L3Ang','C1Ang', 'C2Ang', 'C3Ang'])
#
#        selected_data['QA']=Reacive['A']
#        selected_data['QB']=Reacive['B']
#        selected_data['QC']=Reacive['C']
#
        if count==0:
            whole_data=selected_data.values
        else:
            whole_data=np.append(whole_data,selected_data.values,axis=0)
    # NOTE: the first k is immediately overwritten; only the 9-feature
    # list (V, I, T) matches the columns kept after the drop above.
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','L1Ang','L2Ang','L3Ang','C1Ang','C2Ang','C3Ang']
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
    day_data={}
    day_data['1224']={}
    c=0
    for key in k:
        day_data['1224'][key]=whole_data[:,c]
        c+=1
#    if n<10:
#        dir="data/Armin_Data/July_sep_0"+str(n)+"/pkl"
#    else:
#        dir="data/Armin_Data/July_sep_"+str(n)+"/pkl"
#    dir_name=dir
#    os.mkdir(dir_name)
    # write python dict to a file
    if n<10:
        dir="data/Armin_Data/July_0"+str(n)+"/pkl/rawdata" + str(n) + ".pkl"
    else:
        dir="data/Armin_Data/July_"+str(n)+"/pkl/rawdata" + str(n) + ".pkl"
    output = open(dir, 'wb')
    pkl.dump(day_data, output)
    output.close()
    print(n)
#%%
# =============================================================================
# =============================================================================
# # train data preparation
# =============================================================================
# =============================================================================
# Load the July-03 feature pickle both standardized (dds) and raw (dd),
# then build the windowed training tensor tt via the loading_data helpers.
filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#%%
dds=load_standardized_data_with_features(filename,k)
#%%
dd=load_data_with_features(filename,k)
#%%
# start offset, samples per window, number of windows
start,SampleNum,N=(0,40,500000)
filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
tt=load_train_vitheta_data_V(start,SampleNum,N,filename,k)
#%%
# Same raw load for July 10 and July 07 (standardized/windowed variants
# left commented out).
filename='data/Armin_Data/July_10/pkl/rawdata10.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#dds14=load_standardized_data_with_features(filename,k)
dd14=load_data_with_features(filename,k)
start,SampleNum,N=(0,40,500000)
#filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
#k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#tt14=load_train_vitheta_data_V(start,SampleNum,N,filename,k)
#%%
filename='data/Armin_Data/July_07/pkl/rawdata7.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
dds7=load_standardized_data_with_features(filename,k)
dd7=load_data_with_features(filename,k)
start,SampleNum,N=(0,40,500000)
#filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
#k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#tt7=load_train_vitheta_data_V(start,SampleNum,N,filename,k)
#%%
def adam_optimizer():
    """Adam optimizer with the DCGAN-style settings lr=2e-4, beta1=0.5."""
    return adam(lr=0.0002, beta_1=0.5)
#%%
def create_generator():
    """Build and compile the LSTM generator.

    Maps a (100, 1) noise sequence to a flat vector of 1*40 samples
    (one 40-sample window). Compiled with binary cross-entropy and the
    Adam settings above.
    """
    generator=Sequential()
    generator.add(CuDNNLSTM(units=256,input_shape=(100,1),return_sequences=True))
    generator.add(LeakyReLU(0.2))
    generator.add(CuDNNLSTM(units=512))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=512))
    generator.add(LeakyReLU(0.2))
#
#    generator.add(LSTM(units=1024))
#    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=1*40))
    generator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return generator
g=create_generator()
g.summary()
#%%
def create_discriminator():
    """Build and compile the LSTM discriminator.

    Scores a (40, 1) window with a single sigmoid unit (probability of
    being real). Compiled with binary cross-entropy + Adam.
    """
    discriminator=Sequential()
    discriminator.add(CuDNNLSTM(units=256,input_shape=(40,1),return_sequences=True))
    discriminator.add(LeakyReLU(0.2))
#    discriminator.add(Dropout(0.3))
    discriminator.add(CuDNNLSTM(units=512))
    discriminator.add(LeakyReLU(0.2))
#
    discriminator.add(Dense(units=512))
    discriminator.add(LeakyReLU(0.2))
#    discriminator.add(Dropout(0.3))
#
#    discriminator.add(LSTM(units=256))
#    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dense(units=1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return discriminator
d =create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
    """Chain generator -> reshape -> (frozen) discriminator into one model.

    The discriminator is frozen here so that training the combined model
    only updates the generator.
    NOTE(review): this model is compiled with the string 'adam' (Keras
    defaults), not adam_optimizer() used by the sub-models — confirm
    whether that difference is intentional.
    """
    discriminator.trainable=False
    gan_input = Input(shape=(100,1))
    x = generator(gan_input)
    # generator emits a flat 1*40 vector; reshape to the (40, 1) sequence
    # the discriminator expects
    x = Reshape((40,1), input_shape=(1*40,1))(x)
    gan_output= discriminator(x)
    gan= Model(inputs=gan_input, outputs=gan_output)
    gan.compile(loss='binary_crossentropy', optimizer='adam')
    return gan
gan = create_gan(d,g)
gan.summary()
#%%
batch_size=10
epochnum=20
#%%
start,SampleNum,N=(0,40,500000)
#X_train = load_data(start,SampleNum,N)
#filename=
# tt was built in the data-prep section above
X_train = tt
batch_count = X_train.shape[0] / batch_size
##%%
#X_train=X_train.reshape(N,3*SampleNum)
#X_train=X_train.reshape(N,SampleNum,3)
#%%
# Pre-draw one fixed batch of random window indices per epoch so every
# training run over the same seed state sees the same real batches.
rnd={}
for i in range(epochnum):
    rnd[i]=np.random.randint(low=0,high=N,size=batch_size)
#    show(rnd[i])
#%%
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
#%%
all_scores=[]
def training(generator,discriminator,gan,epochs, batch_size,all_scores=None):
    """Adversarially train the discriminator/generator pair.

    After each epoch the discriminator is scored over the whole training
    set and the flattened score vector is appended to *all_scores*.

    Relies on module-level globals: X_train_temp (windowed data), rnd
    (per-epoch real-batch indices), N and SampleNum — confirm they are
    set before calling.

    Parameters
    ----------
    all_scores : list, optional
        Accumulator for per-epoch score vectors. BUG FIX: defaults to a
        fresh list, because the script later calls
        training(generator, discriminator, gan, epochnum, batch_size)
        without this argument, which raised TypeError with the old
        mandatory parameter. Passing a list explicitly works as before.
    """
    if all_scores is None:
        all_scores = []
    scale=1
    for e in range(1,epochs+1 ):
        all_score_temp=[]
        # NOTE(review): time.clock() was removed in Python 3.8 — this
        # code targets an older interpreter; use time.perf_counter() if
        # porting forward.
        tik=time.clock()
        print("Epoch %d" %e)
        for _ in tqdm(range(batch_size)):
            #generate random noise as an input to initialize the generator
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            # Generate fake windows from the noise input
            generated_images = generator.predict(noise)
            generated_images = generated_images.reshape(batch_size,SampleNum,1)
#            print(generated_images.shape)
            # Get a random set of real windows (fixed per epoch via rnd)
#            random.seed(0)
            image_batch =X_train_temp[rnd[e-1]]
#            print(image_batch.shape)
            #Construct different batches of real and fake data
            X= np.concatenate([image_batch, generated_images])
            # Labels for generated and real data (0.9 = one-sided label smoothing)
            y_dis=np.zeros(2*batch_size)
            y_dis[:batch_size]=0.9
            #Pre train discriminator on fake and real data before starting the gan.
            discriminator.trainable=True
            discriminator.train_on_batch(X, y_dis)
            #Tricking the noised input of the Generator as real data
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            y_gen = np.ones(batch_size)
            # During the training of gan the discriminator weights are
            # frozen so only the generator learns from this batch.
            discriminator.trainable=False
            gan.train_on_batch(noise, y_gen)
        # Score the full training set in `rate` chunks and keep the
        # flattened per-window scores for this epoch.
        rate=1000
        shift=N/rate
        all_score_temp=[]
        for i in range(rate-1):
            temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
            all_score_temp.append(temp)
#            print(i)
        all_score_temp=np.array(all_score_temp)
        all_score_temp=all_score_temp.ravel()
        all_scores.append(all_score_temp)
        toc = time.clock()
        print(toc-tik)
#%%
# Train on a single feature column; (idx+6) selects the theta columns
# given the 9-feature layout — the 'L1mag' label here does not match that
# offset, presumably a leftover; confirm which column is intended.
kk=['L1mag']
for idx,key in enumerate(kk):
    X_train_temp=X_train[:,(idx+6)]
    #X_train.reshape(N,3*SampleNum)
    X_train_temp=X_train_temp.reshape(N,SampleNum,1)
    tic = time.clock()
    training(generator,discriminator,gan,epochnum,batch_size,all_scores)
    toc = time.clock()
    print(toc-tic)
#
#    gan_name='gan_sep_onelearn_good_09_'+key+'.h5'
#    gen_name='gen_sep_onelearn_good_09_'+key+'.h5'
#    dis_name='dis_sep_onelearn_good_09_'+key+'.h5'
#    print(dis_name)
#    gan.save(gan_name)
#    generator.save(gen_name)
#    discriminator.save(dis_name)
#%%
# Score the selected feature column(s) with the trained discriminator and
# flag windows whose mean-centered score falls outside mu +/- 3*std.
scores_temp={}
probability_mean={}
anomalies_temp={}
#kk=['TA','TB','TC']
for idx,key in enumerate(kk):
    print(key)
    X_train_temp=X_train[:,(idx+6)]
    #X_train.reshape(N,3*SampleNum)
    X_train_temp=X_train_temp.reshape(N,SampleNum,1)
#    id=int(np.floor(idx/3))
#    mode=k[id*3]
#    dis_name='dis_sep_onelearn_'+mode+'.h5'
#
#    discriminator=load_model(dis_name)
    # predict in `rate` chunks; note range(rate-1) leaves the final chunk
    # un-scored — presumably intentional truncation, confirm.
    rate=1000
    shift=N/rate
    scores_temp[key]=[]
    for i in range(rate-1):
        temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
        scores_temp[key].append(temp)
        print(i)
    scores_temp[key]=np.array(scores_temp[key])
    scores_temp[key]=scores_temp[key].ravel()
    probability_mean[key]=np.mean(scores_temp[key])
    data=scores_temp[key]-probability_mean[key]
    mu, std = norm.fit(data)
    zp=3
    high=mu+zp*std
    low=mu-zp*std
    anomalies_temp[key]=np.union1d(np.where(data>=high)[0], np.where(data<=low)[0])
    print(anomalies_temp[key].shape)
#%%
# Train one GAN per feature group (V, I, theta: columns 0, 3, 6) and save
# the three models per group.
# NOTE(review): training() is called here WITHOUT its all_scores argument,
# which raises TypeError with the six-parameter signature defined above —
# confirm/repair before running this cell.
kk=['L1MAG','C1MAG','TA']
for idx,key in enumerate(kk):
    X_train_temp=X_train[:,idx*3]
    #X_train.reshape(N,3*SampleNum)
    X_train_temp=X_train_temp.reshape(N,SampleNum,1)
    tic = time.clock()
    training(generator,discriminator,gan,epochnum,batch_size)
    toc = time.clock()
    print(toc-tic)
    gan_name='gan_sep_onelearn_'+key+'.h5'
    gen_name='gen_sep_onelearn_'+key+'.h5'
    dis_name='dis_sep_onelearn_'+key+'.h5'
    print(dis_name)
    gan.save(gan_name)
    generator.save(gen_name)
    discriminator.save(dis_name)
#%%
# Score every feature with the discriminator saved for its 3-feature
# group (idx//3 maps each feature to its group's representative), then
# flag windows outside mu +/- 3*std of the centered scores.
scores={}
probability_mean={}
anomalies={}
#k=k[0:3]
#k=['L1MAG','C1MAG','TA']
for idx,key in enumerate(k):
    print(key)
    X_train_temp=X_train[:,idx]
    #X_train.reshape(N,3*SampleNum)
    X_train_temp=X_train_temp.reshape(N,SampleNum,1)
    # `id` shadows the builtin of the same name (local to this loop)
    id=int(np.floor(idx/3))
    mode=k[id*3]
    dis_name='dis_sep_onelearn_'+mode+'.h5'
    print(dis_name)
    discriminator=load_model(dis_name)
    rate=1000
    shift=N/rate
    scores[key]=[]
    for i in range(rate-1):
        temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
        scores[key].append(temp)
#        print(i)
    scores[key]=np.array(scores[key])
    scores[key]=scores[key].ravel()
    probability_mean[key]=np.mean(scores[key])
    data=scores[key]-probability_mean[key]
    mu, std = norm.fit(data)
    zp=3
    high=mu+zp*std
    low=mu-zp*std
    anomalies[key]=np.union1d(np.where(data>=high)[0], np.where(data<=low)[0])
    print(anomalies[key].shape)
#%%
def check_common(F1,F2):
    """Values of F2 that lie within +/-2 slots of some event in F1.

    Each matching value is reported once, in order of first discovery,
    as a numpy array.
    """
    matched = []
    for event in F1:
        for offset in (-2, -1, 0, 1, 2):
            candidate = event + offset
            if candidate in F2 and candidate not in matched:
                matched.append(candidate)
    return np.array(matched)
#%%
# Pairwise +/-2-tolerant intersections between every pair of feature
# anomaly lists (iterating the dict yields its keys; self-pairs included),
# plus the running union of all anomaly indices.
commons={}
uni=np.array([])
for idx1,F1 in enumerate(anomalies):
    for idx2,F2 in enumerate(anomalies):
        commons[F1+'_'+F2]=check_common(anomalies[F1],anomalies[F2])
        uni=np.union1d(uni,np.union1d(anomalies[F1],anomalies[F2]))
#%%
select_1224=dd
def show(events):
    """Plot +/-240 samples of V, I, T (and rows 9-11) around each event.

    Event indices are window indices; anom*SampleNum/2 converts them to
    sample positions (windows overlap by half).
    NOTE(review): subplot 224 reads rows 9-11, but dd built from the
    9-feature V/I/T list only has rows 0-8 — this would raise IndexError;
    confirm select_1224 has at least 12 rows before using this panel.
    """
    SampleNum=40
    for anom in events:
        anom=int(anom)
        print(anom)
        plt.subplot(221)
        for i in [0,1,2]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('V')
        plt.subplot(222)
        for i in [3,4,5]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('I')
        plt.subplot(223)
        for i in [6,7,8]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('T')
        plt.subplot(224)
        for i in [9,10,11]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('Q')
        plt.show()
#%%
def check_event_in_feature(event,f):
    """Return 1 when any index within +/-2 of *event* occurs in *f*, else 0."""
    hit = any(event + delta in f for delta in (-2, -1, 0, 1, 2))
    return 1 if hit else 0
#%%
# =============================================================================
# each detected event should have a vector of detected feature
# =============================================================================
def event_vector(event,anomalies):
    """9x1 indicator column: row k is 1 when *event* occurs (within +/-2)
    in the k-th feature's anomaly list (dict iteration order)."""
    flags = [check_event_in_feature(event, anomalies[key]) for key in anomalies]
    vector = np.zeros((9, 1))
    for row, flag in enumerate(flags):
        vector[row, 0] = flag
    return vector
#%%
# One indicator vector per event in the union of anomalies.
# (`id` shadows the builtin; only used for the progress print here.)
event_vectors={}
for id,event in enumerate(uni):
    print(id)
    event_vectors[event]=event_vector(event,anomalies)
#%%
# =============================================================================
# unique events
# =============================================================================
def unique_events(uni):
    """Greedy de-duplication: keep an event only when nothing within
    +/-2 of it has been kept already.

    NOTE(review): the accumulator is seeded with 42, so 42 is always the
    first output element and events near 40-44 are suppressed —
    presumably a debugging leftover; confirm before relying on it.
    """
    kept = [42]
    for i in uni:
        neighborhood = [i - 2, i - 1, i, i + 1, i + 2]
        print(neighborhood)  # trace each candidate window, as the original did
        if not any(j in kept for j in neighborhood):
            kept.append(i)
    return np.array(kept)
#%%
uniques=unique_events(uni)
#%%
# =============================================================================
# two group close intersection check
# =============================================================================
def two_check_inter(g1,g2):
    """Events of g1 that have at least one g2 event within +/-2 slots.

    Returns a numpy array of the matching g1 events, each reported once,
    in g1 order.
    """
    intersection=[]
    for i in g1:
        # tolerance window of +/-2 timeslots around event i
        shift_events=[i-2,i-1,i,i+1,i+2]
        for j in g2:
            # BUG FIX: the original guard tested `j not in intersection`
            # while appending `i`, so a g1 event matched by several g2
            # events was appended repeatedly. Guard on `i` instead so the
            # result contains each matching g1 event exactly once.
            if j in shift_events and i not in intersection:
                intersection.append(i)
    intersection=np.array(intersection)
    return intersection
#%%
# Stack the per-event indicator vectors and their event ids into arrays.
# ('vecotrs' misspelling kept — these names are referenced later; also
# note the loop variable k shadows the feature-list k defined earlier.)
cluster_vecotrs=[]
cluster_vecotrs_events=[]
for i,k in enumerate(event_vectors):
    cluster_vecotrs.append(event_vectors[k])
    cluster_vecotrs_events.append(k)
cluster_vecotrs=np.array(cluster_vecotrs)
cluster_vecotrs_events=np.array(cluster_vecotrs_events)
#%%
# =============================================================================
# cluster events based on detected features
# =============================================================================
def feature_clustering(event_vectors):
    """Group events whose feature-indicator vectors are identical.

    *event_vectors* maps event id -> column vector of 0/1 feature flags.
    Returns {cluster_index: [event ids sharing that exact vector]}, with
    clusters ordered as np.unique orders the distinct vectors.
    """
    stacked = np.array([event_vectors[key] for key in event_vectors])
    signatures = np.unique(stacked, axis=0)
    clusters = {}
    for idx in range(signatures.shape[0]):
        print(idx)  # progress trace, as in the original
        signature = list(signatures[idx].ravel())
        clusters[idx] = [ev for ev in event_vectors
                         if list(event_vectors[ev].ravel()) == signature]
    return clusters
#%%
# Manual inspection cells: print each cluster signature and plot its first
# member. NOTE(review): unique_feature_clusters and ff are not defined in
# this file's visible code (presumably produced interactively from
# feature_clustering); confirm before running.
for i in range(197):
    print(list(unique_feature_clusters[i]))
    show([ff[i][0]])
#%%
i=190
print(list(unique_feature_clusters[i]))
show([ff[i][0]])
#%%
for j in ff[i]:
    show([j])
#%%
# Load the precomputed one-day feature dict (scores, maxvar, maxmaxmin).
pkl_file = open('data/Armin_data/oneday_3d_events.pkl', 'rb')
whole_features = pkl.load(pkl_file)
pkl_file.close()
# =============================================================================
# =============================================================================
# # selected data features for final detection
# =============================================================================
# =============================================================================
# Box-Cox-transform the two window statistics used for detection
# (boxcox requires strictly positive input).
data=[]
#xt, lmbda = stats.boxcox((whole_features['scores'])+1)
#xt=preprocessing.scale(xt)
#data.append(whole_features['scores'])
#
##xt, lmbda = stats.boxcox((whole_features['scores_V'])+1)
##xt=preprocessing.scale(xt)
#data.append(whole_features['scores_V'])
xt, lmbda = stats.boxcox((whole_features['maxvar']))
#xt=preprocessing.scale(xt)
data.append(xt)
xt, lmbda = stats.boxcox((whole_features['maxmaxmin']))
#xt=preprocessing.scale(xt)
data.append(xt)
data=np.array(data)
# =============================================================================
# =============================================================================
# # basic whole anomalies with zp=3
# =============================================================================
# =============================================================================
# Gaussian fit per transformed statistic; indices outside mu +/- 3*std
# are the baseline anomalies.
zp=3
names=['maxvar','maxmaxmin']
basic_anoms={}
for i,d in enumerate(data):
    dt = d
    # Fit a normal distribution to the data:
    mu, std = norm.fit(dt)
    high=mu+zp*std
    low=mu-zp*std
    anoms_1224=np.union1d(np.where(dt>=high)[0], np.where(dt<=low)[0])
    print(anoms_1224.shape)
    basic_anoms[names[i]]=anoms_1224
#%%
# =============================================================================
# anomaly flag and color
# =============================================================================
# Per-window flag (0/1) and scatter color ('b'/'r'): `flag`/`color` mark
# the GAN-union anomalies only; `*_mvmpm` additionally marks the
# maxvar/maxmaxmin baseline anomalies (capped at index 499500 to stay
# inside the scores-length arrays).
flag=np.zeros((scores['L1MAG'].shape[0],1))
color=["b" for x in range(scores['L1MAG'].shape[0])]
flag_mvmpm=np.zeros((scores['L1MAG'].shape[0],1))
color_mvmpm=["b" for x in range(scores['L1MAG'].shape[0])]
for i in uni:
    flag[int(i)]=1
    color[int(i)]="r"
    flag_mvmpm[int(i)]=1
    color_mvmpm[int(i)]="r"
for i in basic_anoms:
    for j in basic_anoms[i]:
        if j<499500:
            flag_mvmpm[int(j)]=1
            color_mvmpm[int(j)]="r"
#%%
# =============================================================================
# =============================================================================
# # 3d scatter plot
# =============================================================================
# =============================================================================
# Scatter GAN scores vs the two scaled window statistics, colored by the
# anomaly flags built above.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(scores['L1MAG'], whole_features['maxmaxmin_scale'][0:scores['L1MAG'].shape[0]], whole_features['maxvar_scale'][0:scores['L1MAG'].shape[0]],color=color)
ax.set_xlabel('MPM')
ax.set_ylabel('MV')
ax.set_zlabel('Scaled GAN scores')
#%%
# Collapse each 9-entry indicator vector into 4 group flags:
# any-V (rows 0-2), any-I (3-5), any-T (6-8), plus a "strong" flag set
# when any group has >= 2 detections.
high_event_vectors_dict={}
high_event_vectors=[]
for i in event_vectors:
    vec=[]
    if sum(event_vectors[i][0:3])!=0:
        vec.append(1)
    else:
        vec.append(0)
    if sum(event_vectors[i][3:6])!=0:
        vec.append(1)
    else:
        vec.append(0)
    if sum(event_vectors[i][6:9])!=0:
        vec.append(1)
    else:
        vec.append(0)
    if sum(event_vectors[i][0:3])>=2 or sum(event_vectors[i][3:6])>=2 or sum(event_vectors[i][6:9])>=2:
        vec.append(1)
    else:
        vec.append(0)
    high_event_vectors_dict[i]=vec
    high_event_vectors.append(vec)
#%%
# Keep events with at least 3 of the 4 group flags set.
selected_events_for_clustering=[]
for e in high_event_vectors_dict:
    if sum(high_event_vectors_dict[e])>=3:
        selected_events_for_clustering.append(e)
selected_events_for_clustering=np.array(selected_events_for_clustering)
#%%
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,988
|
zyh88/PMU
|
refs/heads/master
|
/PV_GAN_MULTI_LSTM_PMU_dl_data.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense,Activation, Flatten,Dropout, Input, Embedding, LSTM, MaxPooling2D, Reshape, CuDNNLSTM,Conv2DTranspose, Conv2D
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
#%%
# Concatenate the phase-A V, I, P, Q columns from all pickle files in
# data/jul1pkl, ordered numerically by filename stem.
voltage=[]
current=[]
power=[]
react=[]
dir_name="data/jul1pkl"
filename=os.listdir(dir_name)
filename = sorted(filename,key=lambda x: int(os.path.splitext(x)[0])) #sort file by digit
for file in filename:
    print(file)
    path=os.path.join(dir_name,file)
    pkl_file = open(path, 'rb')
    selected_data = pkl.load(pkl_file)
    pkl_file.close()
    selected_data=pd.DataFrame(selected_data)
    voltage.append(selected_data['L1MAG'].values)
    current.append(selected_data['C1MAG'].values)
    power.append(selected_data['PA'].values)
    react.append(selected_data['QA'].values)
voltage=np.array(voltage).ravel()
current=np.array(current).ravel()
power=np.array(power).ravel()
react=np.array(react).ravel()
#%%
# NOTE: `%matplotlib auto` is an IPython magic — valid only when this
# file is run cell-by-cell in IPython/Spyder, a SyntaxError as plain Python.
%matplotlib auto
plt.plot(voltage)
#%%
dir_name="data/jul1pkl"
filename=os.listdir(dir_name)
filename = sorted(filename,key=lambda x: int(os.path.splitext(x)[0])) #sort file by digit
def load_data(filenames, start, SampleNum, N):
    """Build windowed training samples from the per-file pickles in `dir_name`.

    Parameters
    ----------
    filenames : ordered list of pickle file names (module-level `dir_name`).
    start : index of the first window in the concatenated series.
    SampleNum : window length; consecutive windows overlap by SampleNum/2.
    N : number of windows to extract.

    Returns an (N, 12, SampleNum) array of per-window mean-removed,
    per-feature standardized signals.
    """
    # Concatenate every pickle file into one DataFrame, forward-filling gaps.
    for count, file in enumerate(filenames):
        path = os.path.join(dir_name, file)
        with open(path, 'rb') as pkl_file:  # context manager: no leaked handle
            selected_data = pkl.load(pkl_file)
        selected_data = pd.DataFrame(selected_data)
        selected_data = selected_data.fillna(method='ffill')
        print(count)
        if count == 0:
            data = selected_data
        else:
            data = pd.concat([data, selected_data])
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    select = []
    for f in features:
        select.append(data[f].values)
    select = np.array(select)
    select = preprocessing.scale(select, axis=1)  # standardize each feature row
    selected_data = 0  # drop the last per-file frame to free memory
    end = start + SampleNum
    shift = int(SampleNum / 2)  # 50% window overlap
    train_data = np.zeros((N, 12, SampleNum))
    for i in range(N):
        if i % 1000 == 0:
            # fix: was print('iter num: %i', i) -- printed the tuple, never formatted
            print('iter num: %i' % i)
        temp = select[:, start + i * shift:end + i * shift]
        temp = temp - temp.mean(axis=1).reshape(-1, 1)  # remove per-window mean
        train_data[i, :] = temp
    return train_data
#,select_proc,reduced_mean
#X_train=load_data()
#print(X_train.shape)
#%%
dir_name="data/jul1pkl"
filename=os.listdir(dir_name)
filename = sorted(filename,key=lambda x: int(os.path.splitext(x)[0])) #sort file by digit
def load_real_data(filenames, start, SampleNum, N):
    """Return the raw (unscaled) 12-feature matrix for all files in `dir_name`.

    Only `filenames` is used; start/SampleNum/N are kept for call-site
    compatibility with load_data. Returns a (12, total_samples) array in the
    same feature order as load_data.
    """
    for count, file in enumerate(filenames):
        path = os.path.join(dir_name, file)
        with open(path, 'rb') as pkl_file:  # context manager: no leaked handle
            selected_data = pkl.load(pkl_file)
        selected_data = pd.DataFrame(selected_data)
        selected_data = selected_data.fillna(method='ffill')
        print(count)
        if count == 0:
            data = selected_data
        else:
            data = pd.concat([data, selected_data])
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    select = []
    for f in features:
        select.append(data[f].values)
    select = np.array(select)
    return select
#%%
start,SampleNum,N=(0,40,200000)
# NOTE(review): load_data currently returns only train_data (the extra returns
# are commented out in its body), so this 3-way unpack raises ValueError as
# written -- confirm which return signature was intended.
X_train, selected ,data= load_data(filename,start,SampleNum,N)
print(X_train.shape,selected.shape)
#%%
def adam_optimizer():
    """Adam optimizer shared by generator and discriminator (standard GAN settings)."""
    optimizer = adam(lr=0.0002, beta_1=0.5)
    return optimizer
#%%
def create_generator():
    """Stacked-LSTM generator: 100-step latent noise -> flat 12*40 window."""
    layer_stack = (
        CuDNNLSTM(units=256, input_shape=(100, 1), return_sequences=True),
        LeakyReLU(0.2),
        CuDNNLSTM(units=512),
        LeakyReLU(0.2),
        Dense(units=512),
        LeakyReLU(0.2),
        Dense(units=12 * 40),
    )
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return model
g = create_generator()
g.summary()
#%%
def create_discriminator():
    """Stacked-LSTM discriminator: (40, 12) window -> real/fake probability."""
    layer_stack = (
        CuDNNLSTM(units=256, input_shape=(40, 12), return_sequences=True),
        LeakyReLU(0.2),
        CuDNNLSTM(units=512),
        LeakyReLU(0.2),
        Dense(units=512),
        LeakyReLU(0.2),
        Dense(units=1, activation='sigmoid'),
    )
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return model
d =create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
    """Chain generator -> reshape -> frozen discriminator into one trainable model."""
    discriminator.trainable = False  # freeze D while G trains through the GAN
    noise_in = Input(shape=(100, 1))
    fake_flat = generator(noise_in)
    fake_window = Reshape((40, 12), input_shape=(12 * 40, 1))(fake_flat)
    validity = discriminator(fake_window)
    combined = Model(inputs=noise_in, outputs=validity)
    combined.compile(loss='binary_crossentropy', optimizer='adam')
    return combined
gan = create_gan(d,g)
gan.summary()
#%% Training hyper-parameters.
batch_size=10
epochnum=100
#%% Build the training tensor from the pickles.
start,SampleNum,N=(0,40,100000)
X_train = load_data(filename,start,SampleNum,N)
batch_count = X_train.shape[0] / batch_size
#%% Reorder to (window, time, feature) as the LSTMs expect.
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%% Fresh models for this run.
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
#%%
def training(generator, discriminator, gan, epochs, batch_size):
    """Alternate discriminator / generator updates for `epochs` rounds.

    Each round runs `batch_size` D-then-G steps on random mini-batches of the
    module-level X_train. Real windows are labeled 0.9 (one-sided label
    smoothing), generated windows 0.
    """
    scale = 1  # std-dev multiplier for the latent noise
    for e in range(1, epochs + 1):
        # fix: time.clock() was removed in Python 3.8; perf_counter is the
        # documented replacement for wall-clock interval timing.
        tik = time.perf_counter()
        print("Epoch %d" % e)
        for _ in tqdm(range(batch_size)):
            # Latent noise -> fake windows.
            noise = scale * np.random.normal(0, 1, [batch_size, 100])
            noise = noise.reshape(batch_size, 100, 1)
            generated_images = generator.predict(noise)
            generated_images = generated_images.reshape(batch_size, SampleNum, 12)
            # Random real mini-batch.
            image_batch = X_train[np.random.randint(low=0, high=X_train.shape[0], size=batch_size)]
            # Train the discriminator on the combined real+fake batch.
            X = np.concatenate([image_batch, generated_images])
            y_dis = np.zeros(2 * batch_size)
            y_dis[:batch_size] = 0.9  # smoothed "real" label
            discriminator.trainable = True
            discriminator.train_on_batch(X, y_dis)
            # Train the generator through the chained GAN with D frozen,
            # labeling its fake output as real.
            noise = scale * np.random.normal(0, 1, [batch_size, 100])
            noise = noise.reshape(batch_size, 100, 1)
            y_gen = np.ones(batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y_gen)
        toc = time.perf_counter()
        print(toc - tik)
# (dead per-epoch plotting hooks removed; plot_generated_images is not defined here)
# fix: time.clock() was removed in Python 3.8 -- use the monotonic perf counter.
tic = time.perf_counter()
training(generator,discriminator,gan,epochnum,batch_size)
toc = time.perf_counter()
print(toc-tic)
#%% Persist the trained models.
gan.save('PV_GPU_gan_mul_2LSTM_N100000_e100_b10.h5')
generator.save('PV_GPU_generator_mul_2LSTM_N100000_e100_b10.h5')
discriminator.save('PV_GPU_discriminator_mul_2LSTM_N100000_e100_b10.h5')
#%% Reload a previously trained run (note: different file names than the save above).
gan=load_model('PV_GPU_gan_mul_LSTM_N2000_e100_b100.h5')
generator=load_model('PV_GPU_generator_mul_LSTM_N2000_e100_b100.h5')
discriminator=load_model('PV_GPU_discriminator_mul_LSTM_N2000_e100_b100.h5')
#%%
start,SampleNum,N=(0,40,2000)
#%%
# NOTE(review): load_data is defined as load_data(filenames, start, SampleNum, N)
# and returns a single array; this call omits `filename` and unpacks three
# values -- confirm the intended signature before running.
X_train, selected,selected_data = load_data(start,SampleNum,N)
batch_count = X_train.shape[0] / batch_size
#%%
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%% Discriminator score for every training window.
a=discriminator.predict_on_batch(X_train)
#%% Re-score in chunks to bound memory use.
# NOTE(review): range(rate-1) skips the final chunk of windows -- confirm.
rate=100
shift=N/rate
scores=[]
for i in range(rate-1):
    temp=discriminator.predict_on_batch(X_train[int(i*shift):int((i+1)*shift)])
    scores.append(temp)
    print(i)
scores=np.array(scores)
scores=scores.ravel()
#%% Center the scores so event detection is mean-relative.
probability_mean=np.mean(scores)
a=scores-probability_mean
#%% Plot the centered event scores over the training windows.
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 8
fig_size[1] = 6
plt.plot(a.ravel())
plt.ylabel('Event score')
plt.xlabel('training sample number')
#plt.ylim([.85,.95])
plt.savefig('probability score')
plt.show()
#%% Fit a Gaussian to the centered scores and overlay its pdf on a histogram.
data = a
# Fit a normal distribution to the data:
mu, std = norm.fit(data)
# Plot the histogram.
plt.hist(data, bins=25, density=True, alpha=0.6, color='g')
# Plot the PDF.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
plt.savefig('normalpdfscore')
plt.show()
#%% Flag windows whose centered score is more than stdnum sigma from the mean.
stdnum=3.5
high=mu+stdnum*std
low=mu-stdnum*std
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 8
fig_size[1] = 6
anoms=np.union1d(np.where(a>=high)[0], np.where(a<=low)[0])
print(np.union1d(np.where(a>=high)[0], np.where(a<=low)[0]).shape)
tt=X_train.reshape(N,12*SampleNum)
tt=X_train.reshape(N,12,SampleNum)  # (window, feature, time) for plotting
#%% Plot all 12 scaled channels of each flagged window.
normal=np.arange(100,110)
for i in anoms :
    print(i*int(SampleNum/2))
    for j in range(12):
        plt.plot(tt[i][j])
#    plt.legend(('vol', 'curr', 'p','q'),shadow=True, loc=(0.01, 0.48), handlelength=1.5, fontsize=16)
    plt.show()
#%% Raw (unscaled) series for the whole run, indexed [feature][time].
select=load_real_data(filename,start,SampleNum,N)
#%% Four-panel raw view (V / I / P / Q, rows 0-2 / 3-5 / 6-8 / 9-11) per anomaly.
for anom in anoms:
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.show()
#%% Same four-panel view for the first ten (presumed normal) windows, for comparison.
normal=np.arange(0,10)
for anom in normal:
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.show()
#%%
# NOTE(review): `selected` comes from the 3-way load_data unpack above, which
# does not match the current single-return load_data -- confirm before running.
selected=pd.DataFrame(selected)
selected=selected.T
#%% Long-horizon plot of selected channels with anomaly spans shaded red.
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
start=0
dur=N*20
end=start+dur
selected['color']='b'
for i in anoms:
#    print(i)
    selected['color'].iloc[i*int(SampleNum/2):((i+1)*int(SampleNum/2)+40)]='r'
markers_on=np.where(selected['color'].iloc[start:end]=='r')
#plt.plot(selected[0].iloc[start:end], markevery=list(markers_on),marker='X',mec='r',mew=np.log(np.log(dur))
#         ,ms=2*np.log(np.log(dur)),mfcalt='r')
#for i in range(5):
#    plt.plot(selected[i].iloc[start:end])
#    plt.show()
for j in [1,2,6,9]:
    print(j)
    plt.plot(list(selected[j].iloc[start:end].values))
#    plt.xlabel('timeslots',fontsize=28)
#    plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
    for i in anoms:
        if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
            plt.axvspan(i*int(SampleNum/2), ((i+1)*int(SampleNum/2)+40), color='red', alpha=0.5)
    plt.show()
print('This is real ones')
for j in ['L3MAG','C3MAG','PC','QC']:
    print(j)
    plt.plot(list(selected_data[j].iloc[start:end].values))
#    plt.xlabel('timeslots',fontsize=28)
#    plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
    for i in anoms:
        if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
            plt.axvspan(i*int(SampleNum/2), ((i+1)*int(SampleNum/2)+40), color='red', alpha=0.5)
    plt.show()
#plt.savefig('long.pdf', format='pdf', dpi=1200)
#plt.savefig('long %d.png' %dur)
#%% Zoomed per-anomaly plot of channel 2 with a 20-sample margin on each side.
dur_anoms=[]
for i in anoms:
    if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
        dur_anoms.append([i*int(SampleNum/2),((i+1)*int(SampleNum/2)+20)])
        plt.plot(selected[2].iloc[i*int(SampleNum/2)-20:((i+1)*int(SampleNum/2)+40)].values)
        plt.xlabel('timeslots',fontsize=28)
        plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
#        plt.savefig('figures/event %d.png' %i)
#        plt.savefig('figures/event %d.pdf' %i, format='pdf', dpi=1200)
        plt.show()
print(dur_anoms)
print(len(dur_anoms))
#%%
# =============================================================================
# =============================================================================
# # subplot
# PMU
# =============================================================================
# Four-panel overview of the raw PMU day: phase-A V, I, P, Q.
plt.subplot(2, 2, 1)
plt.plot(list(selected_data['L1MAG'].values))
plt.title('Real PMU data')
plt.ylabel('Real Voltage')
#plt.ylim([7100,7200])
plt.subplot(2, 2, 2)
plt.plot(list(selected_data['C1MAG'].values))
#plt.xlabel('time')
plt.ylabel('Real Current')
#plt.ylim([1,2])
plt.subplot(2, 2, 3)
plt.plot(list(selected_data['PA'].values))
#plt.title('Real PMU data')
plt.ylabel('Real ACtive Power')
plt.xlabel('time')
#plt.ylim([7100,7200])
plt.subplot(2, 2, 4)
plt.plot(list(selected_data['QA'].values))
#plt.title('Real PMU data')
plt.ylabel('Real Reactive Power')
plt.xlabel('time')
#plt.ylim([7100,7200])
plt.savefig('real.png')
plt.show()
#%%% Same overview after per-column standardization.
ss=preprocessing.scale(selected_data,axis=0)
plt.subplot(2, 2, 1)
plt.plot(ss[:,0])
plt.title('scaled PMU data')
plt.ylabel('scaled Voltage')
#plt.ylim([7100,7200])
plt.subplot(2, 2, 2)
plt.plot(ss[:,6])
#plt.xlabel('time')
plt.ylabel('scaled Current')
#plt.ylim([1,2])
plt.subplot(2, 2, 3)
plt.plot(ss[:,13])
#plt.title('scaled PMU data')
plt.ylabel('scaled ACtive Power')
plt.xlabel('time')
#plt.ylim([7100,7200])
plt.subplot(2, 2, 4)
plt.plot(ss[:,16])
#plt.title('scaled PMU data')
plt.ylabel('scaled Reactive Power')
plt.xlabel('time')
#plt.ylim([7100,7200])
plt.savefig('scale.png')
plt.show()
#plt.savefig('scale.png')
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,989
|
zyh88/PMU
|
refs/heads/master
|
/GAN_MULTI_LSTM_PMU_twolayer.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
#%%
def load_data(start, SampleNum, N):
    """Windowed training samples for PMU '1224' from CompleteOneDay.pkl.

    Returns an (N, 12, SampleNum) array of per-window mean-removed,
    per-feature standardized signals with 50% window overlap, starting
    at sample index `start`.
    """
    with open('CompleteOneDay.pkl', 'rb') as pkl_file:  # no leaked handle
        selected_data = pkl.load(pkl_file)
    for pmu in ['1224']:
        selected_data[pmu] = pd.DataFrame.from_dict(selected_data[pmu])
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    print(selected_data.keys())
    select = []
    for f in features:
        select.append(selected_data[pmu][f])
    selected_data = 0  # release the raw dict
    select = np.array(select)
    print(select.shape)
    select = preprocessing.scale(select, axis=1)  # standardize each feature row
    end = start + SampleNum
    shift = int(SampleNum / 2)  # 50% window overlap
    train_data = np.zeros((N, 12, SampleNum))
    for i in range(N):
        if i % 1000 == 0:
            # fix: was print('iter num: %i', i) -- printed the tuple, never formatted
            print('iter num: %i' % i)
        temp = select[:, start + i * shift:end + i * shift]
        temp = temp - temp.mean(axis=1).reshape(-1, 1)  # remove per-window mean
        train_data[i, :] = temp
    return train_data
#X_train=load_data()
#print(X_train.shape)
#%%
filename='CompleteOneDay.pkl'
def load_real_data(filename):
    """Return the raw (unscaled) 12-feature matrix for PMU '1224' from `filename`."""
    pmu = '1224'
    with open(filename, 'rb') as pkl_file:  # context manager: no leaked handle
        selected_data = pkl.load(pkl_file)
    selected_data = pd.DataFrame(selected_data)
    selected_data = selected_data.fillna(method='ffill')
    print(selected_data.keys())
    data = selected_data[pmu]
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    select = []
    for f in features:
        select.append(list(data[f]))
    select = np.array(select)
    return select
#%%
def adam_optimizer():
    """Shared Adam optimizer with the usual GAN hyper-parameters."""
    learning_rate, beta1 = 0.0002, 0.5
    return adam(lr=learning_rate, beta_1=beta1)
#%%
def create_generator():
    """Two-layer CuDNN-LSTM generator: 100-step noise -> flat 12*40 window."""
    stack = (
        CuDNNLSTM(units=256, input_shape=(100, 1), return_sequences=True),
        LeakyReLU(0.2),
        CuDNNLSTM(units=512),
        LeakyReLU(0.2),
        Dense(units=512),
        LeakyReLU(0.2),
        Dense(units=12 * 40),
    )
    net = Sequential()
    for piece in stack:
        net.add(piece)
    net.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return net
g=create_generator()
g.summary()
#%%
def create_discriminator():
    """Two-layer CuDNN-LSTM discriminator with a 20-unit bottleneck head."""
    stack = (
        CuDNNLSTM(units=256, input_shape=(40, 12), return_sequences=True),
        LeakyReLU(0.2),
        CuDNNLSTM(units=512),
        LeakyReLU(0.2),
        Dense(units=512),
        LeakyReLU(0.2),
        Dense(units=20),  # low-dimensional "latent" head before the decision
        LeakyReLU(0.2),
        Dense(units=1, activation='sigmoid'),
    )
    net = Sequential()
    for piece in stack:
        net.add(piece)
    net.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return net
d =create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
    """Stack generator -> reshape -> frozen discriminator into one model."""
    discriminator.trainable = False  # D is fixed while G trains through the GAN
    latent = Input(shape=(100, 1))
    flat_fake = generator(latent)
    shaped_fake = Reshape((40, 12), input_shape=(12 * 40, 1))(flat_fake)
    decision = discriminator(shaped_fake)
    chained = Model(inputs=latent, outputs=decision)
    chained.compile(loss='binary_crossentropy', optimizer='adam')
    return chained
gan = create_gan(d,g)
gan.summary()
#%% Training hyper-parameters.
batch_size=100
epochnum=100
#%% Build the training tensor from CompleteOneDay.pkl.
start,SampleNum,N=(0,40,500000)
#X_train = load_data(start,SampleNum,N)
X_train = load_data(start,SampleNum,N)
batch_count = X_train.shape[0] / batch_size
#%% Reorder to (window, time, feature) as the LSTMs expect.
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%% Fresh models for this run.
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
#%%
def training(generator, discriminator, gan, epochs, batch_size):
    """Alternate discriminator / generator updates for `epochs` rounds.

    Each round runs `batch_size` D-then-G steps on random mini-batches of the
    module-level X_train. Real windows are labeled 0.9 (one-sided label
    smoothing), generated windows 0.
    """
    scale = 1  # std-dev multiplier for the latent noise
    for e in range(1, epochs + 1):
        # fix: time.clock() was removed in Python 3.8; perf_counter is the
        # documented replacement for wall-clock interval timing.
        tik = time.perf_counter()
        print("Epoch %d" % e)
        for _ in tqdm(range(batch_size)):
            # Latent noise -> fake windows.
            noise = scale * np.random.normal(0, 1, [batch_size, 100])
            noise = noise.reshape(batch_size, 100, 1)
            generated_images = generator.predict(noise)
            generated_images = generated_images.reshape(batch_size, SampleNum, 12)
            # Random real mini-batch.
            image_batch = X_train[np.random.randint(low=0, high=X_train.shape[0], size=batch_size)]
            # Train the discriminator on the combined real+fake batch.
            X = np.concatenate([image_batch, generated_images])
            y_dis = np.zeros(2 * batch_size)
            y_dis[:batch_size] = 0.9  # smoothed "real" label
            discriminator.trainable = True
            discriminator.train_on_batch(X, y_dis)
            # Train the generator through the chained GAN with D frozen,
            # labeling its fake output as real.
            noise = scale * np.random.normal(0, 1, [batch_size, 100])
            noise = noise.reshape(batch_size, 100, 1)
            y_gen = np.ones(batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y_gen)
        toc = time.perf_counter()
        print(toc - tik)
# (dead per-epoch plotting hooks removed; plot_generated_images is not defined here)
# fix: time.clock() was removed in Python 3.8 -- use the monotonic perf counter.
tic = time.perf_counter()
training(generator,discriminator,gan,epochnum,batch_size)
toc = time.perf_counter()
print(toc-tic)
#%% Persist the trained models.
#
gan.save('GPU_gan_mul_LSTM_twolayer_N500000_e100_b10_1224_latent20.h5')
generator.save('GPU_generator_mul_LSTM_twolayer_N500000_e100_b10_1224_latent20.h5')
discriminator.save('GPU_discriminator_mul_LSTM_twolayer_N500000_e100_b10_1224_latent20.h5')
#%% Reload a previous run (note: different file names than the save above).
gan=load_model('GPU_gan_mul_LSTM_twolayer_N500000_e1000_b100.h5')
generator=load_model('GPU_generator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
discriminator=load_model('GPU_discriminator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
#%% Rebuild the training tensor for scoring.
start,SampleNum,N=(0,40,500000)
X_train= load_data(start,SampleNum,N)
#batch_count = X_train.shape[0] / batch_size
#%%
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%% Discriminator score for every window.
a=discriminator.predict_on_batch(X_train)
#%% Re-score in chunks to bound memory use.
# NOTE(review): range(rate-1) skips the final chunk of windows -- confirm.
rate=1000
shift=N/rate
scores_1225=[]
for i in range(rate-1):
    temp=discriminator.predict_on_batch(X_train[int(i*shift):int((i+1)*shift)])
    scores_1225.append(temp)
    print(i)
scores_1225=np.array(scores_1225)
scores_1225=scores_1225.ravel()
#%%
#%% Center the scores so event detection is mean-relative.
probability_mean=np.mean(scores_1225)
a=scores_1225-probability_mean
#%% Plot the centered event scores.
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 8
fig_size[1] = 6
plt.plot(a.ravel())
plt.show()
#%% Fit a Gaussian to the centered scores and overlay its pdf on a histogram.
data = a
# Fit a normal distribution to the data:
mu, std = norm.fit(data)
# Plot the histogram.
plt.hist(data, bins=25, density=True, alpha=0.6, color='g')
# Plot the PDF.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
plt.show()
#%% Flag windows beyond mu +/- zp*std as anomalies.
zp=9
high=mu+zp*std
low=mu-zp*std
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 8
fig_size[1] = 6
anoms_1225=np.union1d(np.where(a>=high)[0], np.where(a<=low)[0])
print(np.union1d(np.where(a>=high)[0], np.where(a<=low)[0]).shape)
#tt=X_train.reshape(N,12*SampleNum)
#tt=X_train.reshape(N,12,SampleNum)
#%% Sweep the threshold multiplier zp and record, for each setting, the number
# of detections, the average per-detection deviation ("entropy"), and the
# max/min deviation plus the window indices attaining them.
ss=preprocessing.scale(select,axis=1)
zpnum=[]
entropy=[]
of=[]
ofn=[]
avg=[]
shape=[]
maxmin=np.zeros((1700,4))
for i in range(1700):
    print(i)
    zp=(i/10)+32
    high=mu+zp*std
    low=mu-zp*std
    anoms_1225=np.union1d(np.where(a>=high)[0], np.where(a<=low)[0])
    zpnum.append(anoms_1225.shape[0])
    shape.append(anoms_1225.shape[0])
    mn=0
    keep=[]
    if not anoms_1225.shape[0]==0:
        maxx=0
        minn=100
        for anom in anoms_1225:
            mnanom=0
            for k in range(9):
                # NOTE(review): deviation of channel k is taken about the mean
                # of channel 0 (ss[0]), not ss[k]; and only 9 of 12 channels
                # are summed -- confirm both are intentional.
                vmr=ss[k][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)]-np.mean(ss[0][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
                mnanom+=np.sqrt(np.sum(vmr**2))
            # NOTE(review): divides by 12 although only 9 channels contribute.
            mnanom=mnanom/12
            if mnanom>maxx:
                indxmax=anom
                maxx=mnanom
            if mnanom<minn:
                indxmin=anom
                minn=mnanom
            keep.append(mnanom)
            mn+=mnanom
        maxmin[i][0]=max(keep)
        maxmin[i][1]=min(keep)
        maxmin[i][2]=indxmax
        maxmin[i][3]=indxmin
        mnalpha=mn/zp
        mn=mn/anoms_1225.shape[0]
        avg.append(mnalpha)
        entropy.append(mn)
        of.append(mn+np.sqrt(anoms_1225.shape[0]))
        ofn.append(mn+(anoms_1225.shape[0]))
plt.plot(entropy)
plt.show()
plt.plot(of)
plt.show()
plt.plot(ofn)
plt.show()
plt.plot(maxmin[:,0])
plt.plot(maxmin[:,1])
plt.show()
#%% Re-plot selected sweep diagnostics.
plt.plot(entropy)
plt.show()
plt.plot(maxmin[:,1])
plt.show()
plt.plot(shape[200:])
plt.show()
#%% All-12-channel view of the first 100 detections (scaled windows).
# NOTE(review): `tt` is only assigned in the commented-out lines above -- confirm.
normal=np.arange(100,110)
for i in anoms_1225[0:100] :
    print(i*int(SampleNum/2))
    for j in range(12):
        plt.plot(tt[i][j])
    plt.legend(('vol', 'curr', 'p','q'),shadow=True, loc=(0.01, 0.48), handlelength=1.5, fontsize=16)
    plt.show()
#%% Raw series for the whole day, indexed [feature][time].
select=load_real_data(filename)
#%% Output directory for the per-anomaly figures.
dst="figures/1225_100_batch_anoms"
os.mkdir(dst)
#%% Four-panel raw view (V/I/P/Q) of each detection.
# NOTE(review): this plots `select_1225`, which is never assigned in this file
# (load_real_data fills `select`) -- confirm the intended variable.
for anom in anoms_1225:
    print(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select_1225[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1225[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1225[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select_1225[i][anom*int(SampleNum/2):(anom*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
#    plt.savefig('figures/1225_100_batch_anoms/anom %d.png' %anom)
    plt.show()
    print(a[int(anom)])
#%%
# NOTE(review): `selected` is never assigned in this file before this cell;
# this relies on interactive-session state -- confirm.
selected=pd.DataFrame(selected)
selected=selected.T
#%% Long-horizon plots with anomaly spans shaded; one saved figure per channel.
# NOTE(review): `anoms` is also undefined here (only anoms_1225 exists) -- confirm.
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 12 and height to 9
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
start=0
dur=int(N*20)
end=start+dur
#selected['color']='b'
#for i in anoms_1224:
#    print(i)
##    print(i)
#    selected['color'].iloc[i*int(SampleNum/2):((i+1)*int(SampleNum/2)+40)]='r'
#
#markers_on=np.where(selected['color'].iloc[start:end]=='r')
#plt.plot(selected[0].iloc[start:end], markevery=list(markers_on),marker='X',mec='r',mew=np.log(np.log(dur))
#         ,ms=2*np.log(np.log(dur)),mfcalt='r')
#for i in range(5):
#    plt.plot(selected[i].iloc[start:end])
#    plt.show()
for j in [0,3,6,9]:
    plt.plot(selected[j][start:end])
#    plt.xlabel('timeslots',fontsize=28)
#    plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
    for i in anoms:
#        print(i)
        if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
            plt.axvspan(i*int(SampleNum/2), ((i+1)*int(SampleNum/2)+40), color='red', alpha=0.5)
    plt.savefig('day %d.pdf' %j, format='pdf', dpi=1200)
    plt.savefig('day %d.png' %j)
    plt.show()
#plt.savefig('long.pdf', format='pdf', dpi=1200)
#plt.savefig('long %d.png' %dur)
#%% Zoomed per-anomaly plot of channel 2 with a 20-sample margin.
dur_anoms=[]
for i in anoms:
    if (i*int(SampleNum/2)+1) in list(np.arange(start,end)):
        dur_anoms.append([i*int(SampleNum/2),((i+1)*int(SampleNum/2)+20)])
        plt.plot(selected[2].iloc[i*int(SampleNum/2)-20:((i+1)*int(SampleNum/2)+40)].values)
        plt.xlabel('timeslots',fontsize=28)
        plt.ylabel('phase 1 current magnitude pmu="1024"',fontsize=28)
#        plt.savefig('figures/event %d.png' %i)
#        plt.savefig('figures/event %d.pdf' %i, format='pdf', dpi=1200)
        plt.show()
print(dur_anoms)
print(len(dur_anoms))
#%%
# =============================================================================
# =============================================================================
# # mutual events 1224, 1225
# =============================================================================
# =============================================================================
anom1224=os.listdir('figures/1224 two layer/')
anom1225=os.listdir('figures/1225_100_batch_anoms')
#%%
# Recover the integer window index from saved figure names of the form
# "anom <idx>.png". The original repeated the a1225 int() conversion twice
# (a no-op second pass) -- collapsed into single comprehensions.
a1224 = np.array([int(name.split(' ')[1].split('.')[0]) for name in anom1224])
a1225 = np.array([int(name.split(' ')[1].split('.')[0]) for name in anom1225])
#%%
# =============================================================================
# =============================================================================
# # copy mutual timeslots
# =============================================================================
# =============================================================================
# NOTE(review): `intersect` is never defined and `shutil` is never imported in
# this file; this cell depends on interactive-session state -- confirm.
dst="figures/1225mutual"
os.mkdir(dst)
for i in intersect:
    dir_name="figures/1225 two layer/"
    src=os.path.join(dir_name,i)
    shutil.copy(src, dst, follow_symlinks=True)
#%% Raw series for this PMU, indexed [feature][time].
select=load_real_data(filename)
#%% Windows flagged on both PMU 1224 and 1225, plotted with +/-4 windows of context.
# NOTE(review): this first loop plots `select_1225`, which is never assigned in
# this file -- confirm the intended variable.
intersection1224_1225=np.intersect1d(a1224,a1225)
#dst="figures/1225mutual1000_100"
#os.mkdir(dst)
for anom in intersection1224_1225:
    print(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select_1225[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1225[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1225[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select_1225[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
#    plt.savefig('figures/1225mutual1000_100/%d.png' %anom)
    plt.show()
    print(a[int(anom)])
#%% Same mutual windows on this PMU's own raw series, with figures saved.
intersection1224_1225=np.intersect1d(a1224,a1225)
dst="figures/1224mutual1000_100"
os.mkdir(dst)
for anom in intersection1224_1225:
    print(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select[i][(anom-4)*int(SampleNum/2):((anom+4)*int(SampleNum/2)+40)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    plt.savefig('figures/1224mutual1000_100/%d.png' %anom)
    plt.show()
    print(a[int(anom)])
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,990
|
zyh88/PMU
|
refs/heads/master
|
/PMU data.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 12:26:15 2019
@author: hamed
"""
import numpy as np
import tensorflow as tf
import pandas as pd
import os
import pickle
import matplotlib.pyplot as plt
import operator
import math
#%%
# =============================================================================
# =============================================================================
# # read one file of the PMU data , each file is for 10 minutes
# =============================================================================
# =============================================================================
# All raw 10-minute PMU CSV file names in the data directory.
filenames=os.listdir("Raw_data")
#%%
# importing data from a file function
def OneFileImport(filename):
    """Read one 10-minute PMU CSV from the Raw_data directory into a DataFrame."""
    full_path = os.path.join("Raw_data", filename)
    frame = pd.read_csv(full_path)
    return frame
#%%
data=OneFileImport(filenames[0])
#%% Split one file's columns by PMU location; column names look like
# "<...>/<loc>/<ENTRY> (Mean)". Keep only per-channel means, drop LSTATE.
#pmu locations
SeparateData={}
Locations=['1086','1224','1225','1200']
for loc in Locations:
    SeparateData[loc]={}
columns=data.keys()
Tiemslots=data['Timestamp (ns)'].values
Dates=data['Human-Readable Time (UTC)'].values
for key in columns:
    col=key.split('/')
    if len(col)>1: #to ignore the time and date
        loc=col[1]
#        print(loc,col)
        entry, index = col[2].split(' ')
#        print(entry)
        if (entry !='LSTATE') and (index=='(Mean)'):
            SeparateData[loc][entry]=data[key]
#%%
Locations=['1086','1224','1225','1200']
#%% Concatenate every raw file: the first file initializes each series
# (triger==0); later files are appended (triger==1).
SeparateData={}
Locations=['1086','1224','1225','1200']
for loc in Locations:
    SeparateData[loc]={}
Tiemslots=[]
Dates=[]
triger=0
filecount=0
for file in filenames:
    CollectedData=OneFileImport(file)
    if triger==0:
        Tiemslots=CollectedData['Timestamp (ns)']
        Dates=CollectedData['Human-Readable Time (UTC)']
    if triger==1:
        Tiemslots=np.append(Tiemslots,CollectedData['Timestamp (ns)'])
        Dates=np.append(Dates,CollectedData['Human-Readable Time (UTC)'])
    columns=CollectedData.keys()
    for key in columns:
        col=key.split('/')
        if len(col)>1: #to ignore the time and date
            loc=col[1]
#            print(loc,col)
            entry, index = col[2].split(' ')
#            print(entry)
            if (entry !='LSTATE') and (index=='(Mean)'):
                if triger==0:
                    SeparateData[loc][entry]=CollectedData[key]
                if triger==1:
                    SeparateData[loc][entry]=np.append(SeparateData[loc][entry],CollectedData[key])
#    if filecount==2:
#        break
    triger=1
    print(filecount)
    filecount=filecount+1
#%% Persist the per-PMU dict of raw series.
# write python dict to a file
outputt = open('OneDay.pkl', 'wb')
pickle.dump(SeparateData, outputt)
outputt.close()
#%% Reload it for the derived-quantity computations below.
#read a pickle file
pkl_file = open('OneDay.pkl', 'rb')
selected_data = pickle.load(pkl_file)
pkl_file.close()
#%% Derive three-phase active power (P), reactive power (Q) and power factor
# from voltage/current magnitudes and angles; angles are in degrees, hence
# the pi/180 conversion inside cos/sin.
#active and reactive power consumption calculation
Active={}
Reacive={}
keys={}
pf={}
for loc in Locations:
    k=list(selected_data[loc].keys())
    keys[loc]=sorted(k)
    Active[loc]={}
    Reacive[loc]={}
    pf[loc]={}
for loc in Locations:
    Active[loc]['A']=selected_data[loc]['L1MAG']*selected_data[loc]['C1MAG']*(np.cos((selected_data[loc]['L1ANG']-selected_data[loc]['C1ANG'])*(np.pi/180)))
    Active[loc]['B']=selected_data[loc]['L2MAG']*selected_data[loc]['C2MAG']*(np.cos((selected_data[loc]['L2ANG']-selected_data[loc]['C2ANG'])*(np.pi/180)))
    Active[loc]['C']=selected_data[loc]['L3MAG']*selected_data[loc]['C3MAG']*(np.cos((selected_data[loc]['L3ANG']-selected_data[loc]['C3ANG'])*(np.pi/180)))
    Reacive[loc]['A']=selected_data[loc]['L1MAG']*selected_data[loc]['C1MAG']*(np.sin((selected_data[loc]['L1ANG']-selected_data[loc]['C1ANG'])*(np.pi/180)))
    Reacive[loc]['B']=selected_data[loc]['L2MAG']*selected_data[loc]['C2MAG']*(np.sin((selected_data[loc]['L2ANG']-selected_data[loc]['C2ANG'])*(np.pi/180)))
    Reacive[loc]['C']=selected_data[loc]['L3MAG']*selected_data[loc]['C3MAG']*(np.sin((selected_data[loc]['L3ANG']-selected_data[loc]['C3ANG'])*(np.pi/180)))
    # power factor = P / |S| per phase
    pf[loc]['A']=Active[loc]['A']/np.sqrt(np.square(Active[loc]['A'])+np.square(Reacive[loc]['A']))
    pf[loc]['B']=Active[loc]['B']/np.sqrt(np.square(Active[loc]['B'])+np.square(Reacive[loc]['B']))
    pf[loc]['C']=Active[loc]['C']/np.sqrt(np.square(Active[loc]['C'])+np.square(Reacive[loc]['C']))
    selected_data[loc]['PA']=Active[loc]['A']
    selected_data[loc]['PB']=Active[loc]['B']
    selected_data[loc]['PC']=Active[loc]['C']
    selected_data[loc]['QA']=Reacive[loc]['A']
    selected_data[loc]['QB']=Reacive[loc]['B']
    selected_data[loc]['QC']=Reacive[loc]['C']
    selected_data[loc]['pfA']=pf[loc]['A']
    selected_data[loc]['pfB']=pf[loc]['B']
    selected_data[loc]['pfC']=pf[loc]['C']
#%% Persist the enriched dict (raw series + derived P/Q/pf).
# write python dict to a file
output = open('CompleteOneDay.pkl', 'wb')
pickle.dump(selected_data, output)
output.close()
#%%
#read a pickle file
pkl_file = open('CompleteOneDay.pkl', 'rb')
selected_data = pickle.load(pkl_file)
pkl_file.close()
#%%
# =============================================================================
# =============================================================================
# # it gets a vector which is a voltage angle of one phase and it will return frequancy diffrence in each time
# =============================================================================
# =============================================================================
def frequency(angle, span=40):
    """Per-window mean angle increment of a voltage-angle series.

    The original stub overwrote ``span`` with 40, sliced an empty window
    (``angle[i*span:i*span]``) and returned an undefined ``df`` (NameError).
    This version honors ``span``, slices each window correctly and returns
    one value per full window: the mean sample-to-sample angle difference,
    which is proportional to the frequency deviation. Units follow the
    input (e.g. degrees/sample) -- TODO confirm desired scaling.

    Parameters
    ----------
    angle : array-like, 1-D
        Voltage angle samples of one phase.
    span : int, optional
        Window length in samples; default 40 matches the original
        hard-coded value.

    Returns
    -------
    np.ndarray of shape (len(angle) // span,)
    """
    angle = np.asarray(angle, dtype=float)
    df = np.empty(int(angle.shape[0] / span))
    for i in range(df.shape[0]):
        selected_angle = angle[i * span:(i + 1) * span]
        df[i] = np.mean(np.diff(selected_angle))
    return df
#%%
def P2R(r, angles):
    """Polar -> rectangular: build complex phasors from magnitude ``r``
    and ``angles`` in radians."""
    rotor = np.exp(1j * angles)
    return r * rotor
def R2P(x):
    """Rectangular -> polar: return ``(magnitude, angle_in_radians)``.

    Fix: the original called bare ``angle(x)``, which is a NameError unless
    numpy names were star-imported; use ``np.angle`` explicitly.
    """
    return abs(x), np.angle(x)
#%%
# Fit generated power as a quadratic function of per-unit voltage magnitude
# over a 500-sample window (indices 11500:12000).
r=selected_data['L1MAG'][11500:12000]
# Angle shifted by 180 deg. NOTE(review): 2*np.pi/180 is double the usual
# degree->radian factor -- confirm the factor of 2 is intentional.
ang=(selected_data['L1ANG'][11500:12000]+180)*(2*np.pi/180)
v=P2R(r,ang)
p=selected_data['PA'][11500:12000]
vrated=7200
# Normalize voltage to per-unit.
r=r/vrated
#%%
# Design matrix [1, r, r^2] for the least-squares quadratic fit.
mat=[np.ones(r.shape[0]),r,r**2]
mat=np.array(mat).transpose()
#%%
# NOTE(review): np.linalg.lstsq without rcond= emits a FutureWarning on
# modern NumPy; pass rcond=None to keep current behavior explicitly.
a=np.linalg.lstsq(mat,p)
coeff=a[0]
#%%
# Reconstructed (fitted) power vs. the measured series.
pgen=np.matmul(mat,coeff)
plt.plot(np.absolute(pgen))
# `p` appears to be a pandas Series here (has .values) -- verify.
plt.plot(np.absolute(list(p.values)))
plt.show()
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,991
|
zyh88/PMU
|
refs/heads/master
|
/one item training.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 15:06:42 2019
@author: hamed
"""
# Repeatedly (re)train a GAN anomaly detector on feature column 'TA' until
# the mean discriminator score lands in the target band (floor(mean*10)==4).
# NOTE(review): epochnum, batch_size, N, SampleNum, X_train,
# create_generator, create_discriminator, create_gan, training and norm come
# from earlier cells / imports not visible here.
mean=0
while mean!=4:
    # Pre-draw one random mini-batch index set per epoch.
    rnd={}
    for i in range(epochnum):
        rnd[i]=np.random.randint(low=0,high=N,size=batch_size)
    #    show(rnd[i])
    generator= create_generator()
    discriminator= create_discriminator()
    gan = create_gan(discriminator, generator)
    kk=['TA']
    for idx,key in enumerate(kk):
        # Column idx+6 selects from the angle-feature block (TA/TB/TC).
        X_train_temp=X_train[:,(idx+6)]
        #X_train.reshape(N,3*SampleNum)
        X_train_temp=X_train_temp.reshape(N,SampleNum,1)
        # Fix: time.clock() was removed in Python 3.8 -- perf_counter() is
        # the drop-in replacement for this elapsed-time print.
        tic = time.perf_counter()
        training(generator,discriminator,gan,epochnum,batch_size)
        toc = time.perf_counter()
        print(toc-tic)
    scores_temp={}
    probability_mean={}
    anomalies_temp={}
    #kk=['TA','TB','TC']
    for idx,key in enumerate(kk):
        print(key)
        X_train_temp=X_train[:,(idx+6)]
        #X_train.reshape(N,3*SampleNum)
        X_train_temp=X_train_temp.reshape(N,SampleNum,1)
        # Score the training windows in `rate` chunks to bound memory use.
        rate=1000
        shift=N/rate
        scores_temp[key]=[]
        for i in range(rate-1):
            temp=discriminator.predict_on_batch(X_train_temp[int(i*shift):int((i+1)*shift)])
            scores_temp[key].append(temp)
            print(i)
        scores_temp[key]=np.array(scores_temp[key])
        scores_temp[key]=scores_temp[key].ravel()
        probability_mean[key]=np.mean(scores_temp[key])
        # Flag windows whose centered score falls outside mu +/- 3*sigma.
        data=scores_temp[key]-probability_mean[key]
        mu, std = norm.fit(data)
        zp=3
        high=mu+zp*std
        low=mu-zp*std
        anomalies_temp[key]=np.union1d(np.where(data>=high)[0], np.where(data<=low)[0])
        print(anomalies_temp[key].shape)
    # Retrain from scratch unless the mean score is in [0.4, 0.5).
    mean=np.mean(scores_temp['TA'])
    mean=np.floor(int(mean*10))
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,992
|
zyh88/PMU
|
refs/heads/master
|
/last_clustering.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import random
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from natsort import natsorted
from scipy import stats
from seaborn import heatmap
import loading_data
from loading_data import load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_train_vitheta_data_V,load_data_with_features,load_standardized_data_with_features
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # extract candidate for the clusters which extraxted by hand from July 03
# =============================================================================
# =============================================================================
# =============================================================================
# Build {cluster_id: [event chunk indices]} from the hand-made folder layout
# onedayclusters/<cluster>/<event>.<ext> -- one image file per event.
cluster_folder_name='onedayclusters'
cluster_folder=os.listdir(cluster_folder_name)
# "separarted" is a typo for "separated", kept: later cells use this name.
separarted_events={}
cl_num=0
for cluster in cluster_folder:
    separarted_events[cl_num]=[]
    events=os.listdir(cluster_folder_name+'/'+cluster)
    for ev in events:
        # The file stem is the integer event chunk index.
        separarted_events[cl_num].append(int(ev.split('.')[0]))
    cl_num+=1
#%%
# =============================================================================
# =============================================================================
## call data which includes V, I and theta (9 features)
# =============================================================================
# =============================================================================
# July 03 dataset; k lists the 9 channels used throughout: voltage
# magnitudes (L*), current magnitudes (C*) and angle features (T*).
filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#%%
# =============================================================================
# =============================================================================
# # standardized data
# =============================================================================
# =============================================================================
dds=load_standardized_data_with_features(filename,k)
#%%
# =============================================================================
# =============================================================================
# # normal data
# =============================================================================
# =============================================================================
dd=load_data_with_features(filename,k)
#%%
# =============================================================================
# =============================================================================
# # train data
# =============================================================================
# =============================================================================
# Training windows: N=500000 chunks of SampleNum=40 samples starting at 0.
start,SampleNum,N=(0,40,500000)
filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
tt=load_train_vitheta_data_V(start,SampleNum,N,filename,k)
#%%
# Same loading for July 13 (raw + standardized).
filename='data/Armin_Data/July_13/pkl/rawdata13.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
dds13=load_standardized_data_with_features(filename,k)
dd13=load_data_with_features(filename,k)
start,SampleNum,N=(0,40,500000)
#filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
#k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#tt10=load_train_vitheta_data_V(start,SampleNum,N,filename,k)
#%%
# =============================================================================
# =============================================================================
# # max corr coeff funciton based on each two event
# =============================================================================
# =============================================================================
def ccf(anom1,anom2,data,scale=6,shift=0,SampleNum=40):
    """Maximum channel-averaged cross-correlation between two events.

    Each event index addresses a window of 2*40*scale samples centered at
    ``anom*SampleNum/2 (+shift)``. The second window is circularly rolled
    over lags -60..59; the Pearson correlation, averaged over the 9
    channels, is maximized over those lags.

    Parameters
    ----------
    anom1, anom2 : int
        Event chunk indices (window centers at anom*SampleNum/2).
    data : indexable of 9 one-dimensional arrays
        Channel series; each data[j] must cover both windows.
    scale, shift, SampleNum : int, optional
        Window half-width factor, center offset and chunk size. Defaults
        reproduce the original hard-coded 480-sample windows, so existing
        3-argument calls behave identically.

    Returns
    -------
    float
        Max over lags of the averaged correlation; stays -1 if every lag
        produced NaN (e.g. a constant window), matching the original.
    """
    half=int(SampleNum/2)
    # Hoist the loop-invariant slice bounds out of the lag loop.
    lo1=anom1*half-40*scale+shift
    hi1=anom1*half+40*scale+shift
    lo2=anom2*half-40*scale+shift
    hi2=anom2*half+40*scale+shift
    max_corr=-1
    for i in range(120):
        cr=0
        for j in range(9):
            cr+=np.corrcoef(data[j][lo1:hi1],
                            np.roll(data[j][lo2:hi2],i-60))[0,1]
        cr=cr/9
        # NaN never compares greater, so NaN lags are ignored as before.
        if cr>max_corr:
            max_corr=cr
    return max_corr
#%%
#%%
# =============================================================================
# =============================================================================
# # max corr coeff funciton based on each two event
# =============================================================================
# =============================================================================
def ccfWithRepresentatives(anom1,rep1,data_anom):
    """Maximum channel-averaged cross-correlation between an event window
    (480 samples centered at anom1*20) and a stored representative window
    ``rep1`` (9 x 480), maximized over circular lags -60..59."""
    scale=6
    shift=0
    SampleNum=40
    center=anom1*int(SampleNum/2)
    lo=center-40*scale+shift
    hi=center+40*scale+shift
    best=-1
    for lag in range(120):
        total=0
        for ch in range(9):
            total+=np.corrcoef(data_anom[ch][lo:hi],
                               np.roll(rep1[ch],lag-60))[0,1]
        avg=total/9
        if avg>best:
            best=avg
    return best
#%%
# =============================================================================
# =============================================================================
# # Training model - extract candidate for each pre selected cluster
# =============================================================================
# =============================================================================
def candidate_correlation(cluster_events,data):
    """Pick a representative ("candidate") event for one cluster.

    Builds the pairwise max-cross-correlation matrix (via ccf) over at most
    50 randomly chosen events of the cluster and returns the matrix and the
    event with the highest total similarity to the rest.

    Parameters
    ----------
    cluster_events : sequence of int
        Event chunk indices belonging to one cluster.
    data : indexable of 9 one-dimensional arrays
        Standardized channel data, passed straight through to ccf().

    Returns
    -------
    (corr, candid) : (np.ndarray of shape (N, N), int)
        Similarity matrix and the chosen representative event index.
    """
    #select number of events that we want to consider in each group for training
    N=len(cluster_events)
    trh=50          # cap the pairwise work at 50x50 ccf() calls
    N=min(N,trh)
    corr=np.zeros((N,N))
    #restricted candidate
    selected_events=np.random.choice(cluster_events, N, replace=False)
    for idx1,anom1 in enumerate(selected_events):
        print(idx1)
        # Fix: time.clock() was removed in Python 3.8; perf_counter() is the
        # drop-in replacement for this per-row timing print.
        tic=time.perf_counter()
        for idx2,anom2 in enumerate(selected_events):
            if idx2>=idx1:
                # Upper triangle: compute the symmetric similarity once.
                max_corr=ccf(anom1,anom2,data)
                corr[idx1,idx2]=max_corr
            else:
                corr[idx1,idx2]=corr[idx2,idx1]
        toc = time.perf_counter()
        print(toc-tic)
    # The candidate maximizes the column-sum of similarities.
    index=np.argmax(sum(corr))
    candid=selected_events[index]
    return corr,candid
#%%
# =============================================================================
# =============================================================================
# # calculate candidate of each cluster
# =============================================================================
# =============================================================================
# Compute one representative event per hand-labeled cluster.
representatives={}
for cl in separarted_events:
    cluster_event=separarted_events[cl]   # NOTE(review): unused local
    _,representatives[cl]=candidate_correlation(separarted_events[cl],dds)
#%%
# =============================================================================
# =============================================================================
# # show representatives
# =============================================================================
# =============================================================================
# show() is defined elsewhere in the project -- presumably plots one event.
for can in representatives:
    show([representatives[can]],dd)
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # test the whole events one by one to see the accuracy of the candidates
# =============================================================================
# =============================================================================
# =============================================================================
# Assign up to 100 events of each labeled cluster to the nearest
# representative: test_event_clusters[event] = [predicted, true] cluster ids.
test_event_clusters={}
for cl in separarted_events:
    print(cl)
    temp_cluster_evevnts=separarted_events[cl]
    #check with the representative
    count=0
    for event in temp_cluster_evevnts:
        if count<100:
            print(event)
            nearest_distance=-1
            for can in representatives:
                # ccf similarity: larger is closer.
                dist=ccf(event,representatives[can],dds)
                if dist>nearest_distance:
                    nearest_distance=dist
                    closest_candidate=can
            test_event_clusters[event]=[closest_candidate,cl]
            count+=1
#%%
# =============================================================================
# =============================================================================
# # calculate the accuracy of the models (building multiclass confusion matrix)
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# # whole clusters even with the one phase events
# =============================================================================
# =============================================================================
# Multiclass confusion matrix: rows = true cluster, cols = predicted.
# NOTE(review): "cl_cum" looks like a typo for cl_num but is used
# consistently, so behavior is unaffected.
cl_cum=len(separarted_events)
confusion_matrix=np.zeros((cl_num,cl_cum))
for cl in separarted_events:
    print(cl)
    temp_cluster_evevnts=separarted_events[cl]
    #check with the representative
    count=0
    for event in temp_cluster_evevnts:
        if count<100:
            confusion_matrix[test_event_clusters[event][1],test_event_clusters[event][0]]+=1
            count+=1
# Per-class one-vs-rest counts derived from the confusion matrix.
acc={}
acc['tp']=[]
acc['fp']=[]
acc['fn']=[]
acc['tn']=[]
for i in range(cl_num):
    acc['tp'].append(confusion_matrix[i,i])
    acc['fp'].append(sum(confusion_matrix[:,i])-confusion_matrix[i,i])
    acc['fn'].append(sum(confusion_matrix[i,:])-confusion_matrix[i,i])
    acc['tn'].append(sum(sum(confusion_matrix[:,:]))-acc['tp'][i]-acc['fp'][i]-acc['fn'][i])
#%%
# =============================================================================
# =============================================================================
# # total accuracy of clustering model
# =============================================================================
# =============================================================================
total_acccuracy=(sum(acc['tp'])+sum(acc['tn']))/(sum(acc['tp'])+sum(acc['tn'])+sum(acc['fp'])+sum(acc['fn']))
# Micro F1 = 2*TP / (2*TP + FP + FN), written here as tp+tp.
F1=(sum(acc['tp'])+sum(acc['tp']))/(sum(acc['tp'])+sum(acc['tp'])+sum(acc['fp'])+sum(acc['fn']))
#%%
# Hard-coded 8x8 confusion matrices for 4 baseline methods x 4 distance
# measures, pasted as comma-separated strings (presumably from external
# experiment runs) and parsed below.
ConfMtr={}
methods=['KNN','Kmed','fuzzy-cmedoids','proposed']
distances=['eu','dtw','soft-dtw','mcc']
cl_num=8
for m in methods:
    ConfMtr[m]={}
    for d in distances:
        ConfMtr[m][d]=np.zeros((cl_num,cl_cum))
#%%
cluster_events_number=[100,35,13,100,13,34,100,54]
# a[1]..a[16]: flattened 8x8 confusion matrices, one per (distance, method)
# pair in the nested-loop order used below.
a={}
a[1]='40,9,9,9,9,9,9,9,2,18,2,2,2,2,2,2,1,1,7,1,1,1,1,1,9,9,9,40,9,9,9,9,1,1,1,1,7,1,1,1,2,2,2,2,2,18,2,2,9,9,9,9,9,9,40,9,4,4,4,4,4,4,4,26'
a[2]='55,6,6,7,6,6,7,6,2,22,2,2,2,2,2,2,1,1,8,1,1,1,1,1,7,6,6,55,6,6,7,6,1,1,1,1,8,1,1,1,2,2,2,2,2,21,2,2,7,6,6,7,6,6,55,6,3,3,3,3,3,3,3,32'
a[3]='53,7,7,7,7,7,7,7,2,21,2,2,2,2,2,2,1,1,8,1,1,1,1,1,7,7,7,53,7,7,7,7,1,1,1,1,8,1,1,1,2,2,2,2,2,21,2,2,7,7,7,7,7,7,53,7,3,3,3,3,3,3,3,31'
a[4]='68,5,5,4,5,5,4,5,2,18,2,2,2,2,2,2,1,1,5,1,1,1,1,1,4,5,5,68,5,5,4,5,1,1,1,1,5,1,1,1,2,2,2,2,2,17,2,2,4,5,5,4,5,5,68,5,3,4,3,3,3,4,3,30'
a[5]='48,7,7,7,7,7,7,7,2,20,2,2,2,2,2,2,1,1,8,1,1,1,1,1,7,7,7,48,7,7,7,7,1,1,1,1,8,1,1,1,2,2,2,2,2,20,2,2,7,7,7,7,7,7,48,7,4,3,3,4,3,3,4,30'
a[6]='92,1,1,1,1,1,1,1,1,28,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,92,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,27,1,1,1,1,1,1,1,1,92,1,1,1,1,1,1,1,1,46'
a[7]='91,1,1,2,1,1,2,1,1,25,1,1,1,1,1,1,1,1,4,1,1,1,1,1,2,1,1,91,1,1,2,1,1,1,1,1,4,1,1,1,1,1,1,1,1,24,1,1,2,1,1,2,1,1,91,1,2,1,1,2,1,1,2,44'
a[8]='95,1,1,1,1,1,1,1,1,29,1,1,1,1,1,1,1,1,7,1,1,1,1,1,1,1,1,95,1,1,1,1,1,1,1,1,7,1,1,1,1,1,1,1,1,28,1,1,1,1,1,1,1,1,95,1,1,1,1,1,1,1,1,48'
a[9]='47,7,7,8,7,7,8,8,2,20,2,2,2,2,2,2,1,1,7,1,1,1,1,1,8,7,7,47,7,7,8,8,1,1,1,1,7,1,1,1,2,2,2,2,2,19,2,2,8,7,7,8,7,7,47,8,4,4,4,4,4,4,4,29'
a[10]='93,1,1,1,1,1,1,1,1,28,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,93,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,27,1,1,1,1,1,1,1,1,93,1,1,1,1,1,1,1,1,47'
a[11]='91,1,1,1,1,1,1,1,1,27,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,91,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,26,1,1,1,1,1,1,1,1,91,1,1,1,1,1,1,1,1,45'
a[12]='91,1,1,1,1,1,1,1,1,27,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,91,1,1,1,1,1,1,1,1,6,1,1,1,1,1,1,1,1,26,1,1,1,1,1,1,1,1,91,1,1,1,1,1,1,1,1,45'
a[13]='37,9,9,9,9,9,9,9,2,18,2,2,2,2,2,2,1,1,7,1,1,1,1,1,9,9,9,37,9,9,9,9,1,1,1,1,7,1,1,1,2,2,2,2,2,17,2,2,9,9,9,9,9,9,37,9,4,4,4,4,4,4,4,25'
a[14]='97,0,0,0,0,0,0,0,1,30,1,1,1,1,1,1,1,1,7,1,1,1,1,1,0,0,0,97,0,0,0,0,1,1,1,1,7,1,1,1,1,1,1,1,1,29,1,1,0,0,0,0,0,0,97,0,1,1,1,1,1,1,1,49'
a[15]='95,1,1,1,1,1,1,1,1,29,1,1,1,1,1,1,1,1,7,1,1,1,1,1,1,1,1,95,1,1,1,1,1,1,1,1,7,1,1,1,1,1,1,1,1,28,1,1,1,1,1,1,1,1,95,1,1,1,1,1,1,1,1,48'
a[16]='99,0,0,0,0,0,0,0,0,32,0,0,0,0,0,0,1,1,9,1,1,1,1,1,0,0,0,99,0,0,0,0,1,1,1,1,9,1,1,1,0,0,0,0,0,31,0,0,0,0,0,0,0,0,99,0,0,0,0,0,0,0,0,52'
# Parse each string into an int 8x8 matrix, filling ConfMtr in
# (distance-major, method-minor) order.
c=1
for d in distances:
    for m in methods:
        temppp=a[c].split(',')
        for i,x in enumerate(temppp):
            temppp[i]=int(x)
        temppp=np.array(temppp).reshape(8,8)
        ConfMtr[m][d]=temppp
        c+=1
# Overall clustering accuracies (rows: methods, cols: distances before the
# transpose below).
whoe_accuracyofclsuterings=[[0.4308,0.5676,0.5415,0.6298],[0.5192,0.8742,0.8519,0.8783],[0.4967,0.8753,0.8724,0.8724],[0.4167,0.9219,0.8783,0.9685]]
whoe_accuracyofclsuterings=np.array(whoe_accuracyofclsuterings)
whoe_accuracyofclsuterings=whoe_accuracyofclsuterings.transpose()
#%%
# =============================================================================
# =============================================================================
# # obtain the threshold for creating new cluster
### maximum distance between representatives
# =============================================================================
# =============================================================================
# Pairwise similarity between the 8 representatives themselves; used to pick
# a threshold for opening a new cluster.
rep_dist=np.zeros((8,8))
for cl1 in representatives:
    print(cl1)
    for cl2 in representatives:
        rep_dist[cl1,cl2]=ccf(representatives[cl1],representatives[cl2],dds)
#%%
# =============================================================================
# =============================================================================
# # Save the representative shapes from July 03
# =============================================================================
# =============================================================================
# Store the 9x480 data window of each representative so later days can be
# compared without reloading July 03.
representative_data={}
scale=6
shift=0
SampleNum=40
for rep in representatives:
    if rep <8:
        anomm=representatives[rep]
        representative_data[rep]=dds[:,anomm*int(SampleNum/2)-40*scale+shift:(anomm*int(SampleNum/2)+40*scale+shift)]
    # Representative 8 came from July 04: reload `dds` from that day first.
    if rep==8:
        start,SampleNum,N=(0,40,500000)
        filename='data/Armin_Data/July_04/pkl/rawdata4.pkl'
        k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
        dds=load_standardized_data_with_features(filename,k)
        anomm=representatives[rep]
        representative_data[rep]=dds[:,anomm*int(SampleNum/2)-40*scale+shift:(anomm*int(SampleNum/2)+40*scale+shift)]
#detail about representatives
# det_rep maps day-of-July -> representative ids discovered that day.
det_rep={3:[i for i in range(8)],4:[8],5:[],6:[],7:[],8:[],9:[]}
#%%
# =============================================================================
# =============================================================================
# # check the one day events (july 04) and make new clusters if it needed
# =============================================================================
# =============================================================================
#download the data for the considered day
# For each day July 04-09: assign every detected event to the most similar
# representative, or open a brand-new cluster when the best similarity is
# below `trh`. Mutates representatives/representative_data in place.
total_event_cluster_data={}
ClusterNumber=len(representatives)
total_cluster_events={}
for i in representatives:
    total_cluster_events[i]=[]
for day in [4,5,6,7,8,9]:
    print(day)
    total_event_cluster_data[day]={}
    filename='data/Armin_Data/July_0'+str(day)+'/pkl/rawdata'+str(day)+'.pkl'
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
    data_04=load_standardized_data_with_features(filename,k)
    #detected events in this day
    event_folder_04='figures/all_events/July_0'+str(day)+'/GAN'
    events_04=os.listdir(event_folder_04)
    # Keep only the file stems (event chunk indices as strings).
    temp_ev_04=[]
    for i in events_04:
        temp_ev_04.append(i.split('.')[0])
    events_04=temp_ev_04
# =============================================================================
# =============================================================================
# # check each event in july 04 to representetives and if it's below the treshold make new cluster
# =============================================================================
# =============================================================================
    select_1224=data_04
    # Similarity threshold for accepting an event into an existing cluster.
    trh=0.14
    for count,event in enumerate(events_04):
        if count% 100==0:
            print('iter num: %i', count)
        event=int(event)
        #check the dist from representatives
        max_similarity=-1
        ClusterNumber=len(representatives)
        for candid in representative_data:
            sim=ccfWithRepresentatives(event,representative_data[candid],data_04)
            if sim>max_similarity:
                max_similarity=sim
                best_candidate=candid
        if max_similarity>trh:
            total_cluster_events[best_candidate].append(event)
            total_event_cluster_data[day][event]=best_candidate
#    print('event is: ',event,'nearest candidate: ',best_candidate,'similarity: ',max_similarity)
        else:
            # Below threshold: this event seeds a new cluster and becomes
            # its representative.
            scale=6
            shift=0
            SampleNum=40
            print('new cluter')
            print('new cluster is: ',event,'nearest candidate was: ',best_candidate,'similarity was: ',max_similarity)
            det_rep[day]=[ClusterNumber]
            representatives[ClusterNumber]=event
            representative_data[ClusterNumber]=data_04[:,event*int(SampleNum/2)-40*scale+shift:(event*int(SampleNum/2)+40*scale+shift)]
            total_cluster_events[ClusterNumber]=[event]
            total_event_cluster_data[day][event]=ClusterNumber
#            show([event],select_1224)
#%%
##cap bank cluster
#cap_jul4_ev=[471359,471360,471361,48493,48494,48495]
#ccfJul4CapBank=np.zeros((6,6))
#for x1,i in enumerate(cap_jul4_ev):
# for x2,j in enumerate(cap_jul4_ev):
# ccfJul4CapBank[x1,x2]=ccf(i,j,data_04)
#
#
#index=np.argmax(sum(ccfJul4CapBank))
#candid=cap_jul4_ev[index]
#
#
#%%
# =============================================================================
# =============================================================================
# # Show each event we want from V, I and theta data
# =============================================================================
# =============================================================================
#select_1224=data_04
def showw(events,select_1224):
    """Plot V, I and theta (three phases per panel) over a 480-sample
    window centered on each event index in ``events``."""
    half_chunk=40//2          # SampleNum/2: samples per event chunk center
    window=240                # half-width of the plotted window
    panels=((221,(0,1,2),'V'),(222,(3,4,5),'I'),(223,(6,7,8),'T'))
    for ev in events:
        print(ev)
        center=int(ev)*half_chunk
        for position,channels,label in panels:
            plt.subplot(position)
            for ch in channels:
                plt.plot(select_1224[ch][center-window:center+window])
            plt.title(label)
        plt.show()
#%%
# =============================================================================
# =============================================================================
# # save events figure in the same name folder but with V,I,T
# =============================================================================
# =============================================================================
# Re-plot selected stage-one clusters (binary-feature-string folder names)
# with the V/I/T view and save them under clusters/vit/.
fn='clusters/cls/'
fnfolders=os.listdir(fn)
for f in fnfolders:
    clfolders=os.listdir(fn+f)
#    print(f)
    # Only these three feature signatures are re-rendered here.
    if f=='000000010' or f=='000000011' or f=='000000111':
        print(f)
        for cl in clfolders:
            showevents=[]
            imagelist=os.listdir(fn+f+'/'+cl)
            for ev in imagelist:
                showevents.append(int(ev.split('.')[0]))
            destination='clusters/vit/'+f+'/'+cl
            # show() with a destination saves figures -- defined elsewhere.
            show(showevents,dd10,destination)
            print(destination)
#%%
# =============================================================================
# =============================================================================
# # mistakes
# =============================================================================
# =============================================================================
# One hand-picked misclassified event, saved for inspection.
mistakes_folder='clusters/vit/mistakes/'
show([347468],dd13,mistakes_folder)
#%%
# =============================================================================
# =============================================================================
# # Show each event we want from V, I and theta data
# =============================================================================
# =============================================================================
def show_representatives(rep):
    """Plot each representative's stored 9-channel window (V/I/T panels,
    three phases each) and print the size of its cluster first."""
    layout=((221,(0,1,2),'V'),(222,(3,4,5),'I'),(223,(6,7,8),'T'))
    for key in rep:
        # How many events this representative stands for (module global).
        print(len(total_cluster_events[key]))
        print(key)
        for position,channels,label in layout:
            plt.subplot(position)
            for ch in channels:
                plt.plot(rep[key][ch])
            plt.title(label)
        plt.show()
#%%
# IPython cell: displays `anomalies` -- a dict {feature_name: event indices}
# produced in another session/script; not defined in this file.
anomalies
#%%%
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# # # # # # groupby feature detected event
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
#all events for July 03
# Union of events across all detection features.
whole_anoms=[]
for f in anomalies:
    whole_anoms.extend(anomalies[f])
whole_anoms=np.unique(whole_anoms)
#make embeded 9 features 0 and 1 for each event
# Binary matrix: row = feature, column = event; 1 iff that feature flagged
# the event.
July03_anomalies_detail={}
July03_anomalies_detail['event_time_chunk_number']=whole_anoms
July03_anomalies_detail['embeded_detection_features']=np.zeros((9,len(whole_anoms)))#9 is the independent number of features that we have
for idx,ev in enumerate(whole_anoms):
    if idx% 100==0:
        print(idx)
    for fnum,f in enumerate(anomalies):
        if np.isin(ev,anomalies[f]):
            July03_anomalies_detail['embeded_detection_features'][fnum,idx]=1
        else:
            July03_anomalies_detail['embeded_detection_features'][fnum,idx]=0
#%%
#now we seperate all events based on first step clustering
# Stage one: group events by their 9-bit feature signature string,
# e.g. '111000000' = flagged only by the three voltage features.
Stage_one_event_clusters={}
for i in range(len(July03_anomalies_detail['event_time_chunk_number'])):
    i=int(i)
    embdftr=July03_anomalies_detail['embeded_detection_features'][:,i]
    string=''
    for j in embdftr:
        string=string+str(int(j))
    if string in Stage_one_event_clusters:
        Stage_one_event_clusters[string].append(July03_anomalies_detail['event_time_chunk_number'][i])
    else:
        Stage_one_event_clusters[string]=[July03_anomalies_detail['event_time_chunk_number'][i]]
#%%
#inside each of these stge one clusters we should cluster them based on their simiarity
# Keep only the signatures of interest for the second-stage clustering.
monitored_first_stage_clusters={}
monitored_first_stage_clusters['111111111']=Stage_one_event_clusters['111111111']#all features 3ph
monitored_first_stage_clusters['111000000']=Stage_one_event_clusters['111000000']#just V 3ph
monitored_first_stage_clusters['000111111']=Stage_one_event_clusters['000111111']# I and cos(theta) 3ph
# =============================================================================
monitored_first_stage_clusters['noise']=Stage_one_event_clusters['000000110']#noise cluster which is separated in the stage one
# =============================================================================
#monitored_first_stage_clusters['OnePhase']=Stage_one_event_clusters['111111111']
#%%
# Drop signatures with fewer than 15 events (iterate the original, delete
# from the copy so the dict is not mutated while iterating).
Stage_one_copy=Stage_one_event_clusters.copy()
for i in Stage_one_event_clusters:
    if len(Stage_one_copy[i])<15:
        del Stage_one_copy[i]
#%%
# =============================================================================
# =============================================================================
# # event clsuters in different days
# =============================================================================
# =============================================================================
# Regroup the day->event->cluster mapping as day->cluster->[events].
cluster_per_day={}
for day in total_event_cluster_data.keys():
    cluster_per_day[day]={}
    selected_day_data=total_event_cluster_data[day]
    for ev in selected_day_data:
        cl=selected_day_data[ev]
        cl_in_day=list(cluster_per_day[day].keys())
        if cl in cl_in_day:
            cluster_per_day[day][cl].append(ev)
        else:
            cluster_per_day[day][cl]=[ev]
#%%
# =============================================================================
# =============================================================================
# # number of cluster events in different days
# =============================================================================
# =============================================================================
# Human-readable labels for the discovered cluster ids.
cl_def={0:'back to back',1:'current step down', 2:'signature',3:'med',4:'noise',5:'1 or 2 phases',6:'inrush',7:'med 2',8:'cap bank',9:'hifreq',10:'hifreq',11:'hifreq'}
for day in cluster_per_day:
    print('In July ',day,': ')
    for cl in cluster_per_day[day]:
        print('number of events in cluster ',cl_def[cl],' is ', len(cluster_per_day[day][cl]))
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,993
|
zyh88/PMU
|
refs/heads/master
|
/clustering.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.fftpack import fft, ifft
from dtw import dtw
from fastdtw import fastdtw
import time
from scipy.spatial.distance import euclidean
from tslearn.clustering import GlobalAlignmentKernelKMeans
import loading_data
from loading_data import load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_standardized_data_with_features
from scipy import stats
from sklearn.ensemble import IsolationForest
import seaborn as sns; sns.set()
#%%
# =============================================================================
# =============================================================================
# # take out the event pointers from any kind of model
# =============================================================================
# =============================================================================
dir='figures/all_events/'
event_points={}
for i in [0]:
file=dir+'July_0'+str(i+3)
GAN_events_file=file+'/GAN/anoms_july_0'+str(i+3)+'.csv'
GAN_voltage_events_file=file+'/GAN_voltage/anoms_voltage_july_0'+str(i+3)+'.csv'
Window_events_file=file+'/window/anoms_july_0'+str(i+3)+'.csv'
GAN=pd.read_csv(GAN_events_file,header=None)[0].values
GANV=pd.read_csv(GAN_voltage_events_file,header=None)[0].values
window=pd.read_csv(Window_events_file,header=None)[0].values
GAN_events_file=file+'/no_event'+'.xlsx'
GAN_voltage_events_file=file+'/no_event_v'+'.xlsx'
GANN=pd.read_excel(GAN_events_file)
GANVN=pd.read_excel(GAN_voltage_events_file)
GANVN=GANVN['GAN voltage'].values
windowN=GANN['window'].values
GANN=GANN['GAN'].values
GANN = GANN[~np.isnan(GANN)]
GANVN = GANVN[~np.isnan(GANVN)]
windowN = windowN[~np.isnan(windowN)]
event_points[i+3]={}
event_points[i+3]['GAN_event']=np.setdiff1d(GAN,GANN)
event_points[i+3]['GANV_event']=np.setdiff1d(GANV,GANVN)
event_points[i+3]['GANV_total']=np.union1d(GAN,GANV)
event_points[i+3]['GAN_total_events']=np.union1d(event_points[i+3]['GAN_event'],event_points[i+3]['GANV_event'])
event_points[i+3]['window_event']=np.setdiff1d(window,windowN)
all_event_points=[]
for event in event_points[i+3]['GAN_total_events']:
# points=np
low=event*20-240
high=event*20+240
rng=np.arange(low,high)
all_event_points.append(rng)
all_event_points =np.array(all_event_points)
mutual_GAN_window=[]
for j in event_points[i+3]['window_event']:
if j in all_event_points:
mutual_GAN_window.append(j)
mutual_GAN_window=np.array(mutual_GAN_window)
event_points[i+3]['mutual_GAN_window']=mutual_GAN_window
whole_event_number=event_points[i+3]['GAN_total_events'].shape[0]+event_points[i+3]['window_event'].shape[0]-mutual_GAN_window.shape[0]
print(i)
#%%
# =============================================================================
# =============================================================================
# # save event data from real and standardized and the reduced mean as well
# =============================================================================
# =============================================================================
data_file='data/Armin_Data/July_03/pkl/J3.pkl'
std_data=load_standardized_data(data_file)
#%%
# =============================================================================
# =============================================================================
# # saving data for events
# =============================================================================
# =============================================================================
r_data=load_real_data(data_file)
scale=20
shift=240
real_data={}
std_no_mean_data={}
standard_data={}
for i in event_points[3]['GAN_total_events']:
i=int(i)
start=scale*i-shift
end=scale*i+shift
tempreal=r_data[:,start:end]
tempdata=std_data[:,start:end]
real_data[i]=tempreal
standard_data[i]=tempdata
tempdata=(tempdata-tempdata.mean(axis=1).reshape(-1,1))
std_no_mean_data[i]=tempdata
#%%
# =============================================================================
# =============================================================================
# # save all type of events data for July third
# =============================================================================
# =============================================================================
real="figures/all_events/July_03/real.pkl"
std="figures/all_events/July_03/std.pkl"
stdnomean="figures/all_events/July_03/stdnomean.pkl"
output = open(real, 'wb')
pkl.dump(real_data, output)
output.close()
output = open(std, 'wb')
pkl.dump(standard_data, output)
output.close()
output = open(stdnomean, 'wb')
pkl.dump(std_no_mean_data, output)
output.close()
#%%
# =============================================================================
# =============================================================================
# # load the event points
# =============================================================================
# =============================================================================
stdnomean="figures/all_events/July_03/stdnomean.pkl"
pkl_file = open(stdnomean, 'rb')
std_no_mean_data = pkl.load(pkl_file)
pkl_file.close()
#%%
# =============================================================================
# =============================================================================
# # selected event points for testing clustering methods
# =============================================================================
# =============================================================================
#selected_events=[350,351,11158,7417,21809,62447,42498,54563,66279,102488,103869
# ,103860,103871,105156,69018,57959,56316,309485,306447,295168
# ,255848,348846,348898,349143,349524,30855,28396,148978,49131,64830
# ,77780,67276,121772,400302]
def pltshow(inp):
    """Plot the V/I/P/Q channel groups of each event in *inp* on a 2x2 grid.

    Reads the module-level ``std_no_mean_data`` dict (event id -> per-channel
    array; rows 0-2 = V, 3-5 = I, 6-8 = P, 9-11 = Q — assumed from the
    subplot titles, confirm against the data loader). One figure per event.
    """
    # Removed: dead counter `ii` and commented-out debug print.
    # (subplot position, channel rows, panel title)
    panels = [(221, (0, 1, 2), 'V'),
              (222, (3, 4, 5), 'I'),
              (223, (6, 7, 8), 'P'),
              (224, (9, 10, 11), 'Q')]
    for anom in inp:
        for pos, channels, label in panels:
            plt.subplot(pos)
            for ch in channels:
                plt.plot(std_no_mean_data[anom][ch])
            # adjacent string literals concatenate to 'ABC' -> one legend
            # entry per phase line
            plt.legend('A' 'B' 'C')
            plt.title(label)
        plt.show()
#%%
# =============================================================================
# =============================================================================
# # test the dtw for selected events
# =============================================================================
# =============================================================================
euclidean_norm = lambda x, y: np.abs(x - y)
#d, cost_matrix, acc_cost_matrix, path = dtw(standard_data[350][0], standard_data[309485][0], dist=euclidean_norm)
#plt.imshow(acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')
#
#plt.plot(path[0], path[1], 'w')
#plt.show()
#%%
dtw_dists=[]
for i in selected_events:
print(i)
temp_dist=[]
for j in selected_events:
# distance, path = fastdtw(standard_data[i][3], standard_data[j][3], dist=euclidean)
distance=np.sum(euclidean_norm(std_no_mean_data[i][0], std_no_mean_data[j][0]))
temp_dist.append(distance)
temp_dist=np.array(temp_dist)
dtw_dists.append(temp_dist)
dtw_dists=np.array(dtw_dists)
#%%
#%%
events=np.array(list(std_no_mean_data.keys()))
evt_num=events.shape[0]
random_select=np.random.choice(evt_num, 500, replace=False)
selected_random_events=events[random_select]
#%%
N=len(std_no_mean_data.keys())
N=selected_random_events.shape[0]
corr=np.zeros((N,N))
for idx1,anom1 in enumerate(selected_random_events):
if idx1% 100==0:
print('iter num: %i', idx1)
tik=time.clock()
for idx2,anom2 in enumerate(selected_random_events):
if idx2>=idx1:
if idx2% 100==0:
print('iter num: %i', idx2)
max_corr=0
for i in range(120):
cr=0
for j in range(4):
cr+=np.corrcoef(std_no_mean_data[anom1][j*3],np.roll(std_no_mean_data[anom2][j*3],i-60))[0,1]
cr=cr/4
if cr>max_corr:
max_corr=cr
corr[idx1,idx2]=max_corr
else:
corr[idx1,idx2]=corr[idx2,idx1]
toc = time.clock()
print(toc-tik)
#%%
# =============================================================================
# =============================================================================
# # clustering by eliminating similar ones
# =============================================================================
# =============================================================================
trh=0.7
classes={}
count=0
remain=corr.shape[0]
while remain>1:
ax = sns.heatmap(corr)
# plt.plot(ax)
classes[count]=[]
del_ids=[]
rows=list(np.arange(1,remain+1)-1)
for id,h in enumerate(corr[0]):
if h> 0.7:
classes[count].append(selected_random_events[id])
del_ids.append(id)
rows.remove(id)
for i in del_ids:
for id,h in enumerate(corr[i]):
if h> 0.7:
if not selected_random_events[id] in classes[count]:
classes[count].append(selected_random_events[id])
if id in rows:
rows.remove(id)
count+=1
corr=corr[rows][:,rows]
remain=corr.shape[0]
plt.show()
#%%
trh=0.7
classes={}
count=0
remain=corr.shape[0]
sre=np.copy(selected_random_events)
#sre=list(sre)
corr=np.copy(correlation200)
while remain>0:
print(sre)
ax = sns.heatmap(corr)
# plt.plot(ax)
classes[count]=[]
del_ids=[]
rows=list(np.arange(1,remain+1)-1)
for id,h in enumerate(corr[0]):
if h> trh:
classes[count].append(sre[id])
del_ids.append(id)
rows.remove(id)
# for i in del_ids:
# for id,h in enumerate(corr[i]):
# if h> 0.7:
# if not selected_random_events[id] in classes[count]:
# classes[count].append(selected_random_events[id])
# if id in rows:
# rows.remove(id)
count+=1
corr=corr[rows][:,rows]
sre=np.array(sre)
sre=sre[rows]
remain=corr.shape[0]
plt.show()
#%%
for i in new_candidates:
# if len(classes[i])<10:
print(i)
i=[i]
pltshow(i)
#%%
trh=0.7
sre=np.copy(selected_random_events500)
corr=np.copy(correlation500)
#%%
def corr_similar_grouping(corr, sre, trh):
    """Greedily group events by pairwise correlation.

    Each pass takes the first remaining event (row 0) and pulls into one
    class every event whose correlation with it exceeds *trh* AND is at
    least that event's second-largest column entry (i.e. row 0 is its best
    match apart from one maximal occurrence). Grouped events are removed
    from the matrix and the process repeats until nothing remains.

    Parameters
    ----------
    corr : (N, N) ndarray of pairwise correlations (diagonal ~ 1).
    sre  : sequence of N event identifiers aligned with the rows of *corr*.
    trh  : float, correlation threshold.

    Returns
    -------
    dict mapping class index -> list of event identifiers.
    """
    # Removed: dead locals `del_ids` and `counter` from the original.
    classes = {}
    count = 0
    remain = corr.shape[0]
    while remain > 0:
        classes[count] = []
        keep = list(range(remain))  # indices surviving this pass
        for idx, h in enumerate(corr[0]):
            if h > trh:
                col = sorted(corr[:, idx])
                # second-largest column entry (largest if only one row);
                # equivalent to the original "remove one max, take max again"
                runner_up = col[-2] if len(col) > 1 else col[-1]
                if h >= runner_up:
                    classes[count].append(sre[idx])
                    keep.remove(idx)
        count += 1
        corr = corr[keep][:, keep]
        sre = np.array(sre)[keep]
        remain = corr.shape[0]
        plt.show()  # flush any heatmap the caller drew between passes
    return classes
#%%
# =============================================================================
# =============================================================================
# # pick the candidate of a cluster
# =============================================================================
# =============================================================================
def pick_the_candidate(group):
    """Return the medoid of *group*: the member with the highest summed
    correlation to all other members.

    Events are located in the module-level ``selected_random_events500`` /
    ``correlation500`` sample. Falls back to ``group[0]`` when no member has
    a positive summed correlation; raises IndexError on an empty group
    (the original's ``group[0]`` did too — its ``len(group)>0`` guard came
    after the indexing and was dead code).
    """
    sre = list(np.copy(selected_random_events500))
    corr = np.copy(correlation500)
    # Build the index lookup once; the original called list(sre).index()
    # inside the nested loop (O(len(group)^2 * len(sre))).
    positions = {event: sre.index(event) for event in group}
    candidate = group[0]
    best = 0  # was `max`, which shadowed the builtin
    for i in group:
        idx1 = positions[i]
        total = sum(corr[idx1, positions[j]] for j in group if not i == j)
        if best < total:
            best = total
            candidate = i
    return candidate
#%%
def find_all_candidates(classes):
    """Return the representative (medoid) event of every class, in key order."""
    return [pick_the_candidate(members) for members in classes.values()]
#%%
def candidate_dist(candidates):
    """Return the sub-matrix of the global ``correlation500`` restricted to
    the rows/columns of the given candidate events."""
    sre = list(np.copy(selected_random_events500))
    corr = np.copy(correlation500)
    selected_idx = [sre.index(cand) for cand in candidates]
    return corr[selected_idx][:, selected_idx]
#%%
def merge_clusters(candidates, classes):
    """Merge clusters whose representatives correlate above the global ``trh``.

    Groups the candidate events by their mutual correlation, concatenates
    the member lists of every cluster whose representative fell into the
    same group, then re-elects a representative per merged cluster.

    Returns
    -------
    (new candidate list, dict of merged clusters keyed 0..k-1)
    """
    cand_corr = candidate_dist(candidates)
    grouped = corr_similar_grouping(cand_corr, candidates, trh)
    merged = {}
    for slot, group_key in enumerate(grouped):
        # flatten the member lists of all clusters merged into this slot
        merged[slot] = [event
                        for can in grouped[group_key]
                        for event in classes[candidates.index(can)]]
    return find_all_candidates(merged), merged
#%%
def clustering_point(classes):
    """Score a clustering over the global 500-event sample.

    The score is the sum over clusters of (total intra-cluster correlation
    divided by the cluster size); higher means tighter clusters.
    """
    sre = list(np.copy(selected_random_events500))
    corr = np.copy(correlation500)

    def _cluster_score(members):
        # per-member average of the cluster's correlation sub-matrix sum
        idxs = [sre.index(event) for event in members]
        sub = corr[idxs][:, idxs]
        return np.sum(sub) / len(members)

    return sum(_cluster_score(classes[cl]) for cl in classes)
#%%
# =============================================================================
# =============================================================================
# # recursive correlation clustering: group events, elect medoids, then merge
# # clusters repeatedly until the candidate set reaches a fixed point
# =============================================================================
# =============================================================================
eps=0.01  # NOTE(review): unused below — presumably a leftover tolerance
ind=100   # NOTE(review): unused below
trh=0.7   # correlation threshold read by corr_similar_grouping/merge_clusters
sre=np.copy(selected_random_events500)
corr=np.copy(correlation500)
checking=0  # loop flag; set to 1 once merging no longer changes the candidates
# extract the initial classes from the raw correlation matrix
classes=corr_similar_grouping(corr,sre,trh)
# elect a representative (medoid) for every class
candidates=find_all_candidates(classes)
# correlation matrix restricted to the representatives
cand_dist=candidate_dist(candidates)
while checking==0:
    sre=np.copy(selected_random_events500)
    corr=np.copy(correlation500)
    # merge clusters whose representatives correlate above trh
    new_candidates,new_classes=merge_clusters(candidates,classes)
    # stop when the representatives did not change (fixed point reached)
    if new_candidates==candidates:
        checking=1
    candidates=new_candidates
    classes=new_classes
#%%
class_numbers=len(list(classes.keys()))
NC=20
trh=0.69
step=0.01
while trh>=0.6:
sre=np.copy(selected_random_events500)
corr=np.copy(correlation500)
    # merge the clusters
new_candidates,new_classes=merge_clusters(candidates,classes)
#check the candidates
candidates=new_candidates
classes=new_classes
class_numbers=len(list(classes.keys()))
trh=trh-step
print(clustering_point(classes))
print(class_numbers)
print(trh)
#%%
sre=np.copy(selected_random_events500)
corr=np.copy(correlation500)
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,994
|
zyh88/PMU
|
refs/heads/master
|
/SaveDifferentTypesOfLoad.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import random
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from natsort import natsorted
from scipy import stats
from seaborn import heatmap
import scipy
import loading_data
from loading_data import load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_train_vitheta_data_V,load_data_with_features,load_standardized_data_with_features
#%%
#%%
# =============================================================================
# =============================================================================
# # read one file of the PMU data , each file is for 10 minutes
# =============================================================================
# =============================================================================
#%%
# importing data from a file function
def OneFileImport(filename, dir):
    """Read one 10-minute PMU CSV file named *filename* from directory *dir*
    and return it as a pandas DataFrame."""
    return pd.read_csv(os.path.join(dir, filename))
#%%
# =============================================================================
# =============================================================================
# # save data with V I and theta
# =============================================================================
# =============================================================================
for n in [3]:
if n<10:
dir="../../UCR/PMU data/Data/July_0"+str(n)+"/"
else:
dir="../../UCR/PMU data/Data/July_"+str(n)+"/"
#dir='data/Armin_Data/July_03'
#os.listdir('../../UCR/PMU data/Data')
foldernames=os.listdir(dir)
selected_files=np.array([])
for f in foldernames:
spl=f.split('_')
if 'Hunter' in spl:
selected_files=np.append(selected_files,f)
selected_files
filenames1224=natsorted(selected_files)
filenames1224
def OneFileImport(filename,dir):
dir_name=dir
base_filename=filename
path=os.path.join(dir_name, base_filename)
imported_data=pd.read_csv(path)
return imported_data
whole_data=np.array([])
for count,file in enumerate(filenames1224):
print(count,file)
cosin={}
# Reacive={}
# keys={}
# pf={}
selected_data=OneFileImport(file,dir)
cosin['TA']=np.cos((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180))
cosin['TB']=np.cos((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180))
cosin['TC']=np.cos((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180))
# Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
# Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
# Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
#
#pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
#pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
#pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
selected_data['TA']=cosin['TA']
selected_data['TB']=cosin['TB']
selected_data['TC']=cosin['TC']
selected_data=selected_data.drop(columns=['Unnamed: 0','L1Ang','L2Ang','L3Ang','C1Ang','C2Ang','C3Ang'])
#
# selected_data['QA']=Reacive['A']
# selected_data['QB']=Reacive['B']
# selected_data['QC']=Reacive['C']
#
if count==0:
whole_data=selected_data.values
else:
whole_data=np.append(whole_data,selected_data.values,axis=0)
#
# k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','L1Ang','L2Ang','L3Ang','C1Ang','C2Ang','C3Ang']
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
day_data={}
day_data['1224']={}
c=0
for key in k:
day_data['1224'][key]=whole_data[:,c]
c+=1
# if n<10:
# dir="data/Armin_Data/July_sep_0"+str(n)+"/pkl"
# else:
# dir="data/Armin_Data/July_sep_"+str(n)+"/pkl"
# dir_name=dir
# os.mkdir(dir_name)
# write python dict to a file
if n<10:
dir="data/Armin_Data/July_0"+str(n)+"/pkl/rawdata" + str(n) + ".pkl"
else:
dir="data/Armin_Data/July_"+str(n)+"/pkl/rawdata" + str(n) + ".pkl"
output = open(dir, 'wb')
pkl.dump(day_data, output)
output.close()
print(n)
#%%
filename='data/Armin_Data/July_03/pkl/rawdata3.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#dds14=load_standardized_data_with_features(filename,k)
dd3=load_data_with_features(filename,k)
start,SampleNum,N=(0,40,500000)
#filename='data/Armin_Data/July_03/pkl/julseppf3.pkl'
#k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#tt14=load_train_vitheta_data_V(start,SampleNum,N,filename,k)
#%%
%matplotlib inline
ev=[53766,355644]
dst='clusters/vit/111111111/cap'
show(ev,dd3,dst)
%matplotlib auto
#%%
def show(events, select_1224, dst):
    """Plot the V/I/T windows around each event and save one PNG per event.

    Parameters
    ----------
    events : iterable of event window indices (not raw sample indices).
    select_1224 : indexable per-channel data; rows 0-2 voltage, 3-5 current,
        6-8 the angle-cosine "T" channels — assumed from the channel list
        used when the data was saved, confirm against the loader.
    dst : directory path the per-event figures are written into.
    """
    SampleNum = 40
    half = int(SampleNum / 2)
    for anom in events:
        print(anom)
        anom = int(anom)
        lo = anom * half - 240
        hi = anom * half + 240
        plt.subplot(221)
        for i in [0, 1, 2]:
            plt.plot(select_1224[i][lo:hi])
        plt.legend('A' 'B' 'C')  # 'ABC' -> one legend letter per phase line
        plt.title('V')
        plt.subplot(222)
        for i in [3, 4, 5]:
            plt.plot(select_1224[i][lo:hi])
        plt.title('I')
        plt.subplot(223)
        for i in [6, 7, 8]:
            plt.plot(select_1224[i][lo:hi])
        # Fix: set the panel title BEFORE saving; the original called
        # savefig first, so the saved file never showed the 'T' title.
        plt.title('T')
        plt.savefig(dst + "/" + str(anom))
        plt.show()
#%%
def just_show(events, select_1224):
    """Display (without saving) the V/I/T windows around each event."""
    shift = 240
    SampleNum = 40
    half = int(SampleNum / 2)
    # (subplot position, channel rows, panel title)
    layout = ((221, (0, 1, 2), 'V'),
              (222, (3, 4, 5), 'I'),
              (223, (6, 7, 8), 'T'))
    for anom in events:
        print(anom)
        anom = int(anom)
        center = anom * half
        for pos, channels, label in layout:
            plt.subplot(pos)
            for ch in channels:
                plt.plot(select_1224[ch][center - shift:center + shift])
            plt.title(label)
        plt.show()
#%%
x = data_matlab[2]
w = np.fft.fft(x)
freqs = np.fft.fftfreq(len(x))
for coef,freq in zip(w,freqs):
if coef:
print('{c:>6} * exp(2 pi i t * {f})'.format(c=coef,f=freq))
#%%
v=0
for inx,f in enumerate(w):
if inx>0:
if np.absolute(f)>v:
v=np.absolute(np.real(f))
bid=inx
print(freqs[bid])
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,995
|
zyh88/PMU
|
refs/heads/master
|
/new clustering.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.fftpack import fft, ifft
from dtw import dtw
from fastdtw import fastdtw
import time
from scipy.spatial.distance import euclidean
from tslearn.clustering import GlobalAlignmentKernelKMeans
import loading_data
from loading_data import load_real_data, load_standardized_data,load_train_data,load_train_data_V,load_standardized_data_with_features,load_data_with_features
from scipy import stats
from sklearn.ensemble import IsolationForest
import seaborn as sns; sns.set()
#%%
# =============================================================================
# =============================================================================
# # selected 3phase for clustering, saved in the data file clustering
# =============================================================================
# =============================================================================
selected_events_for_clustering
#%%
# =============================================================================
# =============================================================================
# # save event data from real and standardized and the reduced mean as well
# =============================================================================
# =============================================================================
data_file='data/Armin_Data/July_03/pkl/julseppf3.pkl'
features=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
std_data=load_standardized_data_with_features(data_file,features)
#%%
# =============================================================================
# =============================================================================
# # saving data for events
# =============================================================================
# =============================================================================
r_data=load_data_with_features(data_file,features)
scale=20
shift=240
#%%
real_data={}
std_no_mean_data={}
standard_data={}
for i in sample_events:
i=int(i)
start=scale*i-shift
end=scale*i+shift
tempreal=r_data[:,start:end]
tempdata=std_data[:,start:end]
real_data[i]=tempreal
standard_data[i]=tempdata
tempdata=(tempdata-tempdata.mean(axis=1).reshape(-1,1))
std_no_mean_data[i]=tempdata
#%%
# =============================================================================
# =============================================================================
# # save all type of events data for July third
# =============================================================================
# =============================================================================
real="figures/all_events/July_03/new_real_3ph.pkl"
std="figures/all_events/July_03/new_std_3ph.pkl"
stdnomean="figures/all_events/July_03/new_stdnomean_3ph.pkl"
output = open(real, 'wb')
pkl.dump(real_data, output)
output.close()
output = open(std, 'wb')
pkl.dump(standard_data, output)
output.close()
output = open(stdnomean, 'wb')
pkl.dump(std_no_mean_data, output)
output.close()
#%%
# =============================================================================
# =============================================================================
# # load the event points
# =============================================================================
# =============================================================================
stdnomean="figures/all_events/July_03/new_stdnomean_3ph.pkl"
pkl_file = open(stdnomean, 'rb')
std_no_mean_data = pkl.load(pkl_file)
pkl_file.close()
#%%
def showstd(events):
    """Plot the standardized, mean-removed V/I/T channels of each event.

    Reads the module-level ``std_no_mean_data`` dict. The fourth panel is
    intentionally left empty (the Q channels are not loaded here).
    """
    layout = ((221, (0, 1, 2), 'V'),
              (222, (3, 4, 5), 'I'),
              (223, (6, 7, 8), 'T'))
    for anom in events:
        anom = int(anom)
        print(anom)
        for pos, channels, label in layout:
            plt.subplot(pos)
            for ch in channels:
                plt.plot(std_no_mean_data[anom][ch])
            plt.legend('A' 'B' 'C')
            plt.title(label)
        plt.subplot(224)
        plt.show()
#%%
#%%
#considered_events=selected_events_for_clustering[0:100]
#%%
# =============================================================================
# =============================================================================
# # medoids searching method iteratively
# =============================================================================
# =============================================================================
def initial_medoids(class_number):
    """Draw *class_number* distinct events from the module-level
    ``considered_events`` to seed the k-medoids search (random, no repeats)."""
    medoids=np.random.choice(considered_events, class_number, replace=False)
    return medoids
#%%
def similarity(event1, event2):
    """Best time-aligned similarity between two events.

    Circularly shifts *event2* by -60..+59 samples and returns the highest
    mean Pearson correlation over the first three channels (voltage phases)
    of the module-level ``std_no_mean_data``.
    """
    best = -10
    for shift in range(-60, 60):
        total = 0
        for ch in range(3):
            total += np.corrcoef(std_no_mean_data[event1][ch],
                                 np.roll(std_no_mean_data[event2][ch], shift))[0, 1]
        total = total / 3
        if total > best:
            best = total
    return best
#%%
def cluster_assigned(old_medoids):
    """Assign every event in the global ``considered_events`` to its most
    similar medoid.

    Returns
    -------
    (clusters, totals) : clusters maps medoid -> list of member events,
    totals maps medoid -> accumulated best similarity of its members.
    """
    clusters = {med: [] for med in old_medoids}
    totals = {med: 0 for med in old_medoids}
    for event in considered_events:
        best_sim = -10
        for med in old_medoids:
            sim = similarity(event, med)
            if sim > best_sim:
                best_sim = sim
                winner = med
        totals[winner] += best_sim
        clusters[winner].append(event)
    return clusters, totals
#%%
def new_med(new_cluster):
    """Re-elect the medoid of *new_cluster*.

    Builds the symmetric pairwise similarity matrix of the members and
    returns the member with the largest column sum (first one on ties),
    i.e. the event most similar to the whole cluster.
    """
    n = len(new_cluster)
    corr = np.zeros((n, n))
    for idx1, event1 in enumerate(new_cluster):
        for idx2, event2 in enumerate(new_cluster):
            if idx2 >= idx1:
                corr[idx1, idx2] = similarity(event1, event2)
            else:
                # mirror the value already computed in an earlier row
                corr[idx1, idx2] = corr[idx2, idx1]
    col_sum = np.sum(corr, axis=0)
    # was: np.max followed by a list.index round-trip through the same value;
    # argmax returns the same first-occurrence index directly
    return new_cluster[int(np.argmax(col_sum))]
#%%
# =============================================================================
# finding the best medoids for a fixed cluster number: iterate
# assign-then-re-elect until the medoid set stops changing
# =============================================================================
considered_events=np.random.choice(selected_events_for_clustering, 400, replace=False)
crt=0  # convergence flag: 1 once old and new medoid sets coincide
class_number=6
init_medoids=initial_medoids(class_number)
first_step=0
iter=0  # NOTE(review): shadows the builtin `iter`
objective=[]  # per-iteration total similarity, for convergence inspection
while crt==0:
    print(iter)
    if first_step==0:
        # seed both medoid sets on the first pass
        old_medoids=init_medoids
        new_medoids=init_medoids
        first_step=1
    # assignment step: attach every event to its closest medoid
    new_clusters,sum_sims=cluster_assigned(old_medoids)
    objective.append(sum(sum_sims.values()))
    iter+=1
    # update step: re-elect the medoid of every cluster
    new_medoids=[]
    for cluster in new_clusters:
        new_medoids.append(new_med(new_clusters[cluster]))
    print(new_medoids,old_medoids)
    # compare the two medoid sets as sorted ints (order-insensitive)
    nm=[]
    for i in new_medoids:
        nm.append(int(i))
    nm.sort()
    om=[]
    for i in old_medoids:
        om.append(int(i))
    om.sort()
    count=0
    for i in om:
        if i in nm:
            count+=1
    print(count)
    if count==class_number:
        crt=1  # every old medoid re-elected -> converged
    old_medoids=new_medoids
#%%
# =============================================================================
# sample data correlation matrix
# =============================================================================
sample_shape=considered_events.shape[0]
selected_corr=np.zeros((sample_shape,sample_shape))
#%%
# =============================================================================
# =============================================================================
# # Event clusters extracting from folder
# =============================================================================
# =============================================================================
clusters={}
clusters_together=[]
for i in os.listdir('clusters'):
clusters[i]=[]
for e in os.listdir('clusters/'+i):
clusters[i].append(e.split('.')[0])
clusters_together.append(e.split('.')[0])
#%%
sample_events=['350','351','3182','4743','7419','49465',
'57881','67737','69018','88255','254519',
'127594','144417','12901','254742','12914','13130','26959','30703',
'496291']
sample_events=clusters_together
sample_events=np.array(sample_events)
sample_events_int=[int(x) for x in sample_events]
#%%
real_data={}
std_no_mean_data={}
standard_data={}
for i in sample_events:
i=int(i)
start=scale*i-shift
end=scale*i+shift
tempreal=r_data[:,start:end]
tempdata=std_data[:,start:end]
real_data[i]=tempreal
standard_data[i]=tempdata
tempdata=(tempdata-tempdata.mean(axis=1).reshape(-1,1))
std_no_mean_data[i]=tempdata
#%%
# =============================================================================
# correlation function
# =============================================================================
def event_corr(sample_events_int, std_no_mean_data):
    """Symmetric matrix of best time-aligned correlations between events.

    For each pair of events the second one is circularly shifted by
    -60..+59 samples; the matrix entry is the highest mean Pearson
    correlation over the 9 channels (floored at 0 by the initial max).
    Only the upper triangle is computed; the lower one is mirrored.

    Parameters
    ----------
    sample_events_int : list of event keys into *std_no_mean_data*.
    std_no_mean_data : dict mapping event key -> (9, L) array.

    Returns
    -------
    (N, N) ndarray with N = len(sample_events_int).
    """
    # Fix: N was taken from the module global `sample_events` rather than
    # the parameter actually iterated below.
    N = len(sample_events_int)
    corr = np.zeros((N, N))
    for idx1, anom1 in enumerate(sample_events_int):
        if idx1 % 100 == 0:
            print('iter num: %i', idx1)
        # Fix: time.clock() was removed in Python 3.8; perf_counter is the
        # documented replacement for wall-clock interval timing.
        tik = time.perf_counter()
        for idx2, anom2 in enumerate(sample_events_int):
            if idx2 >= idx1:
                if idx2 % 100 == 0:
                    print('iter num: %i', idx2)
                max_corr = 0
                for i in range(120):
                    cr = 0
                    for j in range(9):
                        cr += np.corrcoef(std_no_mean_data[anom1][j],
                                          np.roll(std_no_mean_data[anom2][j], i - 60))[0, 1]
                    cr = cr / 9
                    if cr > max_corr:
                        max_corr = cr
                corr[idx1, idx2] = max_corr
            else:
                corr[idx1, idx2] = corr[idx2, idx1]
        toc = time.perf_counter()
        print(toc - tik)
    return corr
#%%
# =============================================================================
# correlation of the selected sample events
# =============================================================================
sample_corr=event_corr(sample_events_int,std_no_mean_data)
#%%
# =============================================================================
# =============================================================================
# # save samples and corr in order to pass to the matlab
# =============================================================================
# =============================================================================
import numpy as np
import scipy.io
scipy.io.savemat('correvent179.mat', dict(corr=sample_corr, events=sample_events_int))
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,996
|
zyh88/PMU
|
refs/heads/master
|
/just test.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 14:50:30 2019
@author: hamed
"""
# NOTE(review): the Index(...) below is pasted console output kept as a
# reminder of the CSV column names; it raises NameError if this module is
# actually executed top-to-bottom (Index is not imported).
Index(['Unnamed: 0', 'L1Mag', 'L2Mag', 'L3Mag', 'L1Ang', 'L2Ang', 'L3Ang',
       'C1Mag', 'C2Mag', 'C3Mag', 'C1Ang', 'C2Ang', 'C3Ang', 'PA', 'PB', 'PC',
       'QA', 'QB', 'QC'],
      dtype='object')
#%%
# Collect the July-03 CSV files whose names contain the token 'Bld',
# then order them naturally (file_2 before file_10).
dir='data/Armin_Data/July_03'
foldernames=os.listdir(dir)
selected_files=np.array([])
for f in foldernames:
    spl=f.split('_')
    if 'Bld' in spl:
        selected_files=np.append(selected_files,f)
selected_files
filenames1224=natsort.natsorted(selected_files)
filenames1224
def OneFileImport(filename,dir):
    """Read the single CSV file *filename* from directory *dir*.

    Returns the parsed pandas DataFrame.
    """
    # Join directory and file name, then let pandas do the parsing.
    csv_path = os.path.join(dir, filename)
    return pd.read_csv(csv_path)
# Load every selected CSV, derive per-phase active (P) and reactive (Q)
# power from voltage/current magnitudes and angles, and stack all days
# into one numpy array.  Angles are in degrees (hence the pi/180 factor).
whole_data_hun=np.array([])
for count,file in enumerate(filenames1224):
    print(count,file)
    Active={}
    Reacive={}
    keys={}  # NOTE(review): keys/pf are initialized but never used here
    pf={}
    selected_data=OneFileImport(file,dir)
    # P = |V| * |I| * cos(angle(V) - angle(I)) per phase A/B/C.
    Active['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.cos((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
    Active['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.cos((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
    Active['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.cos((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
    # Q = |V| * |I| * sin(angle(V) - angle(I)) per phase A/B/C.
    Reacive['A']=selected_data['L1Mag']*selected_data['C1Mag']*(np.sin((selected_data['L1Ang']-selected_data['C1Ang'])*(np.pi/180)))
    Reacive['B']=selected_data['L2Mag']*selected_data['C2Mag']*(np.sin((selected_data['L2Ang']-selected_data['C2Ang'])*(np.pi/180)))
    Reacive['C']=selected_data['L3Mag']*selected_data['C3Mag']*(np.sin((selected_data['L3Ang']-selected_data['C3Ang'])*(np.pi/180)))
#
#pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
#pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
#pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
    # Append the derived columns to the per-file frame.
    selected_data['PA']=Active['A']
    selected_data['PB']=Active['B']
    selected_data['PC']=Active['C']
    selected_data['QA']=Reacive['A']
    selected_data['QB']=Reacive['B']
    selected_data['QC']=Reacive['C']
    if count==0:
        whole_data_hun=selected_data.values
    else:
        # Stack subsequent files row-wise under the first.
        whole_data_hun=np.append(whole_data_hun,selected_data.values,axis=0)
#%%
# Plot a +/-240-sample window of the three voltage-magnitude columns
# around sample index `anom`.
anom=2250900
sel=whole_data_hun[anom-240:anom+240]
c=0  # column offset into the stacked array
for i in range(3):
    vm=sel[:,i+c+1]
    va=sel[:,i+4]-sel[:,4]  # angle relative to phase-A angle
#    p=P2R(vm,va)
#    plt.plot(p.real,p.imag)
    plt.plot(vm)
plt.show()
# NOTE(review): `whole_data` is not defined in this file — presumably it
# exists in the interactive session from another script; verify before use.
sel=whole_data[anom-240:anom+240]
for i in range(3):
    vm=sel[:,i+c+1]
    va=sel[:,i+4]-sel[:,4]
#    p=P2R(vm,va)
#    plt.plot(p.real,p.imag)
    plt.plot(vm)
#%%
# Same window drawn in the complex plane via polar->rectangular conversion.
for i in range(3):
    vm=sel[:,i+1]
    va=sel[:,i+4]-sel[:,4]
    p=P2R(vm,va)
    plt.plot(p.real,p.imag)
plt.show()
def P2R(radii, angles):
    """Convert polar coordinates to complex rectangular form.

    *angles* are in radians; works element-wise on numpy arrays.
    """
    phase = np.exp(1j * angles)
    return radii * phase
#%%
# Scatter the last computed polar window in the complex plane.
# NOTE(review): relies on vm/va left over from the previous cell.
p=P2R(vm,va)
fig,ax = plt.subplots()
ax.scatter(p.real,p.imag)
#%%
# Load the July-08 pickle and plot PMU 1224's phase-A voltage magnitude.
dir='data/Armin_Data/'
foldernames=os.listdir(dir)
filenames=natsort.natsorted(foldernames)
for fl in ['July_08']:
    print(fl)
    dist=dir+fl+'/pkl/J'+str(8)+'.pkl'
    pkl_file = open(dist, 'rb')
    selected_data = pkl.load(pkl_file)
    pkl_file.close()
    plt.plot(selected_data['1224']['L1MAG'])
    plt.show()
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,997
|
zyh88/PMU
|
refs/heads/master
|
/testGANtoStatisticmodel.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
#%%
# =============================================================================
# =============================================================================
# # train data prepreation
# =============================================================================
# =============================================================================
filename='data/Armin_Data/July_03/pkl/jul3.pkl'
def load_data(start,SampleNum,N,filename):
    """Build N half-overlapping training windows from one day's pickle.

    Reads PMU '1224' from the pickle at *filename*, standardizes the 12
    feature time series, then slices N windows of length SampleNum with a
    stride of SampleNum/2 starting at *start*, removing each window's mean.

    Returns an array of shape (N, 12, SampleNum).
    """
    #read a pickle file
    pkl_file = open(filename, 'rb')
    selected_data = pkl.load(pkl_file)
    pkl_file.close()
    for pmu in ['1224']:
        selected_data[pmu]=pd.DataFrame.from_dict(selected_data[pmu])
    features=['L1MAG','L2MAG', 'L3MAG','C1MAG',
           'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    print(selected_data.keys())
    select=[]
    for f in features:
        # `pmu` here is the last value of the loop above ('1224').
        select.append(selected_data[pmu][f])
    selected_data=0  # release the large dict before the numpy copy
    select=np.array(select)
    print(select.shape)
    # z-score each feature series over the whole day (sklearn preprocessing).
    select=preprocessing.scale(select,axis=1)
#    selected_data=0
    end=start+SampleNum
    shift=int(SampleNum/2)  # 50% window overlap
    train_data=np.zeros((N,12,SampleNum))
#    reduced_mean=np.zeros((12,20))
    for i in range(N):
        if i% 1000==0:
            print('iter num: %i', i)
        temp=select[:,start+i*shift:end+i*shift]
        temp=(temp-temp.mean(axis=1).reshape(-1,1)) ## reduced mean
#        temp = preprocessing.scale(temp,axis=1)  ## standardized
#        reduced_mean=np.concatenate((reduced_mean,temp[:,0:20]),axis=1)
        train_data[i,:]=temp
    # Result is (N, 12, SampleNum); callers reshape to (N, SampleNum, 12)
    # before feeding the LSTMs.
    return train_data#,select,selected_data#,select_proc,reduced_mean
#X_train=load_data()
#print(X_train.shape)
#%%
# =============================================================================
# =============================================================================
# # real data extraxtion
# =============================================================================
# =============================================================================
#filename='data/Armin_Data/July_03/pkl/jul3.pkl'
def load_real_data(filename):
    """Load the raw (unscaled) feature series of PMU '1224' from a pickle.

    Missing values are forward-filled.  Returns a (12, T) numpy array in the
    order given by `features` below.
    """
    #read a pickle file
    pmu='1224'
    pkl_file = open(filename, 'rb')
    selected_data = pkl.load(pkl_file)
    pkl_file.close()
    selected_data=pd.DataFrame(selected_data)
    selected_data=selected_data.fillna(method='ffill')
    print(selected_data.keys())
    data=selected_data[pmu]
    features=['L1MAG','L2MAG', 'L3MAG','C1MAG',
           'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    select=[]
    for f in features:
        select.append(list(data[f]))
    select=np.array(select)
    return select
#%%
def adam_optimizer():
    """Adam optimizer shared by generator and discriminator (lr=2e-4, beta1=0.5)."""
    opt = adam(lr=0.0002, beta_1=0.5)
    return opt
#%%
def create_generator():
    """Two-layer CuDNNLSTM generator: (100, 1) noise -> flat 12*40 window.

    Output is later reshaped to (40, 12) by create_gan.  Requires a GPU
    (CuDNNLSTM has no CPU implementation).
    """
    generator=Sequential()
    generator.add(CuDNNLSTM(units=256,input_shape=(100,1),return_sequences=True))
    generator.add(LeakyReLU(0.2))
    generator.add(CuDNNLSTM(units=512))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=512))
    generator.add(LeakyReLU(0.2))
#
#    generator.add(LSTM(units=1024))
#    generator.add(LeakyReLU(0.2))
    generator.add(Dense(units=12*40))
    generator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return generator
g=create_generator()
g.summary()
#%%
def create_discriminator():
    """Two-layer CuDNNLSTM discriminator: (40, 12) window -> real/fake sigmoid."""
    discriminator=Sequential()
    discriminator.add(CuDNNLSTM(units=256,input_shape=(40,12),return_sequences=True))
    discriminator.add(LeakyReLU(0.2))
#    discriminator.add(Dropout(0.3))
    discriminator.add(CuDNNLSTM(units=512))
    discriminator.add(LeakyReLU(0.2))
#
    discriminator.add(Dense(units=512))
    discriminator.add(LeakyReLU(0.2))
#    discriminator.add(Dropout(0.3))
#
#    discriminator.add(LSTM(units=256))
#    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dense(units=1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    return discriminator
d =create_discriminator()
d.summary()
#%%
def create_gan(discriminator, generator):
    """Chain generator and (frozen) discriminator into the combined GAN model.

    The generator's flat 12*40 output is reshaped to (40, 12) before being
    judged by the discriminator.
    """
    # Freeze D so that training the chained model only updates G.
    discriminator.trainable=False
    gan_input = Input(shape=(100,1))
    x = generator(gan_input)
    x = Reshape((40,12), input_shape=(12*40,1))(x)
    gan_output= discriminator(x)
    gan= Model(inputs=gan_input, outputs=gan_output)
    gan.compile(loss='binary_crossentropy', optimizer='adam')
    return gan
gan = create_gan(d,g)
gan.summary()
#%%
# Training hyper-parameters.
batch_size=100   # samples per discriminator/generator batch
epochnum=1000    # number of training epochs
#%%
#%%
start,SampleNum,N=(0,40,1000)
#X_train = load_data(start,SampleNum,N)
# BUG FIX: this line originally read `filename=` with no right-hand side,
# which is a SyntaxError and made the whole module unimportable.  Restored
# the July-03 pickle path used at the top of this file — confirm it is the
# intended training day.
filename='data/Armin_Data/July_03/pkl/jul3.pkl'
X_train = load_data(start,SampleNum,N,filename)
batch_count = X_train.shape[0] / batch_size
#%%
# Reorder each window from (12, SampleNum) to (SampleNum, 12) for the LSTMs.
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%%
generator= create_generator()
discriminator= create_discriminator()
gan = create_gan(discriminator, generator)
#%%
def training(generator,discriminator,gan,epochs, batch_size):
    """Alternating GAN training loop.

    Each epoch runs `batch_size` inner steps; every step trains the
    discriminator on a half-real/half-fake batch, then trains the generator
    through the chained `gan` model with the discriminator frozen.

    NOTE(review): relies on module-level X_train (N, SampleNum, 12) and
    SampleNum; interface unchanged.
    """
    scale=1  # std-dev multiplier for the latent noise
    for e in range(1,epochs+1 ):
        # BUG FIX: time.clock() was removed in Python 3.8 — use
        # time.perf_counter() for elapsed-time measurement.
        tik=time.perf_counter()
        print("Epoch %d" %e)
        for _ in tqdm(range(batch_size)):
            # Latent noise -> fake windows.
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            generated_images = generator.predict(noise)
            generated_images = generated_images.reshape(batch_size,SampleNum,12)
            # Random batch of real windows.
            image_batch =X_train[np.random.randint(low=0,high=X_train.shape[0],size=batch_size)]
            # Half real, half fake.
            X= np.concatenate([image_batch, generated_images])
            # One-sided label smoothing: real windows labelled 0.9, fake 0.0.
            y_dis=np.zeros(2*batch_size)
            y_dis[:batch_size]=0.9
            discriminator.trainable=True
            discriminator.train_on_batch(X, y_dis)
            # Generator step: fresh noise labelled as real, discriminator frozen.
            noise= scale*np.random.normal(0,1, [batch_size, 100])
            noise=noise.reshape(batch_size,100,1)
            y_gen = np.ones(batch_size)
            discriminator.trainable=False
            gan.train_on_batch(noise, y_gen)
        toc = time.perf_counter()
        print(toc-tik)
#batch_size=0
# BUG FIX: time.clock() was removed in Python 3.8; perf_counter() gives the
# same elapsed-wall-time measurement.
tic = time.perf_counter()
training(generator,discriminator,gan,epochnum,batch_size)
toc = time.perf_counter()
print(toc-tic)
#%%
##
#gan.save('GPU_gan_mul_LSTM_twolayer_N500000_e1000_b10_1225.h5')
#generator.save('GPU_generator_mul_LSTM_twolayer_N500000_e1000_b10_1225.h5')
#discriminator.save('GPU_discriminator_mul_LSTM_twolayer_N500000_e1000_b10_1225.h5')
#%%
# Restore previously trained models from disk.
gan=load_model('GPU_gan_mul_LSTM_twolayer_N500000_e1000_b100.h5')
generator=load_model('GPU_generator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
discriminator=load_model('GPU_discriminator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
#%%
# Build the evaluation windows for July 13.
filename='data/Armin_Data/July_13/pkl/J13.pkl'
start,SampleNum,N,filename=(0,40,500000,filename)
X_train= load_data(start,SampleNum,N,filename)
#batch_count = X_train.shape[0] / batch_size
#%%
X_train=X_train.reshape(N,12*SampleNum)
X_train=X_train.reshape(N,SampleNum,12)
#%%
# Score the windows in `rate` chunks to bound memory use.
# NOTE(review): range(rate-1) skips the last chunk — confirm this is intended.
rate=1000
shift=N/rate
scores=[]
for i in range(rate-1):
    temp=discriminator.predict_on_batch(X_train[int(i*shift):int((i+1)*shift)])
    scores.append(temp)
    print(i)
scores=np.array(scores)
scores=scores.ravel()
#%%
#%%
# Center the discriminator probabilities around their mean.
probability_mean=np.mean(scores)
a=scores-probability_mean
#%%
#fig_size = plt.rcParams["figure.figsize"]
#
#
## Set figure width to 12 and height to 9
#fig_size[0] = 8
#fig_size[1] = 6
#plt.plot(a.ravel())
#plt.show()
#%%
# =============================================================================
# =============================================================================
# # determining the upper and lower bound based on the train data
# =============================================================================
# =============================================================================
data = a
# Fit a normal distribution to the centered scores:
mu, std = norm.fit(data)
# Plot the histogram of the scores.
plt.hist(data, bins=25, density=True, alpha=0.6, color='g')
# Overlay the fitted normal PDF.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
plt.show()
#%%
# =============================================================================
# =============================================================================
# #GAN model calling
# =============================================================================
# =============================================================================
gan=load_model('GPU_gan_mul_LSTM_twolayer_N500000_e1000_b100.h5')
generator=load_model('GPU_generator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
discriminator=load_model('GPU_discriminator_mul_LSTM_twolayer_N500000_e1000_b100.h5')
# =============================================================================
# Reading the files in the data to make a for
# =============================================================================
files=os.listdir('data/Armin_Data')
#%%
# Keep only folders named like 'July_XX'.
selected_files=[]
for f in files:
    s=f.split('_')
    if 'July' in s:
        selected_files.append(f)
#%%
# =============================================================================
# make a place to save all 1224 events data wrt each day, whether my method or Alirezas
# =============================================================================
dst="figures/all_events"
os.mkdir(dst)
#%%
#for num,file in enumerate(selected_files):
# NOTE(review): the general loop is disabled; only July_17 is processed and
# `num` is hard-coded so the pickle name resolves to J17.pkl.
for file in ['July_17']:
    num=14
    if file == 'July_03':
        continue
    # =============================================================================
    # extract train data for the selected day
    # =============================================================================
    print(file)
    start,SampleNum,N=(0,40,500000)
    dir="data/Armin_Data/"+ file + "/pkl/"
#    selectedfile=os.listdir(dir+str(num+3))
    filename = dir+'J'+str(num+3)+'.pkl'
    X_train= load_data(start,SampleNum,N,filename)
    #batch_count = X_train.shape[0] / batch_size
    X_train=X_train.reshape(N,12*SampleNum)
    X_train=X_train.reshape(N,SampleNum,12)
    # =============================================================================
    # calculate the score for the selected day
    # =============================================================================
    #a=discriminator.predict_on_batch(X_train)
    rate=1000
    shift=N/rate
    scores=[]
    for i in range(rate-1):
        temp=discriminator.predict_on_batch(X_train[int(i*shift):int((i+1)*shift)])
        scores.append(temp)
        print(i)
    scores=np.array(scores)
    scores=scores.ravel()
    probability_mean=np.mean(scores)
    a=scores-probability_mean
    # =============================================================================
    # obtain the boundaries for events
    # =============================================================================
    zp=2  # z-score threshold: windows beyond +/-2 std are flagged
    data = a
    # Fit a normal distribution to the data:
    mu, std = norm.fit(data)
    high=mu+zp*std
    low=mu-zp*std
    anoms_1224=np.union1d(np.where(a>=high)[0], np.where(a<=low)[0])
    print(anoms_1224.shape)
    # =============================================================================
    # select the real data for the day
    # =============================================================================
    select_1224=load_real_data(filename)
    # =============================================================================
    # make file to save photos for the GAN model
    # =============================================================================
    dst="figures/all_events/"+file
    # os.mkdir(dst)
    dst=dst+"/GAN"
    os.mkdir(dst)
    # =============================================================================
    # save training number period as an events
    # =============================================================================
    anomcsvfile=dst+"/anoms_"+file+".csv"
    np.savetxt(anomcsvfile, anoms_1224, delimiter=",")
    event_points=[]
    # For each flagged window, plot V/I/P/Q around it and save the figure.
    # Window index -> sample index uses the stride SampleNum/2 = 20.
    for anom in anoms_1224:
        print(anom)
        plt.subplot(221)
        for i in [0,1,2]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('V')
        plt.subplot(222)
        for i in [3,4,5]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('I')
        plt.subplot(223)
        for i in [6,7,8]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('P')
        plt.subplot(224)
        for i in [9,10,11]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
        plt.legend('A' 'B' 'C')
        plt.title('Q')
        figname=dst+"/"+str(anom)
        plt.savefig(figname)
        plt.show()
        # =============================================================================
        # find the wide range of anomalies point to compare with Alirezas data
        # =============================================================================
        # NOTE(review): this rebinds `low`/`high` from the threshold values
        # above to sample ranges — harmless here only because the thresholds
        # are not reused afterwards.
        low=anom*20-240
        high=anom*20+240
        rng=np.arange(low,high)
        event_points.append(rng)
    event_points=np.array(event_points).ravel()
#%%
# =============================================================================
# =============================================================================
# # read pointers from matlab file: (Alireza's results)
# =============================================================================
# =============================================================================
pointers = loadmat('data/pointer.mat')
# Key into the MATLAB struct, e.g. 'Jul_17'.
pf='Jul'+"_"+file.split('_')[1]
points=pointers['pointer'][pf][0][0].ravel()
points.sort()
# =============================================================================
# common anomalies GAN and window
# =============================================================================
common_anoms=np.intersect1d(points,event_points)
dst="figures/all_events/"+file
anomcsvfile=dst+"/common"+file+".csv"
np.savetxt(anomcsvfile, common_anoms, delimiter=",")
# =============================================================================
# make folder to save Alirezas event in the same day
# =============================================================================
dst="figures/all_events/"+file
dst=dst+"/window"
os.mkdir(dst)
# =============================================================================
# save the window method event points
# =============================================================================
anomcsvfile=dst+"/anoms_"+file+".csv"
np.savetxt(anomcsvfile, points, delimiter=",")
# Plot V/I/P/Q around each window-method event point (already in samples,
# so no SampleNum/2 scaling here, unlike the GAN loop above).
for anom in points:
    print(anom)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(select_1224[i][anom-240:(anom+240)])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    figname=dst+"/"+str(anom)
    plt.savefig(figname)
    plt.show()
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,998
|
zyh88/PMU
|
refs/heads/master
|
/journal paper images.py
|
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # save heatmap to show correlation between features
# =============================================================================
# =============================================================================
# =============================================================================
# NOTE(review): load_data_with_features is not defined in this file —
# presumably provided by loading_data.py in the same project; confirm.
filename='data/Armin_Data/July_03/pkl/rawdata3.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#dds14=load_standardized_data_with_features(filename,k)
dd3=load_data_with_features(filename,k)
start,SampleNum,N=(0,40,500000)
#%%
#Manual representatives for the journal paper
class Candidates():
    """Hand-picked representative event of one cluster for the journal figures."""

    def Data(self, name, day, window):
        # Identifying triple: cluster name, day-of-July, window index.
        self.name = name
        self.day = day
        self.window = window

    def lr(self, l, r):
        # Samples kept to the left / right of the event when plotting.
        self.l = l
        self.r = r
#%%
# One representative event per cluster: name, July day, window index.
Clustersname=['inrush','capbank','med','twostepmed','dynamic','currentdown','vsag','vdown','vfreq','backtoback','onetwo','noise']
Day=[3,3,3,3,3,3,3,3,14,3,3,3]
Window=[219430,425659,88255,90415,347701,11816,46382,30703,453528,323233,13652,103866]
# =============================================================================
# =============================================================================
# # old l and r
# =============================================================================
# =============================================================================
#l=[0,0,30,140,740,-10,40,10,700,280,30,150]
#r=[40,35,230,240,140,60,270,70,150,80,65,150]
# =============================================================================
# =============================================================================
# # new l and r
# =============================================================================
# =============================================================================
# Left/right plotting margins (in samples) per representative, same order
# as Clustersname.
l=[80,0,130,240,740,40,140,60,700,280,90,150]
r=[120,35,330,340,140,110,370,120,150,80,140,150]
reps={}
for i,n in enumerate(Clustersname):
    reps[n]=Candidates()
    reps[n].Data(n,Day[i],Window[i])
    reps[n].lr(l[i],r[i])
#reps['big']=Candidates()
#reps['big'].Data('big',3,347701)
#%%
# =============================================================================
# =============================================================================
# # save events for journal
# =============================================================================
# =============================================================================
dst='journal/newchanges/'
def show_event(ev,select_1224,dst):
    """Plot one representative event as stacked |V| / |I| / cos(theta) panels.

    Parameters
    ----------
    ev : Candidates instance (uses ev.window, ev.l, ev.r, ev.name, ev.day).
    select_1224 : (>=9, T) array of raw feature series; rows 0-2 are voltage
        magnitudes, 3-5 current magnitudes, 6-8 power-factor angles.
    dst : directory prefix for the saved figure.
    """
    SampleNum=40
    c=['r','b','k']  # phase colors A/B/C
    anom=ev.window
    print(anom)
    anom=int(anom)
    # Plot margins (samples) left/right of the event position; window index
    # maps to a sample index via the stride SampleNum/2.
    space1=ev.l
    space=ev.r
    ax1=plt.subplot(311)
    for i in [0,1,2]:
        plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
    plt.grid(True)
    plt.legend([r'Phase A', r'Phase B', r'Phase C'],loc=1,fontsize= 'x-small')
    plt.ylabel(r'$|V|_{(v)}$',fontsize=10)
    ax2=plt.subplot(312,sharex=ax1)
    for i in [3,4,5]:
        from matplotlib.ticker import StrMethodFormatter
        plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
    plt.grid(True)
    plt.ylabel(r'$|I|_{(Amp)}$',fontsize=10)
    plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}')) # 1 decimal place
    ax3=plt.subplot(313,sharex=ax1)
    for i in [6,7,8]:
        plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.2f}')) # 2 decimal places
        plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
    plt.grid(True)
    plt.xlim(0,space1+space-1)
    plt.ylabel(r'$cos(\theta)$',fontsize=10)
    hfont = {'fontname':'serif'}
    plt.xlabel('Timestamps',fontsize=11,**hfont)
    figname=dst+ev.name+'_'+str(ev.window)+'_Jul'+str(ev.day)
    plt.savefig(figname,dpi=800)
    plt.grid(True)
    plt.axis('equal')
    plt.show()
    # BUG FIX: the original called bare close(), which is a NameError unless
    # running under `from pylab import *`; close the figure via pyplot.
    plt.close()
#%%
Clustersname=['inrush','capbank','med','twostepmed','dynamic','currentdown','vsag','vdown','vfreq','backtoback','onetwo','noise']
# NOTE(review): %matplotlib is an IPython magic — this cell only runs inside
# IPython/Spyder, not as a plain Python module.
%matplotlib auto
ev=reps['vdown']
dst='journal/newchanges/'
show_event(ev,dd3,dst)
#%matplotlib auto
#%%
# =============================================================================
# =============================================================================
# # big event picture
# =============================================================================
# =============================================================================
select_1224=dd3
SampleNum=40
c=['r','b','k']  # phase colors
ev=reps['dynamic']
anom=ev.window
space1=740
space=140
# plt.set(adjustable='box-forced', aspect='equal')
#ax1=plt.subplot(311)
# ax1.set(adjustable='box', aspect='equal')
# Rows 3-5 are the current magnitudes of phases A/B/C.
for i in [3,4,5]:
    plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
plt.grid(True)
plt.xlim(0,space1+space)
# 'A' 'B' 'C' concatenates to 'ABC'; matplotlib treats the string as an
# iterable of single-character labels.
plt.legend('A' 'B' 'C',loc=2)
plt.ylabel(r'$|V|_{(v)}$',fontsize=10)
plt.show()
# plt.axis('equal')
# plt.title('V')
#%%
# Same event with a much wider right margin (50000 samples).
ev=reps['dynamic']
anom=ev.window
space1=740
space=50000
#ax2=plt.subplot(312,sharex=ax1)
# ax2.set(adjustable='box', aspect='equal')
for i in [3,4,5]:
    from matplotlib.ticker import StrMethodFormatter
#    plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) # No decimal places
    plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
plt.grid(True)
plt.ylabel(r'$|I|_{(Amp)}$',fontsize=10)
plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}')) # 1 decimal place
plt.show()
#%%
# Back-to-back event, current magnitudes only.
ev=reps['backtoback']
anom=ev.window
space1=340
space=65
#ax3=plt.subplot(313,sharex=ax1)
# ax3.set(adjustable='box', aspect='equal')
for i in [3,4,5]:
    plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.2f}')) # 2 decimal places
#    plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
#    plt.legend('A' 'B' 'C')
    plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
plt.grid(True)
# plt.axis('equal')
# plt.legend('A' 'B' 'C')
plt.ylabel(r'$cos(\theta)$',fontsize=10)
hfont = {'fontname':'sans-serif'}
plt.xlabel('Timestamps',fontsize=13,**hfont)
#plt.ylim(-100,100)
plt.xlim(0,space1+space)
#figname=dst+'big'
#plt.savefig(figname,dpi=800)
plt.grid(True)
#plt.axis('equal')
plt.show()
#close()
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # save heatmap to show correlation between features
# =============================================================================
# =============================================================================
# =============================================================================
filename='data/Armin_Data/July_14/pkl/rawdata14.pkl'
k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#dds14=load_standardized_data_with_features(filename,k)
dd3=load_data_with_features(filename,k)
start,SampleNum,N=(0,40,500000)
#%%
import seaborn as sn
# LaTeX row/column labels for the 9 features.
# NOTE(review): `id` shadows the builtin id() for the rest of the session.
id=[r'$\mid V_A \mid$',r'$\mid V_B \mid$',r'$\mid V_C \mid$',r'$\mid I_A \mid$',r'$\mid I_B \mid$',r'$\mid I_C \mid$',r'$cos(\theta_A)$',r'$cos(\theta_B)$',r'$cos(\theta_C)$']
# Feature-by-feature Pearson correlation over the whole day.
corr=pd.DataFrame(np.corrcoef(dd3),index=id,columns=id)
sn.set(rc={'text.usetex': True})  # requires a LaTeX installation
#f, ax = plt.subplots(figsize=(16, 5))
#ax.set_ylabel('abc', rotation=0, fontsize=20, labelpad=20)
sn.set(font_scale=0.7)
#sn.plt.set_fontsize('18')
svm = sn.heatmap(corr,
            cbar_kws={'fraction' : 0.1},
            linewidth=0.5, annot_kws={"size": 20})
svm.tick_params(labelsize=9)
#svm.set_xlabel(fontweight='bold')
svm.set_xticklabels(svm.get_xticklabels(), rotation=0,fontweight='bold',weight='bold')
svm.set_yticklabels(svm.get_yticklabels(), rotation=0, horizontalalignment='right',fontweight='bold',weight='bold')
plt.ylabel(r'$Features \ time \ series \ for \ a \ day$',fontweight='bold',fontsize=10)
plt.xlabel(r'$Features \ time \ series \ for \ a \ day$',fontweight='bold',fontsize=10)
#svm.ylabel('hi')
#plt.show()
#ax.ylabel('hi')
#
figure = svm.get_figure()
figure.savefig('journal/figures/heatmap.png',dpi=800)
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # each case figure for cluster representative
# =============================================================================
# =============================================================================
# =============================================================================
dst='journal/'
# NOTE(review): this redefines show_event from earlier in the file and looks
# like an older draft: it ignores the `ev` argument and iterates a global
# `events`; `name`, `day` and `close` are also undefined here, so it raises
# NameError as written.  Kept verbatim — fix or delete before reuse.
def show_event(ev,select_1224,dst):
    SampleNum=40
    c=['r','b','k']
    for anom in events:
        print(anom)
        anom=int(anom)
#        anom=events[anom]
#        print(anom)
        space1=240
        space=240
        plt.subplot(311)
        for i in [0,1,2]:
            plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
        plt.legend('A' 'B' 'C')
        plt.ylabel(r'$|V|_{(v)}$',fontsize=30)
#        plt.title('V')
        plt.subplot(312)
        for i in [3,4,5]:
            from matplotlib.ticker import StrMethodFormatter
#            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) # No decimal places
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}')) # 1 decimal place
            plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
#        plt.legend('A' 'B' 'C')
        plt.ylabel(r'$|I|_{(Amp)}$',fontsize=30)
        plt.subplot(313)
        for i in [6,7,8]:
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.2f}')) # 2 decimal places
#            plt.plot(select_1224[i][anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)])
#            plt.legend('A' 'B' 'C')
            plt.plot(select_1224[i][anom*int(SampleNum/2)-space1:(anom*int(SampleNum/2)+space)],color=c[i%3])
#            plt.legend('A' 'B' 'C')
        plt.ylabel(r'$cos(\theta)$',fontweight='bold',fontsize=20)
        plt.xlabel('Timestamps',fontsize=20)
        figname=dst+"/"+name+'_'+str(day)
        plt.savefig(figname,dpi=800)
        plt.show()
        close()
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # extracting all events related to the inrush current for july 3
# =============================================================================
# =============================================================================
# =============================================================================
####inrush events
# Collect windows of day 4's clustering that fall in cluster id 6.
inrush=[]
for i in total_event_cluster_data[4]:
    if total_event_cluster_data[4][i]==6:
        inrush.append(i)
#%%
###extract the magnitude and delta for each event
# For each inrush window: locate the current peak and the power-factor dip
# inside the central 200 samples, then record the jumps relative to values
# 10 samples before/after.  (The comprehension's `i` does not leak in Py3,
# so the outer loop variable is safe.)
inrush_analysis={}
for i in inrush:
    anom=i
    wdata=dd[:,anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)]
    tempwdata=wdata[:,200:400]
    curr=tempwdata[3,:]   # phase-A current magnitude
    pf=tempwdata[6,:]     # phase-A power-factor series
    m = max(curr)
    index=[i for i, j in enumerate(curr) if j == m][0]
    imax=m
    ibefore=curr[index-10]
    iafter=curr[index+10]
    m = min(pf)
    index=[i for i, j in enumerate(pf) if j == m][0]
    pfbefore=pf[index-10]
    pfafter=pf[index+10]
    # [inrush spike height, steady-state current change, pf change]
    inrush_analysis[i]=[imax-ibefore,iafter-ibefore,pfafter-pfbefore]
#%%
v=pd.DataFrame(inrush_analysis)
#%%
#plt.scatter(v.iloc[0],v.iloc[1])
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x =v.iloc[0]
y =v.iloc[1]
z =v.iloc[2]
ax.scatter(x, y, z, c='r', marker='o')
plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}'))
ax.set_xlabel(r'$\Delta(I_{inrush})$',fontweight='bold',fontsize=10)
ax.set_ylabel(r'$\Delta(I_{steady \ state})$',fontweight='bold',fontsize=10)
ax.set_zlabel(r'$\Delta(cos(\theta)_{steady \ state})$',fontweight='bold',fontsize=10)
figname=dst+"/"+str('inrushscatter3d')
plt.savefig(figname,dpi=800)
plt.show()
#%%
# =============================================================================
# =============================================================================
# # 3d inrush
# =============================================================================
# =============================================================================
# NOTE(review): `inrvalue` and `markers` are not defined in this file —
# presumably built in an earlier session; confirm before running.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
import matplotlib.pyplot as plt
import numpy as np
x =np.array(list(inrvalue.iloc[0]))
y =np.array(list(inrvalue.iloc[1]))
z =np.array(list(inrvalue.iloc[2]))
c=np.array(list(inrvalue.iloc[3]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# One scatter group per marker category (row 4 holds the marker key).
for i,j in enumerate(markers):
    idata=inrvalue.loc[:,inrvalue.iloc[4]==j]
    x =np.array(list(idata.iloc[0]))
    y =np.array(list(idata.iloc[1]))
    z =np.array(list(idata.iloc[2]))
    c=np.array(list(idata.iloc[3]))
    ax.scatter(x,y,z,c=c)
#
#ax.scatter(x,y,z,c=c)
#
#x =v.iloc[0]
#y =v.iloc[1]
#z =v.iloc[2]
#
#plt.scatter(inrvalue.iloc[0],inrvalue.iloc[1],inrvalue.iloc[2],c=inrvalue.iloc[3])
#
##ax.scatter(x, y, z, c='r', marker='o')
#plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}'))
#plt.xlabel(r'$\Delta(I_{inrush})$',fontweight='bold',fontsize=15)
#plt.ylabel(r'$\Delta(I_{steady \ state})$',fontweight='bold',fontsize=15)
#plt.ylabel(r'$\Delta(pf_{steady \ state})$',fontweight='bold',fontsize=15)
##ax.set_zlabel(r'$\Delta(cos(\theta)_{steady \ state})$',fontweight='bold',fontsize=10)
#
#figname=dst+"/"+str('inrushscatter2d')
#plt.savefig(figname,dpi=800)
plt.show()
#%%
# =============================================================================
# =============================================================================
# # inr event statistical figures
# =============================================================================
# =============================================================================
####medium events
# Build per-event inrush statistics across all days. P and Q are derived
# here as V*I*pf and V*I*sqrt(1-pf^2); TA/TB/TC are presumably the
# per-phase power factors (they are used as `pf` below) -- verify.
inr_analysis={}
count=0
inr={}
colors=['r','b','c','k','g','y']
markers=['.','^','s','*','+','d']
d=0   # day index, selects the day's color/marker
for day in total_event_cluster_data:
    inr[day]=[]
    # cluster label 6 == inrush event
    for i in total_event_cluster_data[day]:
        if total_event_cluster_data[day][i]==6:
            inr[day].append(i)
    # NOTE(review): 'July_0'+str(day) only works for single-digit days.
    filename='data/Armin_Data/July_0'+str(day)+'/pkl/rawdata'+str(day)+'.pkl'
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#dds4=load_standardized_data_with_features(filename,k)
    dayta=load_data_with_features(filename,k)
###extract the magnitude and delta for each event
    for i in inr[day]:
        anom=i
        # 480-sample window centered on the event, then its middle 200 slots
        wdata=dayta[:,anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)]
        tempwdata=wdata[:,200:400]
        curr=tempwdata[3,:]   # phase-A current magnitude
        active=tempwdata[0]*tempwdata[3]*tempwdata[6]
        reactive=tempwdata[0]*tempwdata[3]*(np.sqrt(1-tempwdata[6]**2))
        pf=tempwdata[6,:]
        m = max(curr)
        index=[i for i, j in enumerate(curr) if j == m][0]
        # keep only events whose current peak is away from the window edges
        if index<170 and index>30:
            imax=m
            ibefore=curr[index-30]
            iafter=curr[index+30]
            m = min(pf)
            index=[i for i, j in enumerate(pf) if j == m][0]
            # and whose power-factor dip is also away from the edges
            if index<170 and index>30:
                pfbefore=pf[index-30]
                activebefore=active[index-30]
                reactivebefore=reactive[index-30]
                pfafter=pf[index+30]
                activepost=active[index+30]
                reactivepost=reactive[index+30]
                color=colors[d]
                marker=markers[d]
                inr_analysis[count]=[imax-ibefore,iafter-ibefore,activebefore,reactivebefore,
                                     activepost,reactivepost,pfbefore,pfafter,color,marker,anom,d]
                count+=1
    d+=1
#%%
####medium events
# Second variant of the inrush statistics: read P/Q directly from the
# 'j<day>.pkl' files and take the power factor from the raw files, instead
# of deriving P/Q from V*I*pf as above.
inr_analysis={}
count=0
inr={}
colors=['r','b','c','k','g','y']
markers=['.','^','s','*','+','d']
d=0
for day in total_event_cluster_data:
    inr[day]=[]
    for i in total_event_cluster_data[day]:
        if total_event_cluster_data[day][i]==6:
            inr[day].append(i)
    # NOTE(review): single-digit day assumption, same as the cell above.
    filename='data/Armin_Data/July_0'+str(day)+'/pkl/j'+str(day)+'.pkl'
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','PA', 'PB', 'PC','QA', 'QB', 'QC']
#dds4=load_standardized_data_with_features(filename,k)
    dayta=load_data_with_features(filename,k)
    filename='data/Armin_Data/July_0'+str(day)+'/pkl/rawdata'+str(day)+'.pkl'
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#dds4=load_standardized_data_with_features(filename,k)
    pfdata=load_data_with_features(filename,k)
###extract the magnitude and delta for each event
    for i in inr[day]:
        anom=i
        wdata=dayta[:,anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)]
        pfwdata=pfdata[:,anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)]
        tempwdata=wdata[:,200:400]
        pfwdata=pfwdata[:,200:400]
        curr=tempwdata[3,:]      # C1MAG
        active=tempwdata[6,:]    # PA
        reactive=tempwdata[9,:]  # QA
        pf=pfwdata[6,:]          # TA from the raw file
#        pf=tempwdata[6,:]
        m = max(curr)
        index=[i for i, j in enumerate(curr) if j == m][0]
        # keep only events whose current peak is away from the window edges
        if index<170 and index>30:
            imax=m
            ibefore=curr[index-30]
            iafter=curr[index+30]
            activebefore=active[index-30]
            reactivebefore=reactive[index-30]
            activepost=active[index+30]
            reactivepost=reactive[index+30]
            pfbefore=pf[index-30]
            pfafter=pf[index+30]
            color=colors[d]
            marker=markers[d]
            inr_analysis[count]=[imax-ibefore,iafter-ibefore,activebefore,reactivebefore,
                                 activepost,reactivepost,pfbefore,pfafter,color,marker,anom,d]
            count+=1
    d+=1
#%%
#inrvalue=pd.DataFrame(inr_analysis)
#inrvalue.iloc[1]=inrvalue.iloc[1]+1
# NOTE(review): row 11 is the day index `d`; adding 4 shifts day labels
# (e.g. 0..6 -> 4..10). Confirm this is the intended day-of-month
# correction and that it is not applied twice on re-runs of this cell.
inrvalue.iloc[11]=inrvalue.iloc[11]+4
#%%
# Maximize the figure window (Qt backend specific; fails on Agg/Tk).
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
# NOTE(review): rows 7 and 6 are pf-after and pf-before in the inr_analysis
# layout, yet the axis labels below claim current deltas -- verify which
# quantities were meant to be plotted.
plt.scatter(inrvalue.iloc[7],inrvalue.iloc[6],c=inrvalue.iloc[8])
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(r'$\Delta(I_{inrush})$',fontweight='bold',fontsize=30)
plt.ylabel(r'$\Delta(I_{steady \ state})$',fontweight='bold',fontsize=30)
#figname=dst+"/"+str('inrushscatter2dwcolor7days')
plt.show()
#plt.savefig(figname,dpi=800)
#%%
# Per-day scatter, selecting columns by the day's marker glyph.
for i,j in enumerate(markers):
    # NOTE(review): row 4 is activepost and row 9 the marker glyph in the
    # inr_analysis layout -- iloc[4]==j looks like it should be iloc[9]==j.
    idata=inrvalue.loc[:,inrvalue.iloc[4]==j]
    plt.scatter(idata.iloc[3],idata.iloc[1]+1,c=idata.iloc[3],s=20)
#%%
# Export the numeric rows of inrvalue for MATLAB post-processing.
# NOTE(review): `scipy` is not imported in this file's visible imports;
# presumably `import scipy.io` ran earlier in the session -- confirm.
scipy.io.savemat('inrvalue.mat', {'data':[list(inrvalue.iloc[0].values),
                                          list(inrvalue.iloc[1].values),
                                          list(inrvalue.iloc[2].values),
                                          list(inrvalue.iloc[3].values),
                                          list(inrvalue.iloc[4].values),
                                          list(inrvalue.iloc[5].values),
                                          list(inrvalue.iloc[6].values),
                                          list(inrvalue.iloc[7].values),
                                          list(inrvalue.iloc[11].values)]})
#%%
# =============================================================================
# =============================================================================
# # medium event statistical figures
# =============================================================================
# =============================================================================
####medium events
# For each 'medium' event (cluster label 3), estimate the excursion duration
# (slots the current stays above the window mean) and the steady-state
# current change across the excursion.
med_analysis={}
count=0
med={}
colors=['r','b','c','k','g','y']
markers=['.','^','s','*','+','d']
d=0
for day in total_event_cluster_data:
    med[day]=[]
    for i in total_event_cluster_data[day]:
        if total_event_cluster_data[day][i]==3:
            med[day].append(i)
    # NOTE(review): single-digit day assumption in the path.
    filename='data/Armin_Data/July_0'+str(day)+'/pkl/rawdata'+str(day)+'.pkl'
    k=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG','TA', 'TB', 'TC']
#dds4=load_standardized_data_with_features(filename,k)
    dayta=load_data_with_features(filename,k)
###extract the magnitude and delta for each event
    for i in med[day]:
        anom=i
        wdata=dayta[:,anom*int(SampleNum/2)-240:(anom*int(SampleNum/2)+240)]
        tempwdata=wdata
        curr=tempwdata[3,:]
        mx = max(curr)   # NOTE(review): mx, mi and eps are computed but unused
        mi = min(curr)
        mean=np.mean(curr)
        eps=0.2
        index=[]
        cr=0
        # Find the first rise above the mean and the following drop back
        # below it. NOTE(review): the loop variable `i` shadows the event id
        # of the enclosing loop; safe only because `anom` was saved above.
        for i,j in enumerate(curr):
            if j>mean and cr==0:
                index.append(i)
                cr=1
            if cr==1 and j<=mean:
                cr=2
                index.append(i)
        if len(index)==2:
            # keep only excursions away from the 480-sample window edges
            if index[0]>10 and index[1]<470:
                before=curr[index[0]-10]
                after=curr[index[1]+10]
                durr=index[1]-index[0]
                color=colors[d]
                marker=markers[d]
                med_analysis[count]=[durr,after-before,color,marker,anom]
                count+=1
#            inr_analysis[count]=[imax-ibefore,iafter-ibefore,pfafter-pfbefore,color,marker,anom]
        # NOTE(review): `count` is incremented a second time here, so each
        # recorded event consumes two ids and med_analysis keys have gaps --
        # harmless for the DataFrame built later, but likely unintended.
        count+=1
    d+=1
#%%
# Columns: [duration, delta I, color, marker, window index] per event.
medvalue=pd.DataFrame(med_analysis)
#%%
plt.scatter(medvalue.iloc[0],medvalue.iloc[1])
#%%
# Per-day scatter; row 3 holds the day marker glyph here (unlike inrvalue).
for i,j in enumerate(markers):
    idata=medvalue.loc[:,medvalue.iloc[3]==j]
    # NOTE(review): the +4 (duration) and +1 (delta I) offsets shift the
    # plotted data itself -- confirm these are deliberate corrections.
    plt.scatter(idata.iloc[0]+4,idata.iloc[1]+1,c=idata.iloc[2])
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(r'$Duration (timeslots)$',fontweight='bold',fontsize=30)
plt.ylabel(r'$\Delta(I_{steady \ state})$',fontweight='bold',fontsize=30)
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
21,999
|
zyh88/PMU
|
refs/heads/master
|
/pv pmu data cleaning with features as column.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 12:26:15 2019
@author: hamed
"""
import numpy as np
import tensorflow as tf
import pandas as pd
import os
import pickle as pkl
import matplotlib.pyplot as plt
import operator
import math
#%%
# =============================================================================
# =============================================================================
# # read one file of the PMU data , each file is for 10 minutes
# =============================================================================
# =============================================================================
# whole data filenames in the data directory (raw July-1 CSV dumps)
filenames=os.listdir("data/jul 1")
#%%
# importing data from a file function
def cleancsv(filename, src_dir="data/jul 1", dst_dir="data/jul1sorted", skip_lines=6):
    """Strip the leading header block from one raw PMU CSV.

    Reads ``src_dir/filename``, drops the first *skip_lines* lines, and
    writes the remaining lines verbatim to ``dst_dir/filename``. The
    destination directory is created if it does not exist yet.

    The parameter defaults reproduce the original hard-coded July-1
    locations, so existing ``cleancsv(file)`` calls behave exactly as
    before; the new keywords let the same helper clean other days.
    """
    pathr = os.path.join(src_dir, filename)
    os.makedirs(dst_dir, exist_ok=True)  # avoid FileNotFoundError on a fresh checkout
    pathw = os.path.join(dst_dir, filename)
    with open(pathr, 'r') as src, open(pathw, 'w') as out:
        # Skip the header block; a file shorter than skip_lines now yields
        # an empty output instead of raising a bare StopIteration.
        for _ in range(skip_lines):
            if next(src, None) is None:
                break
        for line in src:
            out.write(line)
#%%
# Strip the header block from every raw July-1 CSV into data/jul1sorted/.
for file in filenames:
    cleancsv(file)
#%%
# =============================================================================
# =============================================================================
# # make time
# =============================================================================
# =============================================================================
# One hour of timeslot indices at 60 samples/s.
# (.transpose() is a no-op on a 1-D array; kept for byte-compatibility.)
samplingrate=60
timenum=3600*samplingrate
timeslots=np.arange(0,timenum).transpose()
#%%
# Header-stripped CSVs produced by the cleancsv loop above.
filenames=os.listdir("data/jul1sorted")
#%%
# importing data from a file function
def OneFileImport(filename, dir_name="data/jul1sorted"):
    """Load one cleaned (header-stripped) PMU CSV into a DataFrame.

    ``dir_name`` defaults to the original hard-coded location, so the
    existing ``OneFileImport(file)`` call sites are unchanged; the keyword
    lets the helper be reused for other sorted-data directories.
    """
    path = os.path.join(dir_name, filename)
    return pd.read_csv(path)
#%%
# For every cleaned hourly CSV: rename columns to canonical V/I mag-angle
# names, compute per-phase P and Q from magnitudes and angle differences,
# and pickle the selected feature dict to data/jul1pkl/<hour>.pkl.
samplingrate=60
timenum=3600*samplingrate
timeslots=np.arange(0,timenum).transpose()
for file in filenames:
    print(file)
    data=OneFileImport(file)
    k=data.keys()
    # NOTE(review): columns are dropped and renamed purely by position --
    # this assumes the raw CSV schema is fixed (17 columns, V/I pairs at
    # positions 3..14). Verify against the PMU export format.
    data=data.drop(columns=[k[0],k[1],k[2],k[15],k[16]])
    k=data.keys()
    f=['L1MAG','L1ANG','L2MAG','L2ANG','L3MAG','L3ANG','C1MAG','C1ANG','C2MAG','C2ANG','C3MAG','C3ANG']
    for count,i in enumerate(k):
        data=data.rename(index=str, columns={i:f[count]})
    # truncate to exactly one hour of samples
    data=data.iloc[0:timenum]
    Active={}
    Reacive={}   # NOTE(review): 'Reacive' is a typo for 'Reactive' (kept as-is)
    #keys={}
    # pf={}
    selected_data={}
    ##
    # P = |V||I|cos(angle_V - angle_I), Q = |V||I|sin(angle_V - angle_I),
    # with angles converted from degrees to radians.
    Active['A']=data['L1MAG']*data['C1MAG']*(np.cos((data['L1ANG']-data['C1ANG'])*(np.pi/180)))
    Active['B']=data['L2MAG']*data['C2MAG']*(np.cos((data['L2ANG']-data['C2ANG'])*(np.pi/180)))
    Active['C']=data['L3MAG']*data['C3MAG']*(np.cos((data['L3ANG']-data['C3ANG'])*(np.pi/180)))
    Reacive['A']=data['L1MAG']*data['C1MAG']*(np.sin((data['L1ANG']-data['C1ANG'])*(np.pi/180)))
    Reacive['B']=data['L2MAG']*data['C2MAG']*(np.sin((data['L2ANG']-data['C2ANG'])*(np.pi/180)))
    Reacive['C']=data['L3MAG']*data['C3MAG']*(np.sin((data['L3ANG']-data['C3ANG'])*(np.pi/180)))
    # pf['A']=Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
    # pf['B']=Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
    # pf['C']=Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
    #
    #
    selected_data['PA']=Active['A']
    selected_data['PB']=Active['B']
    selected_data['PC']=Active['C']
    selected_data['QA']=Reacive['A']
    selected_data['QB']=Reacive['B']
    selected_data['QC']=Reacive['C']
    features=['L1MAG','L2MAG', 'L3MAG','C1MAG','C2MAG', 'C3MAG']
    # NOTE(review): this loop reuses `f`, clobbering the rename list above
    # (harmless on later iterations because f is reassigned each pass).
    for f in features:
        selected_data[f]=data[f]
    selected_data['timeslot']=timeslots
    # hour id comes from the numeric filename stem, e.g. '3.csv' -> 3
    selected_data['hour']=np.ones(timenum)*(int(file.split(sep='.')[0]))
    # selected_data['pfA']=pf['A']
    # selected_data['pfB']=pf['B']
    # selected_data['pfC']=pf['C']
    form='.pkl'
    filename=file.split(sep='.')[0]+form
    dir_name="data/jul1pkl"
    path=os.path.join(dir_name,filename)
    print(path)
    output = open(path, 'wb')
    pkl.dump(selected_data, output)
    output.close()
#%%
# Sanity check: reload the first pickled hour into `dd`.
with open("data/jul1pkl/1.pkl", 'rb') as pickled_hour:
    dd = pkl.load(pickled_hour)
#%%
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
22,000
|
zyh88/PMU
|
refs/heads/master
|
/test_GAN_Clasiffication.py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import keras
from keras.layers import Dense, Dropout, Input, Embedding, LSTM, Reshape, CuDNNLSTM
from keras.models import Model,Sequential
from keras.datasets import mnist
from tqdm import tqdm
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.optimizers import adam
import numpy as np
import tensorflow as tf
import pickle as pkl
import operator
import math
from sklearn import preprocessing
from keras.models import load_model
import time
from scipy.stats import norm
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.fftpack import fft, ifft
from dtw import dtw
from fastdtw import fastdtw
import time
from scipy.spatial.distance import euclidean
from tslearn.clustering import GlobalAlignmentKernelKMeans
#%%
# =============================================================================
# =============================================================================
# # standardized data extraction
# =============================================================================
# =============================================================================
#filename='data/Armin_Data/July_03/pkl/jul3.pkl'
def load_standardized_data(filename):
    """Load PMU '1224' features from a pickle and z-score each feature row.

    Returns a (12, T) array ordered V, I, P, Q per phase, with every row
    standardized to zero mean / unit variance along time.
    """
    pmu = '1224'
    with open(filename, 'rb') as pkl_file:
        raw = pkl.load(pkl_file)
    frame = pd.DataFrame(raw).fillna(method='ffill')
    print(frame.keys())
    data = frame[pmu]
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    select = np.array([list(data[f]) for f in features])
    print(select.shape)
    # per-row standardization (sklearn.preprocessing imported at file level)
    return preprocessing.scale(select, axis=1)
#%%
# =============================================================================
# =============================================================================
# # real (unscaled) data extraction
# =============================================================================
# =============================================================================
#filename='data/Armin_Data/July_03/pkl/jul3.pkl'
def load_real_data(filename):
    """Load the raw (unscaled) PMU '1224' feature matrix from a pickle.

    Same (12, T) layout as load_standardized_data -- V, I, P, Q per
    phase -- but without the z-scoring step.
    """
    pmu = '1224'
    with open(filename, 'rb') as pkl_file:
        raw = pkl.load(pkl_file)
    frame = pd.DataFrame(raw).fillna(method='ffill')
    print(frame.keys())
    data = frame[pmu]
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    return np.array([list(data[f]) for f in features])
#%%
# Raw (unscaled) July-3 feature matrix for PMU 1224.
filename='data/Armin_Data/July_03/pkl/J3.pkl'
select_1224=load_real_data(filename)
#%%
#start,SampleNum,N=(0,40,500000)
#group={}
#group['0']=[]
#group['1']=[]
#for window in range(N):
# if window>=0:
# print(window)
#
#
# plt.subplot(221)
# for i in [0,1,2]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('V')
#
# plt.subplot(222)
# for i in [3,4,5]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('I')
#
# plt.subplot(223)
# for i in [6,7,8]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('P')
#
# plt.subplot(224)
# for i in [9,10,11]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('Q')
# plt.show()
#
# gr=input("which group?: ")
#
# if not gr in group:
# print('wrong')
# gr=input("which group?: ")
# group[gr].append(window)
# else:
# group[gr].append(window)
#%%
#import _thread
#import threading
#start,SampleNum,N=(0,40,500000)
#eventwindow=[]
##group['0']=[]
##group['1']=[]
#thresh=427104
#while thresh<500000:
# try:
# for window in range(N):
# if window>=thresh:
# print(window)
#
#
# plt.subplot(221)
# for i in [0,1,2]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('V')
#
# plt.subplot(222)
# for i in [3,4,5]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('I')
#
# plt.subplot(223)
# for i in [6,7,8]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('P')
#
# plt.subplot(224)
# for i in [9,10,11]:
# plt.plot(select_1224[i][window*int(SampleNum/2):(window*int(SampleNum/2)+40)])
# plt.legend('A' 'B' 'C')
# plt.title('Q')
# plt.show()
## time.sleep(1)
# except KeyboardInterrupt:
# window=input("which group?: ")
#
# eventwindow.append(int(window))
# thresh=int(window)
#
# real_event="data/Armin_Data/eventwindowbyhand.pkl"
# output = open(real_event, 'wb')
# pkl.dump(eventwindow, output)
# output.close()
#%%
#pkl_file = open(real_event, 'rb')
#real_event = pkl.load(pkl_file)
#pkl_file.close()
#
##%%
#import signal
#def interrupted(signum, frame):
# print("Timeout!")
#signal.signal(signal.SIGALRM, interrupted)
#signal.alarm(5)
#try:
# s = input("::>")
#except:
# print("You are interrupted.")
#signal.alarm(0)
##%%
#
#real_event="data/Armin_Data/categories.pkl"
#output = open(real_event, 'wb')
#pkl.dump(group, output)
#output.close()
##%%
#pkl_file = open(real_event, 'rb')
#real_event = pkl.load(pkl_file)
#pkl_file.close()
#
#%%
# =============================================================================
# List the per-day result folders under figures/all_events/ for the loop below.
# =============================================================================
files=os.listdir('figures/all_events/')
#%%
# =============================================================================
# =============================================================================
# =============================================================================
# # # take out anomalies
# =============================================================================
# =============================================================================
# =============================================================================
# Gather GAN-detected anomaly indices for each day folder: the CSV in
# '<day>/GAN' goes under key <day>, the CSV in '<day>/GAN_voltage' under
# key '<day>v'.
anomalies={}
for num,file in enumerate(files):
    if num<13:   # only the first 13 directory entries are processed
        if not file.endswith(".txt"):
            # NOTE(review): `dir` shadows the builtin of the same name.
            dir='figures/all_events/'
            dir=dir+file+"/GAN"
            tempfiles=os.listdir(dir)
            for f in tempfiles:
                if f.endswith(".csv"):
                    anomfile=dir+'/'+f
                    ta=pd.read_csv(anomfile)
                    anomalies[file]=ta.values   # last matching CSV wins
            print(dir)
            dir='figures/all_events/'
            dir=dir+file+"/GAN_voltage"
            tempfiles=os.listdir(dir)
            for f in tempfiles:
                if f.endswith(".csv"):
                    anomfile=dir+'/'+f
                    ta=pd.read_csv(anomfile)
                    anomalies[file+'v']=ta.values
            print(dir)
#%%
# =============================================================================
# =============================================================================
# # save all the anomalies for the GAN model
# =============================================================================
# =============================================================================
output = open('figures/all_events/All_GAN_anomalies.pkl', 'wb')
pkl.dump(anomalies, output)
output.close()
#%%
# =============================================================================
# read anomalies back (lets later runs skip the harvesting loop above)
# =============================================================================
pkl_file = open('figures/all_events/All_GAN_anomalies.pkl', 'rb')
anomalies = pkl.load(pkl_file)
pkl_file.close()
#%%
# Standardized July-3 feature matrix for PMU 1224.
select_1224=load_standardized_data('data/Armin_Data/July_03/pkl/J3.pkl')
#%%
# Cut a 240-sample window (+/-120 samples around the anomalous half-window
# position) out of all 12 feature rows for each detected anomaly.
start,SampleNum,N=(0,40,500000)
event_points={}
for anom in anomalies['July_03']:
    anom=int(anom)
    event_points[anom]=select_1224[0:12,anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)]
#%%
# =============================================================================
# FFT magnitude features for each July-3 event window.
# For every event, take |FFT| bins 1..119 (DC dropped) of the three phases
# of V, I, P and Q and concatenate them into one 12*119-long vector, in
# V | I | P | Q order.
# =============================================================================
fft_scores = {}
fs = []
for raw_event in anomalies['July_03']:
    ev = int(raw_event)
    window = event_points[ev]
    bands = []
    for base in (0, 3, 6, 9):  # row offsets of the V, I, P, Q channel groups
        mags = [np.absolute(fft(window[base + phase])[1:120]) for phase in range(3)]
        bands.append(np.concatenate(mags))
    score = np.concatenate(bands)
    fft_scores[ev] = score
    fs.append(score.copy())
fs = np.array(fs)
#%%
# =============================================================================
# =============================================================================
# # classifying the events with fft
# =============================================================================
# =============================================================================
# Sweep k for KMeans on the FFT features and track the best silhouette score.
X=fs
mm=0
for n_clusters in np.arange(2,15):
    clusterer = KMeans(n_clusters=n_clusters, random_state=0)
    cluster_labels = clusterer.fit_predict(X)
    silhouette_avg = silhouette_score(X, cluster_labels)
    if silhouette_avg >mm:
        mm=silhouette_avg
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
print(mm)
#%%
# =============================================================================
# =============================================================================
# # best so far
# =============================================================================
# =============================================================================
# Refit with the chosen cluster count; cluster_labels aligns with the row
# order of `fs` (and hence with the insertion order of event_points).
n_clusters=6
clusterer = KMeans(n_clusters=n_clusters, random_state=0)
cluster_labels = clusterer.fit_predict(X)
#%%
# =============================================================================
# =============================================================================
# # show the cluster center
# =============================================================================
# =============================================================================
# Average the raw event windows in each cluster and plot the V/I/P/Q
# centroids. Relies on event_points preserving the same insertion order as
# `fs` (both follow anomalies['July_03']), so cluster_labels[num] matches.
centers={}
for cl in range(n_clusters):
    count=0
    print(cl)
    centers[cl]=np.zeros((12,240))
    for num,event in enumerate(event_points):
        if cluster_labels[num]==cl:
            count+=1
            centers[cl]+=event_points[event]
    # NOTE(review): divides by zero if a cluster ends up empty.
    centers[cl]=centers[cl]/count
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(centers[cl][i])
    plt.legend('A' 'B' 'C')   # adjacent literals concatenate: labels A, B, C
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(centers[cl][i])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(centers[cl][i])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(centers[cl][i])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    plt.show()
#%%
# Plot up to 20 FFT feature vectors per cluster for visual inspection.
for cl in range(n_clusters):
    count=0
    for num,event in enumerate(event_points):
        if cluster_labels[num]==cl:
            if count<20:
                print(cl)
                plt.plot(fft_scores[event])
                plt.show()
            count+=1
#%%
# =============================================================================
# =============================================================================
# # show some sample from each cluster in one day
# =============================================================================
# =============================================================================
#cl=0
# Show two raw V/I/P/Q windows per cluster.
for cl in range(n_clusters):
    count=0
    for num,event in enumerate(event_points):
        if cluster_labels[num]==cl:
            if count<2:
                print(cl)
                plt.subplot(221)
                for i in [0,1,2]:
                    plt.plot(event_points[event][i])
                plt.legend('A' 'B' 'C')
                plt.title('V')
                plt.subplot(222)
                for i in [3,4,5]:
                    plt.plot(event_points[event][i])
                plt.legend('A' 'B' 'C')
                plt.title('I')
                plt.subplot(223)
                for i in [6,7,8]:
                    plt.plot(event_points[event][i])
                plt.legend('A' 'B' 'C')
                plt.title('P')
                plt.subplot(224)
                for i in [9,10,11]:
                    plt.plot(event_points[event][i])
                plt.legend('A' 'B' 'C')
                plt.title('Q')
                plt.show()
            count+=1
#            count+=1
#%%
# Plot every July-3 event window plus the DC-removed FFT of its phase-A
# voltage and current.
for event in anomalies['July_03']:
    print(event)
    event=int(event)
    plt.subplot(221)
    for i in [0,1,2]:
        plt.plot(event_points[event][i])
    plt.legend('A' 'B' 'C')
    plt.title('V')
    plt.subplot(222)
    for i in [3,4,5]:
        plt.plot(event_points[event][i])
    plt.legend('A' 'B' 'C')
    plt.title('I')
    plt.subplot(223)
    for i in [6,7,8]:
        plt.plot(event_points[event][i])
    plt.legend('A' 'B' 'C')
    plt.title('P')
    plt.subplot(224)
    for i in [9,10,11]:
        plt.plot(event_points[event][i])
    plt.legend('A' 'B' 'C')
    plt.title('Q')
    plt.show()
    # NOTE(review): fft output is complex; plt.plot keeps only the real part
    # (emitting a ComplexWarning) -- np.abs(v[1:120]) was probably intended.
    v=fft(event_points[event][0]-np.mean(event_points[event][0]))
    plt.plot(v[1:120])
    plt.show()
    i=fft(event_points[event][3]-np.mean(event_points[event][3]))
    plt.plot(i[1:120])
    plt.show()
#%%
#%%
#data_files=os.listdir('data/Armin_Data')
#event_points={}
#start,SampleNum,N=(0,40,500000)
#for day in anomalies:
# print(day)
# anoms=anomalies[day]
# dir="data/Armin_Data/"+ day + "/pkl/"
# selectedfile=os.listdir(dir)[0]
# filename = dir + selectedfile
# select_1224=load_standardized_data(filename)
# event_points[day]={}
# for anom in anoms:
# anom=int(anom)
# event_points[day][anom]=select_1224[0:12,anom*int(SampleNum/2)-120:(anom*int(SampleNum/2)+120)]
#
# #%%
#
#eventpointsfile="data/Armin_Data/event_hand_standardized.pkl"
##%%
#output = open(eventpointsfile, 'wb')
#pkl.dump(event_points, output)
#output.close()
##%%
#pkl_file = open(eventpointsfile, 'rb')
#event_points = pkl.load(pkl_file)
#pkl_file.close()
#%%
# =============================================================================
# =============================================================================
# # classifying first day events by hand
# =============================================================================
# =============================================================================
#group={}
##%%
#for event in event_points['July_03']:
# if event>0:
# print(event)
#
# plt.subplot(221)
# for i in [0,1,2]:
# plt.plot(event_points['July_03'][event][i])
# plt.legend('A' 'B' 'C')
# plt.title('V')
#
# plt.subplot(222)
# for i in [3,4,5]:
# plt.plot(event_points['July_03'][event][i])
# plt.legend('A' 'B' 'C')
# plt.title('I')
#
# plt.subplot(223)
# for i in [6,7,8]:
# plt.plot(event_points['July_03'][event][i])
# plt.legend('A' 'B' 'C')
# plt.title('P')
#
# plt.subplot(224)
# for i in [9,10,11]:
# plt.plot(event_points['July_03'][event][i])
# plt.legend('A' 'B' 'C')
# plt.title('Q')
# plt.show()
#
# gr=input("which group?: ")
#
# if not gr in group:
# permission=input('sure?')
# if permission=='y':
# group[gr]=[event]
# else:
# gr=input("which group?: ")
# if not gr in group:
# permission=input('sure?')
# if permission=='y':
# group[gr]=[event]
# else:
# group[gr].append(event)
##%%
## =============================================================================
## =============================================================================
## # save the groups
## =============================================================================
## =============================================================================
#
#categoriesfile="data/Armin_Data/categories.pkl"
##%%
#output = open(categoriesfile, 'wb')
#pkl.dump(group, output)
#output.close()
##%%
#pkl_file = open(categoriesfile, 'rb')
#saved_group = pkl.load(pkl_file)
#pkl_file.close()
#%%
# =============================================================================
# =============================================================================
# # show the mean value of each category
# =============================================================================
## =============================================================================
#
#count=0
#for g in saved_group:
# group_size=len(saved_group[g])
# for event in saved_group[g]:
# if count==0:
# mean_events=event_points['July_03'][event]
# count=1
# else:
# mean_events+=event_points['July_03'][event]
# mean_events=mean_events/group_size
# print("group name: ",g," number of events: ",group_size)
# plt.subplot(221)
# for i in [0,1,2]:
# plt.plot(mean_events[i])
# plt.legend('A' 'B' 'C')
# plt.title('V')
#
# plt.subplot(222)
# for i in [3,4,5]:
# plt.plot(mean_events[i])
# plt.legend('A' 'B' 'C')
# plt.title('I')
#
# plt.subplot(223)
# for i in [6,7,8]:
# plt.plot(mean_events[i])
# plt.legend('A' 'B' 'C')
# plt.title('P')
#
# plt.subplot(224)
# for i in [9,10,11]:
# plt.plot(mean_events[i])
# plt.legend('A' 'B' 'C')
# plt.title('Q')
# plt.show()
# print(".......................")
#%%
# =============================================================================
# =============================================================================
# # save the anomalies standardized data for 15 days
# =============================================================================
# =============================================================================
#anomcsvfile="data/Armin_Data/anomsknnformat.pkl"
#output = open(anomcsvfile, 'wb')
#pkl.dump(event_points, output)
#output.close()
#%%
# =============================================================================
# =============================================================================
# # read event_points
# =============================================================================
## =============================================================================
#anomcsvfile="data/Armin_Data/anomsknnformat.pkl"
#pkl_file = open(anomcsvfile, 'rb')
#event_points = pkl.load(pkl_file)
#pkl_file.close()
##%%
#X=[]
#for day in event_points:
# for event in event_points[day]:
# X.append(event_points[day][event].ravel())
#X=np.array(X)
##%%
#kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
##%%
#
#for n_clusters in np.arange(10,40):
# clusterer = KMeans(n_clusters=n_clusters, random_state=10)
# cluster_labels = clusterer.fit_predict(X)
# silhouette_avg = silhouette_score(X, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
#
##%%
##pkl_file = open(anomcsvfile, 'rb')
##test = pkl.load(pkl_file)
#pkl_file.close()
##%%
#similarity_matrix=[]
#similarity_scores={}
#tik=time.clock()
#for day1 in event_points:
# similarity_scores[day1]={}
# print(day1)
# for anom1 in event_points[day1]:
# print(anom1)
# temp_similarity=[]
#
# similarity_scores[day1][anom1]={}
#
# x1=event_points[day1][anom1][::3]-np.mean(event_points[day1][anom1][::3],axis=1).reshape(4,1)
# x1=x1.ravel()
#
# for day2 in event_points:
# print(day2)
# similarity_scores[day1][anom1][day2]={}
#
# for anom2 in event_points[day2]:
# print(anom2)
# x2=event_points[day2][anom2][::3]-np.mean(event_points[day2][anom2][::3],axis=1).reshape(4,1)
# x2=x2.ravel()
#
## plt.plot(event_points['July_10'][i][0]-np.mean(event_points['July_10'][i][0]))
## plt.plot(event_points['July_10'][j][0]-np.mean(event_points['July_10'][j][0]))
## plt.show()
# d, path = fastdtw(x1, x2, dist=euclidean_norm)
# print(d)
# similarity_scores[day1][anom1][day2][anom2]=d
# temp_similarity.append(d)
#
# temp_similarity=np.array(temp_similarity)
# similarity_matrix.append(temp_similarity)
#similarity_matrix=np.array(similarity_matrix)
#toc = time.clock()
#print(toc-tik)
#time_4features=toc-tik
# print(d)
# plt.imshow(acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')
# plt.plot(path[0], path[1], 'w')
# plt.show()
## print('...........................................................')
##%%
## =============================================================================
## =============================================================================
## # calculating fft for each event and save them
## =============================================================================
## =============================================================================
#
#
#fft_scores={}
#total_events=0
#all_evnets_scores=[]
#for day1 in event_points:
# fft_scores[day1]={}
## print(day1)
#
# for count,anom1 in enumerate(event_points[day1]):
## print(anom1)
# total_events+=1
# x1=event_points[day1][anom1][::3]-np.mean(event_points[day1][anom1][::3],axis=1).reshape(4,1)
#
# fft_scores[day1][anom1]=np.concatenate((np.fft.fftn(x1)[:,0:120].real.ravel(),np.fft.fftn(x1)[:,0:120].imag.ravel()),axis=None)
# # =============================================================================
## =============================================================================
## # make trainig data with fft output
## =============================================================================
## =============================================================================
#
#
# all_evnets_scores.append(np.concatenate((np.fft.fftn(x1)[:,0:120].real.ravel(),np.fft.fftn(x1)[:,0:120].imag.ravel()),axis=None))
#
# if count% 500==0:
# print('iter num: %count', count)
#print(total_events)
#anomcsvfile="data/Armin_Data/fftscores.pkl"
#output = open(anomcsvfile, 'wb')
#pkl.dump(fft_scores, output)
#output.close()
#
#
#all_evnets_scores=np.array(all_evnets_scores)
#
#all_evnets_scores_file="data/Armin_Data/all_evnets_scores_file.pkl"
#output = open(all_evnets_scores_file, 'wb')
#pkl.dump(all_evnets_scores, output)
#output.close()
#
##%%
#X=all_evnets_scores
#
#for n_clusters in np.arange(10,50):
# clusterer = KMeans(n_clusters=n_clusters, random_state=0)
# cluster_labels = clusterer.fit_predict(X)
# silhouette_avg = silhouette_score(X, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
##%%
## =============================================================================
## =============================================================================
## # best cluster number by fft is 18 based in silhouette
## =============================================================================
## =============================================================================
#n_clusters=18
#clusterer = KMeans(n_clusters=n_clusters, random_state=0)
#cluster_labels = clusterer.fit_predict(X)
##%%
## =============================================================================
## =============================================================================
## # predict the labels for main dataset
## =============================================================================
## =============================================================================
#labels={}
#start=0
#end=0
#for day in event_points:
# num_anom=len(event_points[day].keys())
# end=start+num_anom
# selected_fft=all_evnets_scores[start:end]
# labels[day]=clusterer.fit_predict(selected_fft)
# start=end
# print(day)
##%%
## =============================================================================
## =============================================================================
## # show some sample from each cluster in one day
## =============================================================================
## =============================================================================
#
#count=0
#for anom in event_points['July_03']:
# print(labels['July_03'][count])
# plt.subplot(121)
# plt.plot(event_points['July_03'][anom][0])
# plt.subplot(122)
# plt.plot(event_points['July_03'][anom][3])
# plt.show()
# count+=1
#
##%%
#
##%%%
#for day1 in ['July_03']:
# similarity_scores[day1]={}
# print(day1)
# for anom1 in event_points[day1]:
# temp_similarity=[]
# print(anom1)
# similarity_scores[day1][anom1]={}
#
# x1=event_points[day1][anom1][::3]-np.mean(event_points[day1][anom1][::3],axis=1).reshape(4,1)
# x1=x1[3]
# ff=np.fft.fft(x1)
# freq = np.fft.fftfreq(x1.shape[-1])
#
# widths = np.arange(1, 240)
# cwtmatr = signal.cwt(x1, signal.ricker,widths)
# plt.subplot(131)
# plt.plot(freq, ff.real, freq, ff.imag)
# plt.subplot(132)
# plt.plot(x1)
# plt.subplot(133)
# plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
# vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
# plt.show()
#%%
# Pairwise DTW similarity between the first 20 detected events.
# `event_points` maps event-id -> multichannel array; only channel 0 is compared here.
k=list(event_points.keys())
dtw_scores={}
#x1=event_points[351][0]-np.mean(event_points[351][0])
for event1 in k[0:20]:
    # mean-centre the series so DTW measures shape difference, not DC offset
    x1=event_points[event1][0]-np.mean(event_points[event1][0])
    dtw_scores[event1]=[]
    print(event1)
    for event2 in k[0:20]:
        x2=event_points[event2][0]-np.mean(event_points[event2][0])
#        plt.plot(x1)
#        plt.plot(x2)
#        plt.show()
        # fastdtw returns (distance, warp path); only the distance is kept
        distance, path = fastdtw(x1, x2, dist=euclidean)
#        d, cost_matrix, acc_cost_matrix, path = dtw(x1, x2, dist=euclidean_norm)
#        plt.imshow(acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')
#
#        plt.plot(path[0], path[1], 'w')
#        plt.show()
#        d
        dtw_scores[event1].append(distance)
#%%
# Collect the per-event DTW distance lists into a dense 2-D array
# (rows follow the insertion order of dtw_scores).
ds = np.array([list(dtw_scores[event_id]) for event_id in dtw_scores])
#%%
plt.plot(dtw_scores)
#%%
# Plot the events whose DTW distance to the reference (first scored) event is small.
# BUGFIX: `dtw_scores` is keyed by event id (not enumeration index) and each value
# is a *list* of 20 distances, so the original `dtw_scores[num] < 10` raised
# KeyError (and would raise TypeError on the list comparison). Iterate the events
# that were actually scored and compare the distance to the first event instead.
for event in k[0:20]:
    if dtw_scores[event][0] < 10:
        plt.plot(event_points[event][0])
plt.show()
#%%
# Heat map of the full 20x20 DTW distance matrix.
plt.imshow(ds.transpose(), origin='lower', cmap='gray', interpolation='nearest')
#%%
# BUGFIX: GlobalAlignmentKernelKMeans is used below but was never imported
# (only TimeSeriesKMeans was); import both from tslearn.
from tslearn.clustering import TimeSeriesKMeans, GlobalAlignmentKernelKMeans
#%%
# Training set: first 1000 events, each a list of its channel series.
X_train = [list(event_points[event]) for event in k[0:1000]]
#%%
# Kernel k-means with the global-alignment kernel, 6 clusters.
km = GlobalAlignmentKernelKMeans(n_clusters=6)
km.fit(X_train)
#%%
lb = km.labels_
#%%
# Show up to 10 sample events from each cluster, one 2x2 figure per event:
# voltages (V), currents (I), active power (P), reactive power (Q).
# BUGFIX: `n_clusters` was undefined in this cell (it only existed in
# commented-out code above); use the fitted estimator's cluster count.
panels = ((221, (0, 1, 2), 'V'), (222, (3, 4, 5), 'I'),
          (223, (6, 7, 8), 'P'), (224, (9, 10, 11), 'Q'))
for yi in range(km.n_clusters):
    indices = [i for i, x in enumerate(lb) if x == yi]
#    plt.subplot(3, 1, 1 + yi)
    count = 0
    for ind in indices:
        if count < 10:
            print(yi)
            for pos, chans, name in panels:
                plt.subplot(pos)
                for i in chans:
                    plt.plot(X_train[ind][i])
                plt.legend('A' 'B' 'C')  # string iterates as labels A, B, C
                plt.title(name)
            plt.show()
#            plt.plot(X_train[ind][3])
        count += 1
    plt.show()
#%%
# Visualise each learned cluster centre using the same four V/I/P/Q panels
# as the per-event plots above (channels 0-2 V, 3-5 I, 6-8 P, 9-11 Q).
cent = km.cluster_centers_
for centre in cent:
    for pos, chans, name in ((221, (0, 1, 2), 'V'),
                             (222, (3, 4, 5), 'I'),
                             (223, (6, 7, 8), 'P'),
                             (224, (9, 10, 11), 'Q')):
        plt.subplot(pos)
        for i in chans:
            plt.plot(centre[i])
        plt.legend('A' 'B' 'C')
        plt.title(name)
    plt.show()
#%%%%%%%%%%%%%%5
from scipy.cluster import hierarchy
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
22,001
|
zyh88/PMU
|
refs/heads/master
|
/pv pmu data cleaning.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 12:26:15 2019
@author: hamed
"""
import numpy as np
import tensorflow as tf
import pandas as pd
import os
import pickle as pkl
import matplotlib.pyplot as plt
import operator
import math
#%%
# =============================================================================
# =============================================================================
# # read one file of the PMU data , each file is for 10 minutes
# =============================================================================
# =============================================================================
# whole data filenames in the data directory
# (one raw CSV per 10-minute PMU capture window for July 1)
filenames=os.listdir("data/jul 1")
#%%
# importing data from a file function
# importing data from a file function
def OneFileImport(filename):
    """Load one 10-minute PMU capture CSV from the "data/jul 1" directory.

    Parameters
    ----------
    filename : str
        Base name of the CSV file inside "data/jul 1".

    Returns
    -------
    pandas.DataFrame
        Raw rows (no header); malformed lines are skipped.
    """
    dir_name="data/jul 1"
    base_filename=filename
    path=os.path.join(dir_name, base_filename)
    # Compatibility fix: `error_bad_lines` was removed in pandas 2.0; prefer
    # the modern `on_bad_lines="skip"` and fall back for old pandas versions.
    try:
        imported_data=pd.read_csv(path, header=None, on_bad_lines="skip")
    except TypeError:
        imported_data=pd.read_csv(path, header=None, error_bad_lines=False)
    return imported_data
#%%
# Clean every raw capture: keep (flag, date, time, value), align all channels
# to a common length, derive per-phase P/Q/power-factor and pickle the result.
for file in filenames:
    print(file)
    data = OneFileImport(file)
    data = data[[1, 3, 4, 5]]
    data = data.rename(index=str, columns={1: "flag", 3: "date", 4: "time", 5: "value"})
    groups = data.groupby('flag')
    flags = data['flag'].unique()[0:13]
    # Channels may have unequal sample counts; truncate all to the shortest.
    minn = 1000000
    for f in flags:
        g = groups.get_group(f)
        if g.shape[0] <= minn:
            minn = g.shape[0]
    print(minn)
    selected_data = {}
    for f in flags:
        selected_data[f] = groups.get_group(f).value.values.astype(float)[0:minn]
    # Timestamps taken from the last flag's group (presumably all groups share
    # the same clock — TODO confirm against the raw capture format).
    selected_data['time'] = groups.get_group(f).time.values.astype(float)[0:minn]
    selected_data = pd.DataFrame(selected_data)
    selected_data = selected_data.drop('UCR_PSL_UPMU:QF', axis=1)
    features = ['L1MAG', 'L2MAG', 'L3MAG', 'C1MAG',
                'C2MAG', 'C3MAG', 'PA', 'PB', 'PC', 'QA', 'QB', 'QC']
    selected_data = selected_data.rename(index=str, columns={'_PSL_UPMU-PM1:V': 'L1MAG', '_PSL_UPMU-PA1:VH': 'L1ANG', '_PSL_UPMU-PM2:V': 'L2MAG',
                                                             '_PSL_UPMU-PA2:VH': 'L2ANG', '_PSL_UPMU-PM3:V': 'L3MAG', '_PSL_UPMU-PA3:VH': 'L3ANG',
                                                             '_PSL_UPMU-PM4:I': 'C1MAG', '_PSL_UPMU-PA4:IH': 'C1ANG', '_PSL_UPMU-PM5:I': 'C2MAG',
                                                             '_PSL_UPMU-PA5:IH': 'C2ANG', '_PSL_UPMU-PM6:I': 'C3MAG', '_PSL_UPMU-PA6:IH': 'C3ANG'})
    Active = {}
    Reacive = {}
    #keys={}
    pf = {}
    # Per-phase P = V*I*cos(angle diff), Q = V*I*sin(angle diff); angles in degrees.
    Active['A'] = selected_data['L1MAG']*selected_data['C1MAG']*(np.cos((selected_data['L1ANG']-selected_data['C1ANG'])*(np.pi/180)))
    Active['B'] = selected_data['L2MAG']*selected_data['C2MAG']*(np.cos((selected_data['L2ANG']-selected_data['C2ANG'])*(np.pi/180)))
    Active['C'] = selected_data['L3MAG']*selected_data['C3MAG']*(np.cos((selected_data['L3ANG']-selected_data['C3ANG'])*(np.pi/180)))
    Reacive['A'] = selected_data['L1MAG']*selected_data['C1MAG']*(np.sin((selected_data['L1ANG']-selected_data['C1ANG'])*(np.pi/180)))
    Reacive['B'] = selected_data['L2MAG']*selected_data['C2MAG']*(np.sin((selected_data['L2ANG']-selected_data['C2ANG'])*(np.pi/180)))
    Reacive['C'] = selected_data['L3MAG']*selected_data['C3MAG']*(np.sin((selected_data['L3ANG']-selected_data['C3ANG'])*(np.pi/180)))
    # Power factor = P / |S|.
    pf['A'] = Active['A']/np.sqrt(np.square(Active['A'])+np.square(Reacive['A']))
    pf['B'] = Active['B']/np.sqrt(np.square(Active['B'])+np.square(Reacive['B']))
    pf['C'] = Active['C']/np.sqrt(np.square(Active['C'])+np.square(Reacive['C']))
    selected_data['PA'] = Active['A']
    selected_data['PB'] = Active['B']
    selected_data['PC'] = Active['C']
    selected_data['QA'] = Reacive['A']
    selected_data['QB'] = Reacive['B']
    selected_data['QC'] = Reacive['C']
    selected_data['pfA'] = pf['A']
    selected_data['pfB'] = pf['B']
    selected_data['pfC'] = pf['C']
    form = '.pkl'
    filename = file.split(sep='.')[0] + form
    dir_name = "data/sorted"
    path = os.path.join(dir_name, filename)
    print(path)
    # BUGFIX: the module is imported as `pkl` (import pickle as pkl), so the
    # original `pickle.dump(...)` raised NameError. Also use a context manager
    # so the file is closed even if dump fails.
    with open(path, 'wb') as output:
        pkl.dump(selected_data, output)
|
{"/plot paper figures.py": ["/loading_data.py"], "/Threshold.py": ["/loading_data.py"], "/clustering.py": ["/loading_data.py"], "/new clustering.py": ["/loading_data.py"]}
|
22,028
|
Wadwadw/Weather-bot
|
refs/heads/master
|
/tele_bot.py
|
from aiogram import Bot, Dispatcher, executor, types
import logging
import parce
# Telegram bot token (placeholder — the real token must not be committed).
API_TOKEN = '#######################'
logging.basicConfig(level=logging.INFO)
# aiogram bot instance and its dispatcher; handlers register on `dp` below.
bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message: types.Message):
    # /start: greet the user and point at /help (reply text is user-facing).
    await message.answer("ะัะธะฒะตั ััะพ ะะพั-ะฟะพะณะพะดั, ะฒะฒะตะดะธ ะฝะฐะทะฒะฐะฝะธะต ะณะพัะพะดะฐ ััะพ ะฑั ัะทะฝะฐัั ะฟัะพะณะฝะพะท ะฟะพะณะพะดั ะฝะฐ 7 ะดะฝะตะน."
                         " ะะฐะถะผะธ ะฝะฐ /help ััะพ ะฑั ัะฒะธะดะตัั ะฟะพะดัะบะฐะทะบะธ")
@dp.message_handler(commands=['help'])
async def send_help(message: types.Message):
    # /help: usage hints for city names.
    # BUGFIX: this coroutine was also named `send_welcome`, shadowing the
    # /start handler's name at module level. aiogram registers handlers via
    # the decorator so behaviour was unchanged, but the duplicate name made
    # the first function unreachable by name; renamed for clarity.
    await message.answer("ะะฐะทะฒะฐะฝะธะต ะณะพัะพะดะฐ ะฝัะถะฝะพ ะฒะฒะพะดะธัั ะบะธัะธะปะปะธัะตะน, ัะตะณะธััั ะฑัะบะฒ ะฝะต ะธะผะตะตั ะทะฝะฐัะตะฝะธั. ะะพัะพะดะฐ ะฝะฐะทะฒะฐะฝะธั ะบะพัะพััั "
                         "ะผะตะฝัะปะธัั ะฝัะถะฝะฝะพ ะฒะฒะพะดะธัั ะฒ ััะฐัะพะผ ัะพัะผะฐัะต. ะะฐะฟัะธะผะตั ะตัะปะธ ะฒะฐะผ ะฝัะถะฝะพ ัะทะฝะฐัั ะฟะพะณะพะดั ะฒ ะะฝะตะฟัะต ะฝัะถะฝะพ ะฒะฒะตััะธ"
                         " ะะฝะตะฟัะพะฟะตััะพะฒัะบ")
@dp.message_handler()
async def answer(message: types.Message):
    # Any other text is treated as a city name: parse the site and reply
    # with the joined forecast lines.
    forecast_lines = parce.parce(city=message.text)
    await message.answer('\n'.join(forecast_lines))
# Start long-polling; skip_updates drops messages received while offline.
if __name__ == '__main__':
    executor.start_polling(dp, skip_updates=True)
|
{"/tele_bot.py": ["/parce.py"]}
|
22,029
|
Wadwadw/Weather-bot
|
refs/heads/master
|
/parce.py
|
import requests as re
import bs4
def parce(city='ะฟะฐะฒะปะพะณัะฐะด'):
    """Scrape sinoptik.ua and return a 7-day forecast for *city*.

    Returns a list of display lines (header + one line per day), or a
    single-element error message list when the city page has no forecast.
    """
    URL = 'https://sinoptik.ua/ะฟะพะณะพะดะฐ-' + city
    # NOTE: `re` is `requests` (aliased at import) — not the regex module.
    page = re.get(URL)
    wth = bs4.BeautifulSoup(page.text, "html.parser")
    description = [div['title'] for div in wth.find_all('div', title=True)]
    result = [f'ะะพะณะพะดะฐ ะฒ ะณะพัะพะดะต {city} ะฝะฐ 7 ะดะฝะตะน\n']
    # Hoisted out of the loop: these node lists do not change between days
    # (the original re-ran every CSS select 7 times), and the original local
    # names `min`/`max` shadowed the builtins.
    lows = wth.select("div > .temperature > .min > span")
    highs = wth.select("div > .temperature > .max > span")
    day_names = wth.select(".day-link")
    month_days = wth.select("div > .date")
    months = wth.select("div > .month")
    for n in range(7):
        try:
            result.append(str(day_names[n].getText() + ' ' + month_days[n].getText() + ' ' + months[n].getText()
                              + ': ัะตะผะฟะตัะฐัััะฐ ะพั ' + lows[n].getText() + ' ะดะพ ' + highs[n].getText()
                              + '. ' + description[n] + '.' + '\n'))
        except IndexError:
            # BUGFIX: the original kept looping (and failing) for every
            # remaining day after the first IndexError; the final result is
            # identical, so bail out immediately.
            result = ['ะั ะฒะฒะตะปะธ ะณะพัะพะด ะฝะต ะฟัะฐะฒะธะปัะฝะพ ะฟะพะฟัะพะฑัะนัะต ะตัั ัะฐะท']
            break
    return result
|
{"/tele_bot.py": ["/parce.py"]}
|
22,069
|
louzounlab/SubGraphs
|
refs/heads/master
|
/model_runner.py
|
import math
import time
import os
from random import shuffle
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import nni
import logging
import networkx as nx
from loggers import EmptyLogger, CSVLogger, PrintLogger, FileLogger, multi_logger
from model import GCN, GatNet
from pre_peocess import build_2k_vectors
import pickle
CUDA_Device = 1
class ModelRunner:
    """Train/evaluate harness for the ego-subgraph GCN.

    For each central node the model is run on that node's ego graph
    (``conf["adj_matrices"][node]``). The loss combines cross-entropy on the
    central node with beta/gamma-weighted terms for labelled first- and
    second-hop neighbours. Progress goes through the project loggers and,
    when ``is_nni`` is set, to NNI.
    """

    def __init__(self, conf, logger, data_logger=None, is_nni=False):
        # conf: dict assembled by build_model (lr, beta/gamma, data, epochs...)
        self._logger = logger
        self._data_logger = EmptyLogger() if data_logger is None else data_logger
        self._conf = conf
        self.bar = 0.5
        self._lr = conf["lr"]
        self._is_nni = is_nni
        # choosing GPU device
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # NOTE(review): comparing a torch.device to the string "cpu" relies on
        # torch's device/str equality semantics — confirm this branch behaves
        # as intended on CPU-only machines.
        if self._device != "cpu":
            with torch.cuda.device("cuda:{}".format(CUDA_Device)):
                torch.cuda.empty_cache()
            if not self._is_nni:
                self._device = torch.device("cuda:{}".format(CUDA_Device))
        self._loss = self._sub_graph_ce_loss
        self._ce_loss = torch.nn.CrossEntropyLoss(reduction="mean").to(self._device)

    @property
    def logger(self):
        return self._logger

    @property
    def data_logger(self):
        return self._data_logger

    def _sub_graph_ce_loss(self, calcs, beta=None, gamma=None):
        """Weighted CE loss: central node + beta*1-hop + gamma*2-hop terms."""
        # if beta is None:
        #     beta = 1 / len(calcs["f_ns_out"]) if len(calcs["f_ns_out"])!=0 else 0
        #     gamma = 1 / len(calcs["s_ns_out"]) if len(calcs["s_ns_out"])!=0 else 0
        #todo check dimensions of central nodes torch
        cn_loss = self._ce_loss(calcs["cn_out"], calcs["cn_label"])
        # Neighbour terms are skipped entirely when that hop has no labelled nodes.
        f_ns_loss = self._ce_loss(calcs["f_ns_out"], calcs["f_ns_labels"]) *(beta) if len(calcs["f_ns_out"])!=0 else 0
        s_ns_loss = self._ce_loss(calcs["s_ns_out"], calcs["s_ns_labels"]) * (gamma) if len(calcs["s_ns_out"])!=0 else 0
        return cn_loss+f_ns_loss+s_ns_loss

    def _get_model(self):
        """Build the GCN + optimizer and bundle them with the data/config."""
        model = GCN(in_features=self._conf["in_features"],
                    hid_features=self._conf["hid_features"], out_features= self._conf["out_features"],
                    activation=self._conf["activation"], dropout= self._conf["dropout"])
        opt = self._conf["optimizer"](model.parameters(), lr=self._conf["lr"], weight_decay=self._conf["weight_decay"])
        ##checged : added "feature_matrices"
        return {"model": model, "optimizer": opt,
                # "training_mats": self._conf["training_mat"],
                # "training_labels": self._conf["training_labels"],
                # "test_mats": self._conf["test_mat"],
                # "test_labels": self._conf["test_labels"],
                "cut": self._conf["cut"],"beta": self._conf["beta"],"gamma": self._conf["gamma"],
                "labels": self._conf["labels"], "X": self._conf["X"], "ds_name": self._conf["ds_name"],
                "train_ind": self._conf["train_ind"], "test_ind": self._conf["test_ind"],
                "adj_matrices": self._conf["adj_matrices"]
                }

    # verbose = 0 - silent
    # verbose = 1 - print test results
    # verbose = 2 - print train for each epoch and test results
    def run(self, verbose=2):
        """Train for conf["epochs"] epochs, then run one final test pass.

        Returns the training curves, the final test result dict and a
        hyper-parameter summary (consumed by plot_graphs/execute_runner).
        """
        if self._is_nni:
            verbose = 0
        model = self._get_model()
        ##
        loss_train, acc_train, intermediate_acc_test, losses_train, accs_train, accs_cn_train, accs_f_train, accs_s_train, test_results = self.train(
            self._conf["epochs"],
            model=model,
            verbose=verbose)
        ##
        # Testing . ## result is only the last one! do not use. same as 7 in last
        result = self.test(model=model, verbose=verbose if not self._is_nni else 0, print_to_file=True)
        test_results.append(result)
        if self._is_nni:
            self._logger.debug('Final loss train: %3.4f' % loss_train)
            self._logger.debug('Final accuracy train: %3.4f' % acc_train)
            final_results = result["acc"]
            self._logger.debug('Final accuracy test: %3.4f' % final_results)
            # _nni.report_final_result(test_auc)
        if verbose != 0:
            names = ""
            vals = ()
            for name, val in result.items():
                names = names + name + ": %3.4f "
                vals = vals + tuple([val])
                self._data_logger.info(name, val)
        parameters = {"temporal_pen": self._conf["temporal_pen"], "lr": self._conf["lr"],
                      "weight_decay": self._conf["weight_decay"],
                      "dropout": self._conf["dropout"], "optimizer": self._conf["optim_name"]}
        return loss_train, acc_train, intermediate_acc_test, result, losses_train, accs_train, accs_cn_train, accs_f_train, accs_s_train, test_results, parameters

    def train(self, epochs, model=None, verbose=2):
        """Epoch loop; every 5th epoch also evaluates on the test split."""
        loss_train = 0.
        acc_train = 0.
        losses_train = []
        accs_train = []
        accs_cn_train = []
        accs_f_train = []
        accs_s_train = []
        test_results = []
        intermediate_test_acc = []
        for epoch in range(epochs):
            loss_train, acc_train, acc_train_cn , acc_train_f, acc_train_s= self._train(epoch, model, verbose)
            ##
            losses_train.append(loss_train)
            accs_train.append(acc_train)
            accs_cn_train.append(acc_train_cn)
            #if acc_train_f!=0:
            accs_f_train.append(acc_train_f)
            # if acc_train_s!=0:
            accs_s_train.append(acc_train_s)
            ##
            # /---------------------- FOR NNI -------------------------
            if epoch % 5 == 0:
                test_res = self.test(model, verbose=verbose if not self._is_nni else 0)
                test_results.append(test_res)
                if self._is_nni:
                    test_acc = test_res["acc"]
                    intermediate_test_acc.append(test_acc)
        return loss_train, acc_train, intermediate_test_acc, losses_train, \
               accs_train, accs_cn_train, accs_f_train, accs_s_train, test_results

    def calculate_labels_outputs(self,node, outputs , labels, indices, ego_graph):
        """Split model outputs/labels into central-node, 1-hop and 2-hop groups.

        Only neighbours whose index appears in `indices` (the labelled split)
        contribute; output rows are located via the ego graph's node ordering.
        """
        f_neighbors = list(ego_graph.neighbors(node))
        s_neighbors = []
        # Second-hop = neighbours-of-neighbours, excluding the centre and 1-hop set.
        for f_neighbor in f_neighbors:
            for s_neighbor in ego_graph.neighbors(f_neighbor):
                if s_neighbor not in f_neighbors and s_neighbor != node and s_neighbor not in s_neighbors:
                    s_neighbors += [s_neighbor]
        cn_out= outputs[[list(ego_graph.nodes).index(node)]]
        cn_label = labels[[node]] ##todo [node]
        f_ns_out = outputs[[list(ego_graph.nodes).index(f_n) for f_n in f_neighbors if f_n in indices]]
        f_ns_labels = labels[[f_n for f_n in f_neighbors if f_n in indices]]
        s_ns_out = outputs[[list(ego_graph.nodes).index(s_n) for s_n in s_neighbors if s_n in indices]]
        s_ns_labels = labels[[s_n for s_n in s_neighbors if s_n in indices]]
        return { "cn_out": cn_out, "cn_label": cn_label, "f_ns_out": f_ns_out, "f_ns_labels": f_ns_labels, "s_ns_out": s_ns_out, "s_ns_labels": s_ns_labels }

    def _train(self, epoch, model, verbose=2):
        """One optimisation step over a `cut` fraction of the training nodes."""
        model_ = model["model"]
        model_ = model_.to(self._device)
        optimizer = model["optimizer"]
        cut = model["cut"]
        train_indices = model["train_ind"]
        model["labels"] = model["labels"].to(self._device)
        labels = model["labels"]
        beta = model["beta"]
        gamma = model["gamma"]
        model_.train()
        optimizer.zero_grad()
        loss_train = 0.
        acc_train = 0
        acc_train_cn, acc_train_f, acc_train_s = 0,0,0
        f_nones = 0; s_nones = 0
        # create subgraphs only for partial, but use labels of all.
        partial_train_indices = train_indices[0:int(cut*len(train_indices))]
        for node in partial_train_indices: #this may be in batches for big graphs todo
            adj = model["adj_matrices"][node]
            X_t = model["X"][list(adj.nodes)].to(device=self._device)
            output = model_(X_t, nx.adjacency_matrix(adj).tocoo())
            calcs = self.calculate_labels_outputs( node, output, labels, train_indices, adj)
            loss_train += self._loss(calcs, beta, gamma)
            acc, acc_cn, acc_f, acc_s = self.accuracy(calcs)
            acc_train_cn+= acc_cn
            # accuracy() returns None for a hop with no labelled nodes; count
            # those cases so the averages below use the right denominator.
            if acc_f!=None:
                acc_train_f += acc_f
            else:
                f_nones+=1
            if acc_s!=None:
                acc_train_s+=acc_s
            else:
                s_nones+=1
            acc_train += acc
        loss_train /= len(partial_train_indices)
        acc_train_cn /= len(partial_train_indices)
        if len(partial_train_indices)-f_nones !=0:
            acc_train_f /= (len(partial_train_indices)-f_nones)
        else:
            acc_train_f = np.nan
        if len(partial_train_indices)-s_nones !=0:
            acc_train_s /= (len(partial_train_indices)-s_nones)
        else:
            acc_train_s = np.nan
        acc_train/= len(partial_train_indices)
        #print("Train Acc on cn", acc_train_cn / 1, "Acc first nodes", acc_train_f, "Acc second nodes", acc_train_s)
        loss_train.backward()
        optimizer.step()
        if verbose == 2:
            # Evaluate validation set performance separately,
            # deactivates dropout during validation run.
            self._logger.debug('Epoch: {:04d} '.format(epoch + 1) +
                               'ce_loss_train: {:.4f} '.format(loss_train.data.item()) +
                               'acc_train: {:.4f} '.format(acc_train))
        return loss_train, acc_train, acc_train_cn , acc_train_f, acc_train_s

    @staticmethod
    def accuracy(calcs):
        """Argmax accuracy per group; returns (overall, central, 1-hop, 2-hop).

        The 1-hop/2-hop entries are None when that hop has no labelled nodes.
        """
        # return {"cn_out": cn_out, "cn_label": cn_label, "f_ns_out": f_ns_out, "f_ns_labels": f_ns_labels,
        #         "s_ns_out": s_ns_out, "s_ns_labels": s_ns_labels}
        acc = 0
        acc_cn, acc_f, acc_s = 0,0,0
        for idx, sample in enumerate(calcs["f_ns_out"]):
            if torch.argmax(sample) == calcs["f_ns_labels"][idx]:
                acc+=1
                acc_f+=1
        for idx, sample in enumerate(calcs["s_ns_out"]):
            if torch.argmax(sample) == calcs["s_ns_labels"][idx]:
                acc+=1
                acc_s+=1
        if torch.argmax(calcs["cn_out"]) == calcs["cn_label"]:
            acc+=1
            acc_cn+=1
        size_labeld = len(calcs["cn_out"])+len(calcs["s_ns_out"])+len(calcs["f_ns_out"])
        #print(acc_cn, acc_f,acc_s)
        acc_f = acc_f/len(calcs["f_ns_out"]) if len(calcs["f_ns_out"])!=0 else None
        acc_s = acc_s / len(calcs["s_ns_out"]) if len(calcs["s_ns_out"]) != 0 else None
        #print("Acc on cn", acc_cn/1, "Acc first nodes", acc_f, "Acc second nodes",acc_s)
        #return acc/size_labeld # for all with no change between first and seconds
        return acc/size_labeld, acc_cn/1, acc_f, acc_s

    def test(self, model=None, verbose=2, print_to_file=False):
        """Evaluate on a random 5% sample of the test nodes' ego graphs."""
        model_ = model["model"]
        test_indices = model["test_ind"]
        labels = model["labels"]
        beta = model["beta"]
        gamma = model["gamma"]
        model_.eval()
        test_loss = 0
        test_acc = 0
        acc_test_cn, acc_test_f, acc_test_s = 0, 0, 0
        f_nones= 0; s_nones= 0
        partial_rand_test_indices = np.random.choice(len(test_indices), round(0.05*len(test_indices)) , replace=False)
        #partial_rand_test_indices = test_indices
        #partial_test_indices = test_indices[0:int(1 * len(test_indices))] ## 1 is all
        for node in partial_rand_test_indices: #this may be in batches for big graphs todo
            #adj is the ego graph (that will be converted into adj matrix and coo).
            adj = model["adj_matrices"][node]
            import random
            # NOTE(review): shuffling `adj.nodes` (a NetworkX NodeView) and a
            # torch tensor with random.shuffle looks like a leftover
            # experiment — confirm whether this shuffle is intentional; it
            # likely raises or has no well-defined effect.
            random.shuffle(adj.nodes)
            X_t = model["X"][list(adj.nodes)].to(device=self._device)
            print(X_t[0])
            random.shuffle(X_t)
            print(X_t[0],"after")
            #todo this may be given as another param, to avoid using cpu calculations here
            output = model_(X_t, nx.adjacency_matrix(adj).tocoo())
            calcs = self.calculate_labels_outputs( node, output, labels, test_indices, adj)
            test_loss += self._loss(calcs, beta, gamma)
            #test_acc += self.accuracy(calcs)
            acc, acc_cn, acc_f, acc_s = self.accuracy(calcs)
            acc_test_cn += acc_cn
            if acc_f!=None:
                acc_test_f += acc_f
            else:
                f_nones +=1
            if acc_s != None:
                acc_test_s += acc_s
            else:
                s_nones +=1
            test_acc += acc
        test_loss /= len(partial_rand_test_indices)
        test_acc /= len(partial_rand_test_indices)
        acc_test_cn /= len(partial_rand_test_indices); acc_test_f /= (len(partial_rand_test_indices)-f_nones); acc_test_s /= (len(partial_rand_test_indices)-s_nones)
        #print("Test Acc on cn", acc_test_cn/1, "Acc first nodes", acc_test_f, "Acc second nodes",acc_test_s)
        if verbose != 0:
            self._logger.info("Test: ce_loss= {:.4f} ".format(test_loss.data.item()) + "acc= {:.4f}".format(test_acc))
        #result = {"loss": loss_test.data.item(), "acc": acc_test, "tempo_loss": tempo_loss.data.item()}
        result = {"loss": test_loss, "acc": test_acc, "acc_cn": acc_test_cn, "acc_f":acc_test_f, "acc_s":acc_test_s}
        return result
def plot_graphs(train_loss_mean, train_acc_mean,train_cn_acc_mean,train_f_acc_mean, train_s_acc_mean, test_loss_mean, test_acc_mean,
                test_cn_acc_mean,test_f_acc_mean,test_s_acc_mean, parameters, plots_data):
    """Render a 2x3 grid of averaged train/test curves and save it under figures/.

    Top row: train loss / accuracy / per-hop accuracies; bottom row the same
    for test. `parameters` supplies hyper-parameters for the title,
    `plots_data` the dataset name and summary accuracy statistics.
    """
    # info[4] is list of train losses 1 . list[5] is list of acc train.
    #info [6] is list of dictionaries, each dictionary is for epoch, each one contains "loss" - first loss,"acc"- acc, "tempo_loss" - tempo loss
    #info[7] is the temporal_oen
    regulariztion = str(round(parameters["weight_decay"],3))
    lr = str(round(parameters["lr"],3))
    optimizer = str(parameters["optimizer"])
    dropout = str(round(parameters["dropout"],2))
    cut = plots_data["cut"]*100
    ds_name = plots_data["ds_name"]
    #Train
    # Share a X axis with each column of subplots
    fig, axes = plt.subplots(2, 3, figsize=(12, 10))
    plt.suptitle("DataSet: " + ds_name
                 + ", final_train_accuracies_mean: " + str(round(plots_data["final_train_accuracies_mean"],2)) + ", final_train_accuracies_ste: " + str(round(plots_data["final_train_accuracies_ste"],2))
                 + "\nfinal_test_accuracies_mean: " + str(round(plots_data["final_test_accuracies_mean"],2)) + ", final_test_accuracies_ste: " + str(round(plots_data["final_test_accuracies_ste"],2))
                 + "\nlr="+lr+" reg= "+regulariztion+ ", dropout= "+dropout+", opt= "+optimizer, fontsize=12, y=0.99)
    epoch = [e for e in range(1, len(train_loss_mean)+1)]
    axes[0, 0].set_title('Loss train')
    axes[0, 0].set_xlabel("epochs")
    axes[0, 0].set_ylabel("Loss")
    axes[0, 0].plot(epoch, train_loss_mean)
    axes[0, 1].set_title('Accuracy train')
    axes[0, 1].set_xlabel("epochs")
    axes[0, 1].set_ylabel("Accuracy")
    axes[0, 1].plot(epoch, train_acc_mean)
    axes[0, 2].set_title('Accuracy layers Train')
    axes[0, 2].set_xlabel("epochs")
    axes[0, 2].set_ylabel("Accuracies")
    axes[0, 2].plot(epoch, train_cn_acc_mean, label='CentralNode')
    axes[0, 2].plot(epoch, train_f_acc_mean, label='FirstNeighbors')
    axes[0, 2].plot(epoch, train_s_acc_mean, label='SecondNeighbors')
    axes[0, 2].legend(loc='best')
    #test
    # Test curves are sampled every 5 epochs, so this axis is shorter.
    epoch = [e for e in range(1, len(test_loss_mean)+1)]
    axes[1, 0].set_title('Loss test')
    axes[1, 0].set_xlabel("epochs")
    axes[1, 0].set_ylabel("Loss")
    axes[1, 0].plot(epoch, test_loss_mean)
    axes[1, 1].set_title('Accuracy test')
    axes[1, 1].set_xlabel("epochs")
    axes[1, 1].set_ylabel("Accuracy")
    axes[1, 1].plot(epoch, test_acc_mean)
    axes[1, 2].set_title('Accuracy layers Test')
    axes[1, 2].set_xlabel("epochs")
    axes[1, 2].set_ylabel("Accuracies")
    axes[1, 2].plot(epoch, test_cn_acc_mean, label='CentralNode')
    axes[1, 2].plot(epoch, test_f_acc_mean, label='FirstNeighbors')
    axes[1, 2].plot(epoch, test_s_acc_mean, label='SecondNeighbors')
    axes[1, 2].legend(loc='best')
    fig.tight_layout()
    plt.subplots_adjust(top=0.85)
    # fig.delaxes(axes[1,0])
    plt.savefig("figures/"+plots_data["ds_name"]+"_.png")
    plt.clf()
    #plt.show()
#plt.show()
def execute_runner(runners, plots_data, is_nni=False):
    """Run every ModelRunner trial, average their curves, plot and log a summary.

    NOTE(review): each iteration calls runners[0].run() and then drops the
    list head, so the loop index `i` is only a trial counter — confirm this
    rotation (which lets finished runners be garbage-collected) is intended.
    """
    train_losses = []
    train_accuracies = []
    train_cn_accuracies = []
    train_f_accuracies = []
    train_s_accuracies = []
    test_intermediate_results = []
    test_losses = []
    test_accuracies = []
    test_cn_accuracies = []
    test_f_accuracies = []
    test_s_accuracies = []
    results = []
    last= runners[-1]
    for i in range(len(runners)):
        #for idx_r, runner in enumerate(runners):
        # Free cached GPU memory between trials.
        with torch.cuda.device("cuda:{}".format(CUDA_Device)):
            torch.cuda.empty_cache()
        time.sleep(1)
        print("trial number",i)
        result_one_iteration = runners[0].run(verbose=2)
        train_losses.append(result_one_iteration[0])
        train_accuracies.append(result_one_iteration[1])
        test_intermediate_results.append(result_one_iteration[2])
        test_losses.append(result_one_iteration[3]["loss"])
        test_accuracies.append(result_one_iteration[3]["acc"])
        results.append(result_one_iteration)
        #todo check if can be deleted (from first check - not changing)
        if len(runners) >1:
            runners=runners[1:]
        print("len runners", len(runners))
    # for printing results on graphs. for other uses - the last result is the one should be used.
    size = len(results)
    # Tuple positions below follow ModelRunner.run's return value:
    # [4] per-epoch train losses, [5] train accs, [6]/[7]/[8] central/1-hop/2-hop
    # accs, [9] per-evaluation test result dicts.
    #train_loss_mean = torch.stack([torch.tensor([results[j][4][i] for i in range(len(results[j][4]))]) for j in range(size)]).mean(axis=0)
    train_loss_mean = np.mean([ [results[j][4][i].item() for i in range(len(results[j][4]))] for j in range(size) ], axis=0)
    #train_acc_mean = torch.stack([ torch.tensor([results[j][5][i] for i in range(len(results[j][5]))]) for j in range(size) ]).mean(axis=0)
    train_acc_mean = np.mean([ [results[j][5][i] for i in range(len(results[j][5]))] for j in range(size) ], axis=0)
    train_cn_acc_mean = np.mean([[results[j][6][i] for i in range(len(results[j][6]))] for j in range(size)], axis=0)
    train_f_acc_mean = np.nanmean([[results[j][7][i] for i in range(len(results[j][7]))] for j in range(size)], axis=0)
    train_s_acc_mean = np.nanmean([[results[j][8][i] for i in range(len(results[j][8]))] for j in range(size)], axis=0)
    #test_loss_mean = torch.stack([ torch.tensor([results[j][6][i]["loss"] for i in range(len(results[j][6]))]) for j in range(size) ]).mean(axis=0)
    test_loss_mean = np.mean([ [results[j][9][i]["loss"].item() for i in range(len(results[j][9]))] for j in range(size) ], axis=0)
    #test_acc_mean = torch.stack([ torch.tensor([torch.tensor(results[j][6][i]["acc"]) for i in range(len(results[j][6]))]) for j in range(size) ])
    test_acc_mean = np.mean([ [results[j][9][i]["acc"] for i in range(len(results[j][9]))] for j in range(size) ], axis=0 )
    test_cn_acc_mean = np.mean([[results[j][9][i]["acc_cn"] for i in range(len(results[j][9]))] for j in range(size)], axis=0)
    test_f_acc_mean = np.mean([[results[j][9][i]["acc_f"] for i in range(len(results[j][9]))] for j in range(size)], axis=0)
    test_s_acc_mean = np.mean([[results[j][9][i]["acc_s"] for i in range(len(results[j][9]))] for j in range(size)], axis=0) #todo take care of None here too?
    final_train_accuracies_mean = np.mean(train_accuracies)
    final_train_accuracies_ste = np.std(train_accuracies) / math.sqrt(len(runners))
    final_test_accuracies_mean = np.mean(test_accuracies)
    final_test_accuracies_ste = np.std(test_accuracies) / math.sqrt(len(runners))
    plots_data["final_train_accuracies_mean"] = final_train_accuracies_mean
    plots_data["final_train_accuracies_ste"] = final_train_accuracies_ste
    plots_data["final_test_accuracies_mean"] = final_test_accuracies_mean
    plots_data["final_test_accuracies_ste"] = final_test_accuracies_ste
    #plot to graphs
    plot_graphs(train_loss_mean, train_acc_mean,train_cn_acc_mean,train_f_acc_mean, train_s_acc_mean, test_loss_mean, test_acc_mean,
                test_cn_acc_mean,test_f_acc_mean,test_s_acc_mean, results[0][10], plots_data)
    if is_nni:
        mean_intermediate_res = np.mean(test_intermediate_results, axis=0)
        for i in mean_intermediate_res:
            nni.report_intermediate_result(i)
        nni.report_final_result(np.mean(test_accuracies))
    # T takes the final of each iteration and for them mkes mean and std
    last.logger.info("*" * 15 + "Final accuracy train: %3.4f" % final_train_accuracies_mean)
    last.logger.info("*" * 15 + "Std accuracy train: %3.4f" % final_train_accuracies_ste)
    last.logger.info("*" * 15 + "Final accuracy test: %3.4f" % final_test_accuracies_mean)
    last.logger.info("*" * 15 + "Std accuracy test: %3.4f" % final_test_accuracies_ste)
    last.logger.info("Finished")
    return
def build_model(rand_test_indices, train_indices, labels ,adjacency_matrices,X,in_features,
                hid_features,out_features,ds_name, cut, activation, optimizer, epochs, dropout, lr, l2_pen, temporal_pen,
                beta, gamma, dumping_name, is_nni=False):
    """Assemble a configured ModelRunner plus its print/file/CSV loggers.

    Creates a timestamped products directory under logs/<dumping_name>/ for
    the CSV and file logs, then bundles all hyper-parameters and data into
    the runner's conf dict.
    """
    optim_name = "Adam" if optimizer == optim.Adam else "SGD"
    conf = {
        "in_features": in_features, "hid_features": hid_features, "out_features": out_features,
        "ds_name": ds_name, "cut": cut,
        "dropout": dropout, "lr": lr, "weight_decay": l2_pen,
        "temporal_pen": temporal_pen, "beta": beta, "gamma": gamma,
        "train_ind": train_indices, "test_ind": rand_test_indices, "labels": labels, "X": X,
        "adj_matrices": adjacency_matrices,
        "optimizer": optimizer, "epochs": epochs, "activation": activation, "optim_name": optim_name,
    }
    products_path = os.path.join(os.getcwd(), "logs", dumping_name, time.strftime("%Y%m%d_%H%M%S"))
    if not os.path.exists(products_path):
        os.makedirs(products_path)
    logger = multi_logger([
        PrintLogger("MyLogger", level=logging.DEBUG),
        FileLogger("results_%s" % dumping_name, path=products_path, level=logging.INFO)], name=None)
    data_logger = CSVLogger("results_%s" % dumping_name, path=products_path)
    # CSV header row for the per-run metrics.
    data_logger.info("model_name", "loss", "acc")
    return ModelRunner(conf, logger=logger, data_logger=data_logger, is_nni=is_nni)
def main_gcn(adj_matrices, X, labels,in_features, hid_features, out_features, ds_name, cut,
             optimizer=optim.Adam, epochs=200, dropout=0.3, lr=0.01, l2_pen=0.005, temporal_pen=1e-6, beta=1/4, gamma = 1/16,
             trials=1, dumping_name='', is_nni=False):
    """Run `trials` independent random train/test splits and execute them.

    Each trial draws a random test set of len(labels) - 20 * out_features
    nodes (leaving 20 nodes per class for training), builds a runner via
    build_model, and finally hands all runners to execute_runner.
    """
    plot_data = {"ds_name": ds_name, "cut": cut}
    runners = []
    for _ in range(trials):
        num_classes = out_features
        # Everything except 20 samples per class goes to the test set.
        rand_test_indices = np.random.choice(len(labels), len(labels) - (20 * num_classes), replace=False)
        train_indices = np.delete(np.arange(len(labels)), rand_test_indices)
        activation = torch.nn.functional.relu
        runners.append(build_model(rand_test_indices, train_indices, labels, adj_matrices, X,
                                   in_features, hid_features, out_features, ds_name, cut,
                                   activation, optimizer, epochs, dropout, lr,
                                   l2_pen, temporal_pen, beta, gamma, dumping_name,
                                   is_nni=is_nni))
    execute_runner(runners, plot_data, is_nni=is_nni)
    return
|
{"/model_runner.py": ["/pre_peocess.py"]}
|
22,070
|
louzounlab/SubGraphs
|
refs/heads/master
|
/pre_peocess.py
|
import networkx as nx
import pickle
import numpy as np
import os
# dataSetName = "PubMed"
# num_classes = 3
# avarage_deg = 4.496018664096972
'DataSets: ' \
'dataSetName = "cora"; num_classes = 7; avarage_deg = 3.8980797636632203' \
'dataSetName = "CiteSeer"; num_classes = 6; avarage_deg = 2.7363991584009617' \
'dataSetName = "PubMed"; num_classes = 3; avarage_deg = 4.496018664096972'
def build_2k_vectors(ds_name, num_classes, train_indices):
    """Build a per-node feature matrix of 1st/2nd-neighborhood label counts.

    For every node i, X[i][:num_classes] counts first neighbors per class and
    X[i][num_classes:] counts strict second neighbors per class; only
    neighbors that belong to `train_indices` are counted (as described in the
    article). Loads gnx_<ds_name>.pkl and labels_<ds_name>.pkl from
    ./dataSets and returns an (n_nodes, 2*num_classes) numpy array.

    Fixes: removed the unused `X2` array; hoisted train membership into a set
    for O(1) lookups; turned stray bare-string pseudo-comments into comments.
    """
    with open(os.path.join("dataSets", "gnx_" + ds_name + ".pkl"), 'rb') as f:
        gnx = pickle.load(f)
    with open(os.path.join("dataSets", "labels_" + ds_name + ".pkl"), 'rb') as f:
        labels = pickle.load(f)
    print("start bulding X")
    X = np.zeros((len(gnx), 2 * num_classes))
    train_set = set(train_indices)  # O(1) membership tests inside the loops
    for i in range(X.shape[0]):
        f_neighbors = list(gnx.neighbors(i))
        # Strict second neighbors: reachable in two hops, excluding first
        # neighbors and the node itself, without duplicates.
        s_neighbors = []
        for f_neighbor in f_neighbors:
            for s_neighbor in gnx.neighbors(f_neighbor):
                if s_neighbor not in f_neighbors and s_neighbor != i and s_neighbor not in s_neighbors:
                    s_neighbors += [s_neighbor]
        # Only training nodes are counted, per class (see article).
        X[i][0:num_classes] = [len([n1 for n1 in f_neighbors if n1 in train_set and labels[n1] == cls]) for cls in range(num_classes)]
        X[i][num_classes:] = [len([n2 for n2 in s_neighbors if n2 in train_set and labels[n2] == cls]) for cls in range(num_classes)]
    print("finish bulding X")
    # X is intentionally NOT pickled here: the counts depend on the current
    # train set, so a cached copy would be stale for other splits.
    return X
def build_x():
    """Build node features and 2-hop ego graphs for the dataset named by the
    module-level ``dataSetName``, pickling the ego graphs to ./dataSets.

    NOTE(review): ``dataSetName`` is only defined in commented-out lines at
    the top of this module, so this function raises NameError as written --
    confirm the intended dataset wiring.
    """
    with open(os.path.join("dataSets","gnx_"+dataSetName+".pkl"), 'rb') as f:
        gnx = pickle.load(f)
    with open(os.path.join("dataSets","labels_"+dataSetName+".pkl"), 'rb') as f:
        labels = pickle.load(f)
    # with open("X_manipulations"+dataSetName+".pkl", 'rb') as f:
    #     X = pickle.load(f)
    print("building X manipulations")
    # NOTE(review): build_2k_vectors is declared as
    # build_2k_vectors(ds_name, num_classes, train_indices); calling it with
    # (gnx, labels) looks like a stale call site -- confirm intended args.
    build_2k_vectors(gnx,labels)
    print("building ego graphs")
    #------------create ego graphs
    # Radius-2 ego graph around every node, pickled together as one list.
    ego_graphs = []
    for i in range(len(gnx)):
        sub = nx.ego_graph(gnx,i,radius=2)
        ego_graphs.append(sub)
    with open(os.path.join("dataSets","ego_graphs_"+dataSetName+".pkl"), 'wb') as handle:
        pickle.dump(ego_graphs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # #-------------------check the ego graphs
    # with open(os.path.join("dataSets","ego_graphs.pkl"), 'rb') as f:
    #     egos = pickle.load(f)
    ##-------------------some tries
    # sub = nx.adjacency_matrix(nx.ego_graph(gnx, 0, radius= 2))
    # g=nx.Graph()
    # g.add_edges_from([(5,2),(3,5)])
    # g=nx.adj_matrix(g)
    # g2=nx.from_scipy_sparse_matrix(g)
    # b=3
def check_degree():
    """Return the average degree of the graph named by module-level ``dataSetName``.

    NOTE(review): ``dataSetName`` is only defined in commented-out lines at
    the top of this module (NameError as written), and ``labels`` is loaded
    here but never used -- both look like leftovers to clean up.
    """
    with open(os.path.join("dataSets","gnx_"+dataSetName+".pkl"), 'rb') as f:
        gnx = pickle.load(f)
    with open(os.path.join("dataSets","labels_"+dataSetName+".pkl"), 'rb') as f:
        labels = pickle.load(f)
    return sum([tup[1] for tup in gnx.degree])/len(gnx)
if __name__ == '__main__':
    # One-off exploration entry point: the real steps are commented out and
    # `b=3` only serves as an anchor line for a debugger breakpoint.
    #build_x()
    # avarage_deg = check_degree()
    # print(avarage_deg)
    b=3
|
{"/model_runner.py": ["/pre_peocess.py"]}
|
22,072
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Panels.py
|
# Three parallel lookup tables describing the editor's panel types:
#   Panel_Name       -- display name of each panel type
#   Panel_Int        -- the panel's numeric id in the map data
#   Button_Brush_Int -- panel id painted by the i-th brush button in the UI
Panel_Name = ['Void', 'Neutral', 'Check', 'Encounter', 'Draw', 'Bonus', 'Drop', 'Warp', 'Draw_2', 'Bonus_2', 'Drop_2', 'Deck',
              'Encounter_2', 'Move', 'Move_2', 'WarpMove', 'WarpMove_2', 'Snow', 'Ice', 'Heal', 'Heal_2','Boss','Damage','Damage_2']
Panel_Int = [0,1,2,3,4,5,6,7,8,9,10,18,20,21,22,23,24,25,26,27,28,31,32,33]
Button_Brush_Int = [0,2,5,9,6,10,3,20,4,8,21,22,23,24,7,25,1,18,26,27,28,31,32,33]
# The tables are index-aligned; fail fast at import time if they drift apart.
assert(len(Panel_Name) == len(Panel_Int))
assert(len(Panel_Name) == len(Button_Brush_Int))
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,073
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Graphics.py
|
import os
from PIL import Image
class OFE_Graphics:
    """Loads all panel images from one directory and pre-renders them at a
    fixed set of zoom levels, for fast lookup by (name, zoom).

    Fixes: the two bare ``except:`` clauses are narrowed; keying by
    ``os.path.splitext`` no longer crashes on file names without a dot
    (``file_name.index('.')`` raised ValueError there).
    """

    def __init__(self, zoom_list, path):
        self.zoom_list = zoom_list
        # Open every file directly under `path` (subdirectories are not
        # descended into); unreadable files are reported, not fatal.
        print('[Loading images...]')
        self.img_o_dict = {}
        bad_img = []
        for file_name in os.listdir(path):
            try:
                img = Image.open(path + '/' + file_name)
            except Exception:  # narrowed from bare except: only the open may fail
                bad_img.append(file_name)
            else:
                # Key by base name. NOTE: splitext splits at the LAST dot,
                # where the old index('.') code truncated at the first; panel
                # file names are expected to contain a single dot.
                name = os.path.splitext(file_name)[0]
                self.img_o_dict[name] = img
        # Report load results.
        img_count = len(self.img_o_dict)
        print(img_count, 'images have been loaded.')
        for file_name in bad_img:
            print('Warning: ' + file_name + ' is not a image')
        # Pre-scale every image once per zoom level.
        print('[Creating zooming images...]')
        self.img_zoom_dict = self.Img_Zoom(self.img_o_dict, zoom_list)
        print(zoom_list, 'zoom levels have been created')

    def get_image(self, name, zoom = 1.0):
        """Return the pre-scaled image for `name` at `zoom`.

        On a miss this prints diagnostics and implicitly returns None,
        preserving the original lenient behavior.
        """
        try:
            img = self.img_zoom_dict[name][zoom]
        except KeyError:
            if not name in self.img_zoom_dict:
                print('Error: ', name, ' is not in graphics')
            if not zoom in self.zoom_list:
                print('Error: ', zoom, ' is not in zoom_list')
        else:
            return img

    def Img_Zoom(self, img_o_dict, zoom_list):
        """Return {name: {zoom: resized image}}, resizing each source image
        bicubically to a (128*zoom) x (128*zoom) square."""
        new_dict = {}
        PX = 128
        for name in img_o_dict:
            new_dict[name] = {}
            img = img_o_dict[name]
            for zoom in zoom_list:
                px = int(PX * zoom)
                img_new = img.resize((px,px), Image.BICUBIC)
                new_dict[name][zoom] = img_new
        return new_dict
if __name__ == '__main__':
    # Manual smoke test; the hard-coded Windows path only exists on the
    # original author's machine.
    graphics = OFE_Graphics([0.5, 0.75], r'D:\OneDrive\ไธชไบบ\100oj\ๆฉๆฑๅฐๅพ็ผ่พๅจ\OFEๆญฃๅผv1.0\panels')
    graphics.get_image('Panel_Bonus', 0.5)
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,074
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Canvas.py
|
#OFE_Canvas
import sys, os
import copy
from PyQt5 import QtGui, QtWidgets, QtCore
from PIL import Image
from PIL.ImageQt import ImageQt
from OFE.OFE_Field import OFE_Field
from OFE.OFE_Image import OFE_Image
from OFE import Panel_Int, Panel_Name, Button_Brush_Int
#ๆ น็ฎๅฝ
path0 = os.path.dirname(__file__)
#Build a new Image from pixel rows laid out as list[y][x]
def New_Px(DATA):
    """Build a new RGBA Image from pixel rows laid out as DATA[y][x]."""
    # NOTE(review): `type` (which also shadows the builtin) is derived from
    # the channel count but never used -- Image.new below is hard-coded to
    # "RGBA". Presumably Image.new(type, ...) was intended; confirm before
    # changing, since the fill tuple below is RGBA-shaped.
    type = "RGBA"
    if len(DATA[0][0]) == 3:
        type = "RGB"
    Y = len(DATA)      # number of rows
    X = len(DATA[0])   # number of columns
    Img = Image.new("RGBA", (X,Y),(0,0,0,0))
    # Flatten row-major for putdata.
    PUT_DATA = []
    for raw in DATA:
        PUT_DATA += raw
    Img.putdata(PUT_DATA)
    return Img
#ๅพ็ๆ ผๅผ่ฝฌๆข
# Image format conversion
def PIXMAP(img):
    """Convert a PIL image to a QPixmap (via ImageQt) for Qt painting."""
    ImgQt = ImageQt(img)
    pixmap = QtGui.QPixmap.fromImage(ImgQt)
    return pixmap
#Zoom List
Zoom_List = [0.5]
#็ปๆฟๆฌไฝ
class Canvas(QtWidgets.QLabel):
def __init__(self, field, PARAMETER, statue, App = None, file_name = 'Title', file_path = '', parent = None):
super(Canvas,self).__init__(parent)
self.init(field, PARAMETER, statue, App, file_name, file_path)
def init(self, field, PARAMETER, statue, App = None, file_name = 'Title', file_path = ''):
#ๅๅงๅ
self.setMouseTracking(True)
self.file_name = file_name
self.file_path = file_path
#ไปPARAMETERไธญๆๅ
self.Graphics = PARAMETER['Graphics']
self.file = {'Field':field, 'History':[], 'Pos':1, 'Change':0}
self.Selected = {'Type': 'None', 'Pos_Start':(0,0), 'Pos_End':(0,0), 'Copy_Index': 0,
'Trans_Field': None, 'Trans_Pos': (0,0), 'Trans_Img': None, 'Trans_Dis': (0,0), 'Duplicate_Index': 0}
if self.Is_Field():
self.file['History'].append(copy.deepcopy(field))
self.file['Pos'] = 1
self.PARAMETER = PARAMETER
self.statue = statue
self.App = App
#ๅๅงๅ
self.Field_Img = None
self.Paint_Command = {'All':None}
self.Save_Index = 0
#้ผ ๆ ๆ ผๅญไฝ็ฝฎ
self.X = 0
self.Y = 0
self.X_old = 0
self.Y_old = 0
#ๅๅง็ปๅฎๅคงๅฐ
self.Img_Draw({'All':None})
#ๆฏๅฆๆฏๅฐๅพๆไปถ
def Is_Field(self):
if self.file['Field'].data:
return True
else:
False
#่ฟๅๆฌๅฐๅพ
    def Field(self):
        """Return the OFE_Field currently loaded in this canvas."""
        return self.file['Field']
##้ผ ๆ
#่ฎพ็ฝฎ็งปๅจไธไธ้
def Set_XY(self, pos):
Min = 0
Size = self.file['Field'].size()
MaxX = Size[0] - 1
MaxY = Size[1] - 1
return max(0, min(pos[0], MaxX)), max(0, min(pos[1], MaxY))
#่ฟๅๆฏๅฆๅจๆ ผ็นไธๅ็็งปๅจ
def Is_Move(self):
if self.X != self.X_old or self.Y != self.Y_old:
return True
else:
return False
#ไฝ็ฝฎไฝๅทฎ
def Pos_Minus(self, pos1, pos2):
posnew = (pos1[0]-pos2[0], pos1[1]-pos2[1])
return posnew
#ไฝ็ฝฎไฝๅ
def Pos_Add(self, pos1, pos2):
posnew = (pos1[0]+pos2[0], pos1[1]+pos2[1])
return posnew
#่ฟๅๆฏๅฆๅคๅจTransformๅบๅไธญไปฅๅๅๅทฎ่ท็ฆป
def Distance(self):
x1 = self.Selected['Trans_Pos'][0]
y1 = self.Selected['Trans_Pos'][1]
size_area = self.Selected['Trans_Field'].size()
x2 = x1 + size_area[0]
y2 = y1 + size_area[1]
if self.X in range(x1, x2) and self.Y in range(y1, y2):
x_dis = self.X - x1
y_dis = self.Y - y1
return (x_dis, y_dis)
else:
return None
#้ผ ๆ ็นๅป
def mousePressEvent(self, event):
panel_count = len(Panel_Int)
button_count = 6
transform_count = 6
command = {}
if self.file['Field'].data:
#้ผ ๆ ไฝ็ฝฎ
pos = event.pos()
x = pos.x()
y = pos.y()
PX = 128
zoom = self.PARAMETER['Img_parameter']['Zoom']
px = int(PX * zoom)
self.X, self.Y = self.Set_XY((int(x / px), int(y / px)))
POS = (self.X, self.Y)
if event.button() == QtCore.Qt.RightButton:
#ๅณ้ฎๆไธๆไปค
print(event.pos())
if self.file['Field'].data:
Button_id = self.PARAMETER['Command']['Button']
#Brushๅ ้คๆจกๅผ
if Button_id >= 0 and Button_id < panel_count:
panel_id = 0
ischange = self.Point_Panel(panel_id)
if ischange:
command['Point'] = POS
#ๅ ้ค็ฎญๅคด
if Button_id >= panel_count + 1 and Button_id <= panel_count + 3:
ischange = self.Point_Arrow(arrow_command = [-1,-1,-1,-1])
if ischange:
command['Point'] = POS
elif event.button() == QtCore.Qt.LeftButton:
#ๅทฆ้ฎๆไธๆไปค
if self.file['Field'].data:
Button_id = self.PARAMETER['Command']['Button']
#Brushๆจกๅผ
if Button_id >= 0 and Button_id < panel_count:
B_command = self.PARAMETER['Command']['Button']
panel_id = Button_Brush_Int[B_command]
ischange = self.Point_Panel(panel_id)
if ischange:
command['Point'] = POS
#ๅ ้ค็ฎญๅคด
if Button_id == panel_count + 1 :
ischange = self.Point_Arrow(arrow_command = [-1,-1,-1,-1])
if ischange:
command['Point'] = POS
#้ๆฉๆจกๅผ
if Button_id == panel_count:
if self.Selected['Type'] == 'None' or self.Selected['Type'] == 'Selected':
#ๅๆถCopy IndexError
self.Selected['Copy_Index'] = 0
self.Selected['Type'] = 'Selecting'
self.Selected['Pos_Start'] = POS
self.Selected['Pos_End'] = POS
command['Cursor'] = None
#ๅๆขๆจกๅผ
if Button_id == panel_count:
if self.Selected['Type'] == 'Transform':
dis = self.Distance()
if dis:
self.Selected['Type'] = 'Transforming'
self.Selected['Trans_Dis'] = dis
if command != {}:
self.A_Paint(command)
#้ผ ๆ ็งปๅจ
def mouseMoveEvent(self, event):
if self.file['Field'].data:
#ๅฐๅฝๅไฝ็ฝฎ่ฎฐๅฝ
self.X_old = self.X
self.Y_old = self.Y
command = {}
if self.file['Field'].data:
#้ผ ๆ ไฝ็ฝฎ
pos = event.pos()
x = pos.x()
y = pos.y()
PX = 128
zoom = self.PARAMETER['Img_parameter']['Zoom']
px = int(PX * zoom)
self.X, self.Y = self.Set_XY((int(x / px), int(y / px)))
POS = (self.X, self.Y)
#็ถๆๆกๆนๅ
text = ''
text += 'size = ('+str(self.file['Field'].size()[0])+', '+str(self.file['Field'].size()[1])+')'
text += ' | '
text += 'pos = ('+str(self.X)+', '+str(self.Y)+')'
self.statue.showMessage(text)
            #Refresh the cursor
if self.Is_Move():
command['Cursor'] = None
if event.buttons() == QtCore.Qt.RightButton:
#ๅณ้ฎ็งปๅจๆไปค
print(event.pos())
if self.file['Field'].data:
Button_id = self.PARAMETER['Command']['Button']
#Brushๅ ้คๆจกๅผ
if Button_id >= 0 and Button_id < len(Button_Brush_Int):
panel_id = 0
ischange = self.Point_Panel(panel_id)
if ischange:
command['Point'] = POS
#ๅ ้ค็ฎญๅคด
if Button_id >= len(Button_Brush_Int) + 1 and Button_id <= len(Button_Brush_Int) + 3:
ischange = self.Point_Arrow(arrow_command = [-1,-1,-1,-1])
if ischange:
command['Point'] = POS
elif event.buttons() == QtCore.Qt.LeftButton:
#ๅทฆ้ฎๆไธๆไปค
if self.file['Field'].data:
Button_id = self.PARAMETER['Command']['Button']
#Brushๆจกๅผ
if Button_id >= 0 and Button_id < len(Button_Brush_Int):
B_command = self.PARAMETER['Command']['Button']
panel_id = Button_Brush_Int[B_command]
ischange = self.Point_Panel(panel_id)
if ischange:
command['Point'] = POS
#ๅ ้ค็ฎญๅคด
if Button_id == len(Button_Brush_Int) + 1:
ischange = self.Point_Arrow(arrow_command = [-1,-1,-1,-1])
if ischange:
command['Point'] = POS
#Line็ฎญๅคด
if Button_id == len(Button_Brush_Int) + 2:
POS_old = (self.X_old, self.Y_old)
#ไธคไธชๆ ผๅญไธญๆไปปๆ่็ฉบๆ ผๆ ๆ
if not (self.file['Field'].Point_IsVoid(POS) or self.file['Field'].Point_IsVoid(POS_old)):
Arrow_Name = ['Left', 'Up', 'Right', 'Down']
arrow_command = [0,0,0,0]
arrow_add = ''
if self.X - self.X_old == -1:
arrow_add = 'Left'
elif self.X - self.X_old == 1:
arrow_add = 'Right'
elif self.Y - self.Y_old == -1:
arrow_add = 'Up'
elif self.Y - self.Y_old == 1:
arrow_add = 'Down'
if arrow_add != '':
arrow_command[Arrow_Name.index(arrow_add)] = 1
ischange = self.Point_Arrow(arrow_command, old = True)
if ischange:
command['Point'] = POS_old
#Lineๅ ้ค็ฎญๅคด
if Button_id == len(Button_Brush_Int) + 3:
Arrow_Name = ['Left', 'Up', 'Right', 'Down']
arrow_command = [0,0,0,0]
arrow_add = ''
if self.X - self.X_old == -1:
arrow_add = 'Left'
elif self.X - self.X_old == 1:
arrow_add = 'Right'
elif self.Y - self.Y_old == -1:
arrow_add = 'Up'
elif self.Y - self.Y_old == 1:
arrow_add = 'Down'
if arrow_add != '':
arrow_command[Arrow_Name.index(arrow_add)] = -1
ischange = self.Point_Arrow(arrow_command, old = True)
if ischange:
POS_old = (self.X_old, self.Y_old)
command['Point'] = POS_old
#้ๆฉๆจกๅผ
if Button_id == len(Button_Brush_Int):
if self.Selected['Type'] == 'Selecting':
self.Selected['Pos_End'] = POS
if Button_id == len(Button_Brush_Int):
if self.Selected['Type'] == 'Transforming':
self.Selected['Trans_Pos'] = self.Pos_Minus(POS, self.Selected['Trans_Dis'])
if command != {}:
self.A_Paint(command)
#้ผ ๆ ้ๆพ
def mouseReleaseEvent(self, Event):
#่ฎฐๅฝ
if self.file['Field'].data:
self.Record()
#็ปๆ้ๆฉ
if self.Selected['Type'] == 'Selecting':
self.Selected_Start()
#็ปๆmove
if self.Selected['Type'] == 'Transforming':
self.Selected['Type'] = 'Transform'
self.A_Paint({'Cursor':None})
#ๆ้ฎๆไปคๅๅ
def Button_Click(self, id):
#ๅจ้้ๆฉๆจกๅผ๏ผๆดๆขๆ้ฎ
if self.Selected['Type'] == 'None':
if id >= 0 and id < len(Button_Brush_Int) + 4:
#ๆดๆขๆ้ฎ
self.PARAMETER['Command']['Button'] = id
#ๅจ้ๆฉๆจกๅผไธ๏ผไธๆดๆขๆ้ฎ๏ผๆง่กfillๆไปค
if self.Selected['Type'] == 'Selected':
ischange = False
#fill panel
if id >= 0 and id < len(Button_Brush_Int):
panel_id = Button_Brush_Int[id]
ischange = self.Fill(panel_id)
#fill ๅ ้ค็ฎญๅคด
if id == len(Button_Brush_Int) + 1:
#ๅ ้ค็ฎญๅคดไฝฟ็จ็นๆฎid = 101
ischange = self.Fill(101)
#ๅๆถ้ๆฉ
if id == len(Button_Brush_Int) + 5:
self.Selected_Cancel()
if ischange:
#ๅจๅญ
self.Record()
#ๅฏนๅพๅ็ๆนๅ
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
command = {'Rec':Rec}
self.A_Paint(command)
#Transformๆจกๅผ
if self.Selected['Type'] == 'Transform':
#็กฎ่ฎคๅๆข
if id == len(Button_Brush_Int) + 10:
self.Transform_Ok()
#ๅๆถๅๆข
if id == len(Button_Brush_Int) + 11:
self.Transform_Cancel()
#่ช็ฑๅๆข
if id >= len(Button_Brush_Int) + 6 and id < len(Button_Brush_Int) + 10:
list = ['clockwise', 'anticlockwise', 'vertical', 'horizonal']
sign = list[id - len(Button_Brush_Int) - 6]
self.Free(sign)
'''ๅจๅญ'''
def Save(self, path):
if self.Is_Field() and path != '':
file_full = path
file_name = QtCore.QFileInfo(file_full).fileName()
#ๅจๅญๆไปถ
self.file['Field'].Save(file_full)
            #Update the stored file name/path and the saved-history index
self.file_name = file_name
self.file_path = file_full
self.Save_Index = len(self.file['History']) - self.file['Pos']
#A_Command
a_command = {}
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#ๅจๅญๆ ็ญพๅๆด
a_command['Tab'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
'''้ๆฉ'''
def Selected_Start(self):
self.Selected['Type'] = 'Selected'
def RePos(pos1, pos2):
x1 = pos1[0]
x2 = pos2[0]
y1 = pos1[1]
y2 = pos2[1]
posmin = (min(x1,x2), min(y1,y2))
posmax = (max(x1,x2), max(y1,y2))
return posmin, posmax
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
self.Selected['Pos_Start'], self.Selected['Pos_End'] = RePos(Pos1, Pos2)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
#ๆ้ฎๅพๆ ๅๆด
a_command['Button'] = {'Icon':{}}
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
def Selected_Cancel(self):
#่ฎพ็ฝฎ
self.Selected['Type'] = 'None'
self.Selected['Type'] = 'None'
self.Selected['Pos_Start'] = (0,0)
self.Selected['Pos_End'] = (0,0)
#ๆดๆขๆ้ฎ
self.PARAMETER['Command']['Button'] = len(Button_Brush_Int)
#ๅ
ๆ ๆดๆฐ
command = {'Cursor':None}
self.A_Paint(command)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
#ๆ้ฎๅพๆ ๅๆด
a_command['Button'] = {'Icon':{}}
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
'''็ผ่พ'''
#ๅ็นๆ ผๅญๅๅ
def Point_Panel(self, panel_id):
pos = (self.X, self.Y)
ischange = self.file['Field'].Point_Panel(pos, panel_id)
if ischange:
self.file['Change'] = 1
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
if panel_id:
a_command['Status']['Last_Action'] = 'Brush ' + Panel_Name[Panel_Int.index(panel_id)]
else:
a_command['Status']['Last_Action'] = 'Delete Panels'
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
return ischange
#ๅ็น็ฎญๅคดๅๅ
def Point_Arrow(self, arrow_command = [0,0,0,0], old = False):
pos = (self.X, self.Y)
if old:
pos = (self.X_old, self.Y_old)
ischange = self.file['Field'].Point_Arrow(pos, arrow_command, self.PARAMETER['Img_parameter']['BackTrack'])
if ischange:
self.file['Change'] = 1
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
if -1 in arrow_command:
a_command['Status']['Last_Action'] = 'Delete Arrows'
elif 1 in arrow_command:
a_command['Status']['Last_Action'] = 'Draw Arrows'
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
return ischange
#ๅกซๅ
def Fill(self, panel_id):
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
ischange = self.file['Field'].Fill(Rec, panel_id, self.PARAMETER['Img_parameter']['BackTrack'])
if ischange:
self.file['Change'] = 1
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
if panel_id == 0:
a_command['Status']['Last_Action'] = 'Delete Panels'
elif panel_id == 101:
a_command['Status']['Last_Action'] = 'Delete Arrows'
else:
a_command['Status']['Last_Action'] = 'Fill ' + Panel_Name[Panel_Int.index(panel_id)]
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
return ischange
#ๅชๅ
def Cut(self):
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
data_new = self.file['Field'].Cut(Rec)
file_new = OFE_Field('create', data_new)
self.PARAMETER['Clipboard'] = file_new
#ๅจๅญ
if file_new.has_value():
self.file['Change'] = 1
self.Record()
#ๅพๅๆนๅ
self.Selected['Copy_Index'] = 1
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
command = {'Rec':Rec}
self.A_Paint(command)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
a_command['Status']['Last_Action'] = 'Cut'
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#ๅคๅถ
def Copy(self):
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
data_new = self.file['Field'].Copy(Rec)
file_new = OFE_Field('create', data_new)
self.PARAMETER['Clipboard'] = file_new
#ๅพๅๆนๅ
self.Selected['Copy_Index'] = 1
command = {'Cursor':None}
self.A_Paint(command)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
a_command['Status']['Last_Action'] = 'Copy'
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#็ฒ่ดด
def Paste(self):
Pos = self.Selected['Pos_Start']
section = self.PARAMETER['Clipboard']
self.file['Field'].Paste(Pos, section.data)
        #New selection end, clamped to the field bounds
x_new = min(Pos[0] + section.size()[0], self.file['Field'].size()[0]) - 1
y_new = min(Pos[1] + section.size()[1], self.file['Field'].size()[1]) - 1
self.Selected['Pos_End'] = (x_new, y_new)
#ๅจๅญ
self.file['Change'] = 1
self.Record()
##ๅพๅๆนๅ
self.Selected['Copy_Index'] = 0
command = {'All':None}
self.A_Paint(command)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
a_command['Status']['Last_Action'] = 'Paste'
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#ๅๆข
def Transform(self):
self.Selected['Type'] = 'Transform'
self.Selected['Type'] = 'Transform'
#ๅบๅๅชๅ๏ผๅๅ
ฅTrans_Field
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
data_new = self.file['Field'].Cut(Rec)
file_new = OFE_Field('create', data_new)
self.Selected['Trans_Field'] = file_new
#ๅๅงๅๆญคๅพ็
img_area = OFE_Image(self.Selected['Trans_Field'], self.Graphics, self.PARAMETER['Img_parameter']).Main()
self.Selected['Trans_Img'] = img_area
#ๅๅงไฝ็ฝฎ่ฎพๅฎ
self.Selected['Trans_Pos'] = self.Selected['Pos_Start']
##ๅพๅๆนๅ
command = {'All':None}
self.A_Paint(command)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
#ๆ้ฎๅพๆ ๅๆด
a_command['Button'] = {'Icon':{}}
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#Duplicate
def Duplicate(self):
self.Selected['Type'] = 'Transform'
self.Selected['Type'] = 'Transform'
#ๅบๅๅชๅ๏ผๅๅ
ฅTrans_Field
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
data_new = self.file['Field'].Copy(Rec)
file_new = OFE_Field('create', data_new)
self.Selected['Trans_Field'] = file_new
#ๅๅงๅๆญคๅพ็
img_area = OFE_Image(self.Selected['Trans_Field'], self.Graphics, self.PARAMETER['Img_parameter']).Main()
self.Selected['Trans_Img'] = img_area
#ๅๅงไฝ็ฝฎ่ฎพๅฎ
self.Selected['Trans_Pos'] = self.Selected['Pos_Start']
#็ถๆไธดๆถ่ฎฐๅฝ
self.Selected['Duplicate_Index'] = 1
##ๅพๅๆนๅ
command = {'All':None}
self.A_Paint(command)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
#ๆ้ฎๅพๆ ๅๆด
a_command['Button'] = {'Icon':{}}
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#Free
def Free(self, sign):
self.Selected['Trans_Field'].Free(sign)
#้็ปๆญคๅพ็
img_area = OFE_Image(self.Selected['Trans_Field'], self.Graphics, self.PARAMETER['Img_parameter']).Main()
self.Selected['Trans_Img'] = img_area
##ๅพๅๆนๅ
command = {'Cursor':None}
self.A_Paint(command)
#็กฎ่ฎคๅๆข
def Transform_Ok(self):
#ๆฐๅฐๅพ
self.file['Field'].Paste(self.Selected['Trans_Pos'], self.Selected['Trans_Field'].data)
#ๅๆฐๅๅ
self.Selected['Type'] = 'Selected'
self.Selected['Pos_Start'] = self.Set_XY(self.Selected['Trans_Pos'])
pos_add = self.Pos_Add(self.Selected['Trans_Pos'], self.Selected['Trans_Field'].size())
pos_end = self.Set_XY((pos_add[0]-1, pos_add[1]-1))
self.Selected['Pos_End'] = pos_end
self.Selected['Trans_Field'] = None
self.Selected['Trans_Img'] = None
#็ถๆๅๆด
self.Selected['Duplicate_Index'] = 0
#ๅจๅญ
self.file['Change'] = 1
self.Record()
#ๅพๅๆนๅ
Pos1 = self.Selected['Pos_Start']
Pos2 = self.Selected['Pos_End']
Rec = [Pos1, Pos2]
command = {'Rec':Rec}
self.A_Paint(command)
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
if self.Selected['Duplicate_Index']:
a_command['Status']['Last_Action'] = 'Duplicate'
else:
a_command['Status']['Last_Action'] = 'Transform'
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#ๆ้ฎๅพๆ ๅๆด
a_command['Button'] = {'Icon': {}}
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#ๅๆถๅๆข
def Transform_Cancel(self):
#ๅๅงๅ
self.Selected['Type'] = 'Selected'
self.Selected['Trans_Field'] = None
self.Selected['Trans_Img'] = None
#่ฏปๅๅๅฒ
field = self.file['History'][-1]
self.file['Field'] = copy.deepcopy(field)
#้็ป
self.A_Paint({'All':None})
#็ถๆไธดๆถ่ฎฐๅฝ
self.Selected['Duplicate_Index'] = 0
#A_Command
a_command = {}
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#ๆ้ฎๅพๆ ๅๆด
a_command['Button'] = {'Icon': {}}
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
'''่ฎฐๅฝ'''
    def Need_Save(self):
        """Return True when there are unsaved changes, i.e. the history
        position has moved away from the last saved snapshot (Save_Index)."""
        if not self.Is_Field():
            return False
        if self.Save_Index == len(self.file['History']) - self.file['Pos']:
            return False
        return True
#่ฎฐๅฝ
def Record(self):
if self.file['Change']:
self.file['Change'] = 0
#ๅ ้คๆชๆฅๅฒ
pos = self.file['Pos']
if pos > 1:
self.file['History'] = self.file['History'][:-pos+1]
self.file['Pos'] = 1
            #Append the current field to the history
self.file['History'].append(copy.deepcopy(self.file['Field']))
#ๅจๅญๆ ็ญพๅๆด
if len(self.file['History']) - self.file['Pos'] <= self.Save_Index:
self.Save_Index = -1
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#ๅจๅญๆ ็ญพๅๆด
a_command['Tab'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#ๆค้
def Undo(self):
history_len = len(self.file['History'])
if history_len > self.file['Pos']:
self.file['Pos'] += 1
pos = self.file['Pos']
field = self.file['History'][-pos]
self.file['Field'] = copy.deepcopy(field)
#้็ป
self.A_Paint({'All':None})
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
a_command['Status']['Last_Action'] = 'Undo'
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#ๅจๅญๆ ็ญพๅๆด
a_command['Tab'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
#้ๅ
def Redo(self):
if self.file['Pos'] > 1:
self.file['Pos'] -= 1
pos = self.file['Pos']
field = self.file['History'][-pos]
self.file['Field'] = copy.deepcopy(field)
#้็ป
self.A_Paint({'All':None})
#A_Command
a_command = {}
#็ถๆๅๆด
a_command['Status'] = {}
a_command['Status']['Last_Action'] = 'Redo'
#่ๅๆ ๆดๆฐ
a_command['Menu'] = None
#ๅจๅญๆ ็ญพๅๆด
a_command['Tab'] = None
#A_Commandไฟกๅทๅๅฐ
self.App['Command'].emit(a_command)
'''็ปๅถ'''
#็ปๅถๆปๅฝๆฐ
    def A_Paint(self, command = {}):
        """Trigger a synchronous repaint with `command` visible to paintEvent.

        The command dict is parked on self.Paint_Command for the duration of
        repaint() and cleared afterwards. (The mutable default is harmless
        here: the argument is only read and reassigned, never mutated.)
        """
        self.Paint_Command = command
        self.repaint()
        self.Paint_Command = {}
#็ป็ฌไบไปถ
def paintEvent(self, event):
#ๅๅงๅ
command = self.Paint_Command
paint = QtGui.QPainter()
paint.begin(self)
#็ปๅฐๅพๆฌไฝ
PX = 128
zoom = self.PARAMETER['Img_parameter']['Zoom']
px = int(PX * zoom)
img = self.Img_Draw(command)
#็ปTransform
self.Transform_Draw(img, command)
#่ๆฏ
Img_Back = Image.new("RGBA", img.size, self.PARAMETER['Img_parameter']['Background'])
Img_Back.paste(img, (0,0), img.split()[3])
        #Fill the canvas
paint.drawPixmap(self.rect(), PIXMAP(Img_Back))
#็ป็บฟ
def DrawRec(px, Pen_Size, pos, posend = None):
Shrink = int(Pen_Size/2)
if posend == None:
x1 = pos[0]*px + Shrink
x2 = pos[0]*px + px - Shrink
y1 = pos[1]*px + Shrink
y2 = pos[1]*px + px - Shrink
else:
def RePos(pos1, pos2):
x1 = pos1[0]
x2 = pos2[0]
y1 = pos1[1]
y2 = pos2[1]
posmin = (min(x1,x2), min(y1,y2))
posmax = (max(x1,x2), max(y1,y2))
return posmin, posmax
pos1, pos2 = RePos(pos, posend)
x1 = pos1[0]*px + Shrink
x2 = pos2[0]*px + px - Shrink
y1 = pos1[1]*px + Shrink
y2 = pos2[1]*px + px - Shrink
paint.drawLine(x1, y1, x1, y2)
paint.drawLine(x1, y1, x2, y1)
paint.drawLine(x2, y2, x1, y2)
paint.drawLine(x2, y2, x2, y1)
#็ป็บฟๅผๅง
if self.Is_Field():
#ๅจ้ๆฉไบ้กนๅบ็ฐๆถ็ปๅถ้ๆฉๆนๆก
if self.Selected['Type'] == 'Selecting' or self.Selected['Type'] == 'Selected':
Pen_Size = 2
if self.Selected['Copy_Index']:
pen = QtGui.QPen(QtCore.Qt.blue, Pen_Size, QtCore.Qt.SolidLine)
else:
pen = QtGui.QPen(QtCore.Qt.green, Pen_Size, QtCore.Qt.SolidLine)
paint.setPen(pen)
DrawRec(px, Pen_Size, self.Selected['Pos_Start'], self.Selected['Pos_End'])
#ๅจๅๆขไบ้กนๅบ็ฐๆถ็ปๅถ้ป่ฒๆนๆก
if self.Selected['Type'] == 'Transform' or self.Selected['Type'] == 'Transforming':
Pen_Size = 2
pen = QtGui.QPen(QtCore.Qt.yellow, Pen_Size, QtCore.Qt.SolidLine)
size = self.Selected['Trans_Field'].size()
pos_start = self.Selected['Trans_Pos']
pos_end = (pos_start[0]+size[0]-1, pos_start[1]+size[1]-1)
paint.setPen(pen)
DrawRec(px, Pen_Size, pos_start, pos_end)
            #Draw the cursor whenever a selection drag is not in progress
if self.Selected['Type'] == 'None' or self.Selected['Type'] == 'Selected' or self.Selected['Type'] == 'Transform':
Pen_Size = 2
pen = QtGui.QPen(QtCore.Qt.red, Pen_Size, QtCore.Qt.SolidLine)
paint.setPen(pen)
DrawRec(px, Pen_Size, (self.X, self.Y))
paint.end()
def Transform_Draw(self, img, command = {}):
#ๅจTransformๅบ็ฐๆถ่ฎฉ่ๆฏๅๆ๏ผๅนถ็ปๅถ่ขซ้ๆฉ็ๅบๅ
if self.Selected['Type'] == 'Transform' or self.Selected['Type'] == 'Transforming':
PX = 128
zoom = self.PARAMETER['Img_parameter']['Zoom']
px = int(PX * zoom)
#ๆ นๆฎๆไปค้็ปTransform
if 'Transform_Redraw' in command:
img_area = OFE_Image(self.Selected['Trans_Field'], self.Graphics, self.PARAMETER['Img_parameter']).Main()
self.Selected['Trans_Img'] = img_area
mask = Image.new("RGBA", img.size,(0,0,0,128))
img.paste(mask, (0,0), mask.split()[3])
img_area = self.Selected['Trans_Img']
pos = self.Selected['Trans_Pos']
img.paste(img_area, (pos[0]*px, pos[1]*px), img_area.split()[3])
# ็ๆImg็ๆปๅฝๆฐ
def Img_Draw(self, command = {}):
if not self.Is_Field():
img = self.Init_Draw()
else:
img = None
#้็ปๆดๅผ ๅพ
if 'All' in command:
img = self.Main_Draw()
#้็ปไธไธชๆ ผๅญ
if 'Point' in command:
Point_Pos = command['Point']
img = self.Point_Draw(Point_Pos)
#้็ปไธไธช็ฉๅฝขๅบๅ
if 'Rec' in command:
Rec = command['Rec']
img = self.Rec_Draw(Rec)
#็ปๅพๅฎๆฏ๏ผไฟๅญๅฐๅพๅฐself
if img:
self.Field_Img = img
img = self.Field_Img
        #All drawing done: fix the widget size and return the image
self.setFixedSize(img.size[0],img.size[1])
return copy.deepcopy(img)
# main็ป
def Main_Draw(self):
#็ปๅฐๅพๆฌไฝ
img = OFE_Image(self.file['Field'], self.Graphics, self.PARAMETER['Img_parameter']).Main()
return img
#ๅๆ ผ็ป
def Point_Draw(self, pos):
#ไป็ฐๆๅพ็ๆนๅๅไธชๆ ผๅญ
img = OFE_Image(self.file['Field'], self.Graphics, self.PARAMETER['Img_parameter']).Point(self.Field_Img, pos)
return img
#็ฉๅฝข็ป
    def Rec_Draw(self, rec):
        """Redraw after a rectangular region changed.

        NOTE(review): despite taking `rec`, this currently re-renders the
        whole field image (the original comment said "temporarily draws
        everything"); `rec` is unused.
        """
        img = OFE_Image(self.file['Field'], self.Graphics, self.PARAMETER['Img_parameter']).Main()
        return img
# Logo็ป
def Init_Draw(self):
img = Image.open(path0 + '/'+ 'title_logo.png')
zoom = self.PARAMETER['Img_parameter']['Zoom'] * 3
newsize = (int(img.size[0]*zoom), int(img.size[1]*zoom))
img = img.resize(newsize, Image.BICUBIC)
img_main = Image.new("RGBA", img.size,self.PARAMETER['Img_parameter']['Background'])
img_main.paste(img,(0,0),img.split()[3])
return img_main
###State changes###
# Recompute the menu-enable table.
def Menu_Change(self):
    """Update self.PARAMETER['Menu_able'] from the current edit state.

    NOTE(review): values appear to be 1 = greyed out, 0 = enabled —
    confirm against the menu-building code.
    """
    able = self.PARAMETER['Menu_able']
    # Save / Save As
    if self.Is_Field() and self.Selected['Type'] != 'Transform':
        able['Save_As'] = 0
        able['Save'] = 0 if self.Need_Save() else 1
    else:
        able['Save_As'] = 1
        able['Save'] = 1
    # Undo: disabled at the newest history entry or with no history at all
    history_len = len(self.file['History'])
    able['Undo'] = int(history_len == self.file['Pos'] or history_len == 0)
    # Redo: disabled at (or before) the oldest position
    able['Redo'] = int(self.file['Pos'] <= 1)
    # Clipboard / transform actions depend on having a selection
    if self.Selected['Type'] == 'Selected':
        able['Cut'] = 0
        able['Copy'] = 0
        able['Transform'] = 0
        able['Duplicate'] = 0
        able['Paste'] = 0 if self.PARAMETER['Clipboard'] else 1
    else:
        for key in ('Cut', 'Copy', 'Paste', 'Transform', 'Duplicate'):
            able[key] = 1
    # While a transform is in progress, everything is disabled
    if self.Selected['Type'] == 'Transform':
        for key in ('Undo', 'Redo', 'Cut', 'Copy', 'Paste', 'Transform', 'Duplicate'):
            able[key] = 1
#A status refresh
def A_Status(self, command = None):
    """Fill *command* with history length/position and the current selection.

    Fix: the old signature used a mutable default (``command = {}``) that
    the body mutates, so state leaked between no-argument calls.  A None
    sentinel gives each call its own fresh dict; callers that pass their
    own dict are unaffected.

    Returns the (possibly freshly created) command dict.
    """
    if command is None:
        command = {}
    # History bookkeeping
    command['History_Len'] = len(self.file['History'])
    command['History_Pos'] = self.file['Pos']
    # Selection range (empty list when nothing is selected)
    if self.Selected['Type'] == 'Selected':
        command['Selected'] = [self.Selected['Pos_Start'], self.Selected['Pos_End']]
    elif self.Selected['Type'] == 'None':
        command['Selected'] = []
    return command
#A button-icon refresh
def A_Button(self, command = None):
    """Record the overall selection type into *command* and return it.

    Fix: the old mutable default (``command = {}``) was mutated by the
    body, sharing one dict across all no-argument calls; replaced with a
    None sentinel.
    """
    if command is None:
        command = {}
    command['Type'] = self.Selected['Type']
    return command
#Canvas frame: a Canvas wrapped in a scroll area with a status bar.
class Canvas_Frame(QtWidgets.QWidget):
    """Composite widget holding one Canvas plus its status bar.

    Everything except layout is delegated verbatim to the wrapped Canvas.
    Fix applied: A_Status/A_Button used mutable default arguments that the
    downstream Canvas methods mutate, leaking state between calls.
    """
    def __init__(self, field, PARAMETER, App = None, file_name = 'Title', file_path = '', parent = None):
        super(Canvas_Frame,self).__init__(parent)
        self.init(field, PARAMETER, App, file_name, file_path)
    def init(self, field, PARAMETER, App = None, file_name = 'Title', file_path = ''):
        #Status bar shown under the canvas
        self.statue = QtWidgets.QStatusBar(self)
        self.statue.showMessage("")
        #The drawing canvas itself
        self.canvas = Canvas(field, PARAMETER, self.statue, App, file_name, file_path)
        #Scroll area hosting the canvas
        scroll = QtWidgets.QScrollArea()
        scroll.setWidget(self.canvas)
        scroll.setAutoFillBackground(True)
        scroll.setWidgetResizable(True)
        #Layout: canvas above, status bar below
        vbox = QtWidgets.QVBoxLayout()
        vbox.addWidget(scroll)
        vbox.addWidget(self.statue)
        self.setLayout(vbox)
    def Is_Field(self):
        return self.canvas.Is_Field()
    def Field(self):
        return self.canvas.Field()
    def file_name(self):
        return self.canvas.file_name
    def file_path(self):
        return self.canvas.file_path
    def Need_Save(self):
        return self.canvas.Need_Save()
    def Menu_Change(self):
        self.canvas.Menu_Change()
    def A_Status(self, command = None):
        # Fix: None sentinel instead of a shared mutable default; the dict
        # is mutated downstream by Canvas.A_Status.
        if command is None:
            command = {}
        return self.canvas.A_Status(command)
    def Button_Click(self ,id):
        self.canvas.Button_Click(id)
    def A_Button(self, command = None):
        # Same mutable-default fix as A_Status.
        if command is None:
            command = {}
        return self.canvas.A_Button(command)
    def Save(self, path):
        self.canvas.Save(path)
    def Undo(self):
        self.canvas.Undo()
    def Redo(self):
        self.canvas.Redo()
    def Cut(self):
        self.canvas.Cut()
    def Copy(self):
        self.canvas.Copy()
    def Paste(self):
        self.canvas.Paste()
    def Transform(self):
        self.canvas.Transform()
    def Duplicate(self):
        self.canvas.Duplicate()
    # NOTE(review): width()/height() deliberately shadow QWidget.width/height
    # and report the inner canvas size instead.
    def width(self):
        return self.canvas.width()
    def height(self):
        return self.canvas.height()
#Canvas tab container.
class Canvas_Tab(QtWidgets.QTabWidget):
    """Tab widget managing one Canvas_Frame per open file.

    A single placeholder ("Title") tab exists when no file is open; it is
    replaced as soon as a real field is inserted.
    Fix applied: A_Status/A_Button used mutable default arguments that are
    mutated downstream.
    """
    # Fired to ask the current tab's caption to refresh.
    TabEmitApp = QtCore.pyqtSignal()
    def __init__(self, PARAMETER, App = None, parent = None):
        super(Canvas_Tab,self).__init__(parent)
        self.init(PARAMETER, App)
    def init(self, PARAMETER, App = None):
        #Shared state / signal hub
        self.PARAMETER = PARAMETER
        self.App = App
        #Start with the placeholder canvas
        self.Canvas_List = []
        self.Insert_Canvas()
        #React to the active tab changing
        self.currentChanged.connect(self.OnChange)
        #Refresh the tab caption on demand
        self.TabEmitApp.connect(self.Tab_Refresh)
    def Insert_Canvas(self, field = None, file_name = 'Title', file_path = ''):
        """Open *field* in a new tab, or create the blank placeholder tab."""
        if field:
            self.PARAMETER['Menu_able']['Close'] = 0
        else:
            field = OFE_Field()
        #New canvas frame
        canvas_new = Canvas_Frame(field, self.PARAMETER, self.App, file_name, file_path)
        #Drop the initial placeholder tab once a real file arrives
        if self.Canvas_List != []:
            if not self.Canvas_List[0].Is_Field():
                self.removeTab(0)
                self.Canvas_List = []
        #Create the new tab (renamed local: avoid shadowing builtin `id`)
        tab_index = self.count()
        self.Canvas_List.append(canvas_new)
        self.insertTab(tab_index, canvas_new, file_name)
        #Focus the new tab
        self.setCurrentIndex(tab_index)
        #A_Command: refresh the menu and the tab caption
        a_command = {}
        a_command['Menu'] = None
        a_command['Tab'] = None
        self.App['Command'].emit(a_command)
    def Remove_Canvas(self):
        """Close the current tab and return its file name (None-ish if the
        only tab is the placeholder)."""
        if self.Canvas_List == []:
            return
        if not self.Canvas_List[0].Is_Field():
            return
        current_id = self.currentIndex()
        file_name = self.Canvas_List[current_id].file_name()
        self.Canvas_List.pop(current_id)
        self.removeTab(current_id)
        if self.Canvas_List == []:
            self.Insert_Canvas()
        #A_Command: refresh the menu
        a_command = {}
        a_command['Menu'] = None
        self.App['Command'].emit(a_command)
        return file_name
    def Is_Field(self):
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].Is_Field()
    def Field(self):
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].Field()
    def file_name(self):
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].file_name()
    def file_path(self):
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].file_path()
    def Need_Save(self):
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].Need_Save()
    def Save(self, path):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Save(path)
    def Undo(self):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Undo()
    def Redo(self):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Redo()
    def Cut(self):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Cut()
    def Copy(self):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Copy()
    def Paste(self):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Paste()
    def Transform(self):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Transform()
    def Duplicate(self):
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Duplicate()
    def width(self):
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].width()
    def height(self):
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].height()
    def A_Paint(self, command):
        # NOTE(review): reaches through the frame into .canvas directly,
        # unlike the other delegates (Canvas_Frame has no A_Paint wrapper).
        current_id = self.currentIndex()
        self.Canvas_List[current_id].canvas.A_Paint(command)
    def Menu_Change(self):
        if self.Canvas_List != []:
            if not self.Canvas_List[0].Is_Field():
                #Grey out "Close" while only the placeholder tab exists
                self.PARAMETER['Menu_able']['Close'] = 1
        #Delegate downwards
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Menu_Change()
    def A_Status(self, command = None):
        # Fix: None sentinel instead of a shared mutable default; the dict
        # is mutated downstream.
        if command is None:
            command = {}
        #Delegate downwards
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].A_Status(command)
    def Button_Click(self, id):
        #Delegate downwards
        current_id = self.currentIndex()
        self.Canvas_List[current_id].Button_Click(id)
    def A_Button(self, command = None):
        # Same mutable-default fix as A_Status.
        if command is None:
            command = {}
        #Delegate downwards
        current_id = self.currentIndex()
        return self.Canvas_List[current_id].A_Button(command)
    #Refresh the current tab's caption ("*" marks unsaved changes)
    def Tab_Refresh(self):
        current_id = self.currentIndex()
        text = self.Canvas_List[current_id].file_name()
        if self.Canvas_List[current_id].Need_Save():
            text += '*'
        self.setTabText(current_id, text)
    #Slot for currentChanged
    def OnChange(self, Event):
        #Event is the new tab index; -1 means no tab is selected
        if Event >= 0:
            #A_Command: refresh status, button icons and menu
            a_command = {}
            a_command['Status'] = {}
            a_command['Button'] = {'Icon':{}}
            a_command['Menu'] = None
            self.App['Command'].emit(a_command)
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,075
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/__init__.py
|
from .OFE_Panels import Panel_Int, Panel_Name, Button_Brush_Int
from .OFE_Field import OFE_Field
from .OFE_Buttoms import ButtonWindow
from .OFE_Status import StatusWindow
from .OFE_Canvas import Canvas_Tab
from .OFE_Files import OFE_Upload, OFE_New, OFE_Files
from .OFE_Graphics import OFE_Graphics
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,076
|
zirconium-n/OFE
|
refs/heads/master
|
/setup.py
|
from setuptools import setup
# Packaging metadata for the 100% Orange Juice field editor.
# Ships the single 'OFE' package plus a Windows launcher batch script.
setup(name='OrangeFieldEditor',
      version='0.1.4',
      description='100% Orange Field Editor',
      url='https://github.com/zirconium-n/OFE',
      author='lhw & sgk',
      license='MIT',
      packages=['OFE'],
      # Runtime dependencies: Qt bindings for the GUI, Pillow for imaging.
      install_requires=[
          'PyQt5',
          'pillow'
      ],
      scripts=['bin/OFE.bat'],
      # Package data (sprites, fields.pak) must be real files on disk.
      zip_safe=False,
      include_package_data=True)
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,077
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Image.py
|
#OFE_Image
from PIL import Image
import sys, os
from OFE import Panel_Int, Panel_Name
import time
#Package root directory
path0 = os.path.dirname(__file__)
##Initial image-loading tables
#Mapping: panel id -> panel sprite name
Panel_Dict = {panel_id: Panel_Name[i] for i, panel_id in enumerate(Panel_Int)}
#Arrow direction index -> sprite name
Arrow_Name = ['Left', 'Up', 'Right', 'Down']
class OFE_Image():
    """Renders an OFE_Field into PIL images.

    Provides a full-board render (Main/Panels/Arrows), a single-cell
    redraw (Point) and a tiny 2x2-pixels-per-cell preview (PX_Image).

    Fixes applied:
    * RGBA alpha values were written as 256, which is out of range for an
      8-bit channel (valid maximum 255) — changed to 255 (fully opaque).
    * Dead locals removed (unused `size_img` maps, unused `type` in New_Px).
    * Arrow-bit tests now use exact integer arithmetic instead of chained
      float division.
    """
    def __init__(self, field, Graphics = None, Img_parameter = None):
        self.field = field                    # OFE_Field with .size() and .data[y][x]
        self.Graphics = Graphics              # sprite provider exposing get_image(name, zoom)
        self.Img_parameter = Img_parameter    # dict: Zoom, Background, Show_arrows, BackTrack, ...

    def _arrow_indices(self, value):
        """Return the arrow direction indices (0..3) encoded in *value*.

        With 'BackTrack' set, the four backtrack bits (16..128) are read
        instead of the forward bits (1..8).
        """
        divisor = 16 if self.Img_parameter['BackTrack'] else 1
        bits = value // divisor
        return [i for i in range(4) if (bits >> i) & 1]

    def PX_Image(self):
        """Return a small preview: every board cell becomes a 2x2 pixel block."""
        def New_Px(DATA):
            # Build an RGBA image from a row-major list-of-lists of pixels.
            Y = len(DATA)
            X = len(DATA[0])
            Img = Image.new("RGBA", (X, Y), (0, 0, 0, 0))
            flat = []
            for row in DATA:
                flat += row
            Img.putdata(flat)
            return Img
        # Legend: panel name -> preview colour (alpha fixed from 256 to 255).
        COLOR = {}
        COLOR['Neutral'] = (214,214,214,255)
        COLOR['Encounter'] = (255,115,109,255)
        COLOR['Encounter_2'] = (255,115,109,255)
        COLOR['Draw'] = (104,255,138,255)
        COLOR['Draw_2'] = (0,246,88,255)
        COLOR['Bonus'] = (254,222,110,255)
        COLOR['Bonus_2'] = (253,186,31,255)
        COLOR['Drop'] = (109,164,255,255)
        COLOR['Drop_2'] = (0,96,246,255)
        COLOR['Warp'] = (198,61,255,255)
        COLOR['WarpMove'] = (198,61,255,255)
        COLOR['WarpMove_2'] = (198,61,255,255)
        COLOR['Move'] = (73,206,180,255)
        COLOR['Move_2'] = (73,206,180,255)
        COLOR['PLAYER1'] = (254,198,149,255)
        COLOR['PLAYER2'] = (187,223,255,255)
        COLOR['PLAYER3'] = (181,255,178,255)
        COLOR['PLAYER4'] = (254,242,156,255)
        px = 2
        size = self.field.size()
        # Pre-fill with transparent pixels (rows are distinct lists; the
        # repeated tuple is immutable, so sharing it is safe).
        DATA = []
        for y in range(px * size[1]):
            DATA.append([(0, 0, 0, 0)] * (px * size[0]))
        # Paint each cell.
        for y in range(size[1]):
            for x in range(size[0]):
                panel_id = self.field.data[y][x][0]
                panel_name = Panel_Name[Panel_Int.index(panel_id)]
                if panel_name in COLOR:
                    for j in range(px):
                        for i in range(px):
                            DATA[y*px+j][x*px+i] = COLOR[panel_name]
                elif panel_name == 'Check':
                    # The check (home) panel shows all four player colours.
                    DATA[y*px+0][x*px+0] = COLOR['PLAYER1']
                    DATA[y*px+1][x*px+0] = COLOR['PLAYER2']
                    DATA[y*px+0][x*px+1] = COLOR['PLAYER3']
                    DATA[y*px+1][x*px+1] = COLOR['PLAYER4']
        return New_Px(DATA)

    def Main(self):
        """Render the whole board: panels plus (optionally) movement arrows."""
        Img = self.Panels()
        if self.Img_parameter['Show_arrows'] == 1:
            Arrow = self.Arrows()
            Img.paste(Arrow, (0, 0), Arrow.split()[3])
        return Img

    def Point(self, Img, pos):
        """Redraw the single cell *pos* = (x, y) in place on *Img*; return Img."""
        zoom = self.Img_parameter['Zoom']
        PX = 128
        px = int(PX * zoom)
        x = pos[0]
        y = pos[1]
        # Repaint the cell onto a fresh background-coloured tile.
        Img_this = Image.new("RGBA", (px, px), self.Img_parameter['Background'])
        panel_id = self.field.data[y][x][0]
        if panel_id:
            Img_Panel = self.Graphics.get_image('Panel_' + Panel_Dict[panel_id], zoom)
            Img_this.paste(Img_Panel, (0, 0), Img_Panel.split()[3])
        if self.Img_parameter['Show_arrows'] == 1:
            # Overlay whichever direction arrows this cell encodes.
            for arrow_num in self._arrow_indices(self.field.data[y][x][1]):
                Img_Arrow = self.Graphics.get_image('Arrow_' + Arrow_Name[arrow_num], zoom)
                Img_this.paste(Img_Arrow, (0, 0), Img_Arrow.split()[3])
        Img.paste(Img_this, (px * x, px * y))
        return Img

    def Panels(self):
        """Render every non-empty panel onto a transparent board-sized image."""
        zoom = self.Img_parameter['Zoom']
        PX = 128
        px = int(PX * zoom)
        size = self.field.size()
        Img = Image.new("RGBA", (size[0] * px, size[1] * px), (0, 0, 0, 0))
        for y in range(size[1]):
            for x in range(size[0]):
                panel_id = self.field.data[y][x][0]
                if panel_id != 0 :
                    Img_Panel = self.Graphics.get_image('Panel_' + Panel_Dict[panel_id], zoom)
                    Img.paste(Img_Panel, (px * x, px * y), Img_Panel.split()[3])
        return Img

    def Arrows(self):
        """Render all movement arrows onto a transparent board-sized image."""
        zoom = self.Img_parameter['Zoom']
        PX = 128
        px = int(PX * zoom)
        size = self.field.size()
        Img = Image.new("RGBA", (size[0] * px, size[1] * px), (0, 0, 0, 0))
        for y in range(size[1]):
            for x in range(size[0]):
                for arrow_num in self._arrow_indices(self.field.data[y][x][1]):
                    Img_Arrow = self.Graphics.get_image('Arrow_' + Arrow_Name[arrow_num], zoom)
                    Img.paste(Img_Arrow, (px * x, px * y), Img_Arrow.split()[3])
        return Img
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,078
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Buttoms.py
|
#OFE_Buttoms
import sys, os
from PIL import Image
from PIL.ImageQt import ImageQt
from PyQt5 import QtGui, QtWidgets, QtCore
from OFE import Panel_Int, Panel_Name
#Root directory: the folder containing the launched script
#(os.path.dirname is defined as os.path.split(p)[0], so this is equivalent).
path0 = os.path.split(os.path.realpath(sys.argv[0]))[0]
#Image format conversion: PIL -> QIcon
def ICON(img):
    """Return *img* wrapped as a QIcon (PIL image -> QImage -> QPixmap -> QIcon)."""
    return QtGui.QIcon(QtGui.QPixmap.fromImage(ImageQt(img)))
class ButtonWindow(QtWidgets.QWidget):
    """Tool-button panel: brush buttons (one per panel type), mouse-tool
    buttons and transform buttons.

    Each button has four icon variants:
    0 = pressed (black backing), 1 = highlighted, 2 = normal, 3 = blank.
    """
    # Signal carrying the clicked button id.
    ButtonApp = QtCore.pyqtSignal(int)
    def __init__(self, PARAMETER, App = None, parent = None):
        QtWidgets.QWidget.__init__(self, parent)
        # Shared application state and signal hub.
        self.PARAMETER = PARAMETER
        self.App = App
        graphics = PARAMETER['Graphics']
        print(PARAMETER)
        ###New-style button setup (currently disabled, see the string block below)
        #Main layout of the button window
        layout_main = QtWidgets.QVBoxLayout()
        #Per-button icon cache
        self.Button_icon = {}
        #One QButtonGroup for all buttons
        self.ButtonGroup = QtWidgets.QButtonGroup()
        self.ButtonGroup.buttonClicked.connect(self.Button_Click)
        #Per-type sub-windows
        self.ButtonWidget = []
        #Button pixel size
        zoom = self.PARAMETER['Img_parameter']['Button_Zoom']
        PX = 128
        px = int(PX * zoom)
        # The following triple-quoted string is commented-out legacy code
        # kept verbatim (it is a bare, inert string literal).
        '''
        for i, type_ in enumerate(self.PARAMETER['Button']['Type']):
            self.ButtonWidget.append(QtWidgets.QWidget())
            grid = QtWidgets.QGridLayout()
            for j, name in enumerate(self.PARAMETER['Button']['Specific'][i]):
                id = 100 * i + j
                self.Button_icon[id] = []
                #ๆ้ฎๅพๆ ๅ ่ฝฝ๏ผ0 ไธๅค็๏ผ1 ๆไธ๏ผ 2 ไฝๅ๏ผ 3 ๆ ๅพ
                img_o = graphics.get_image(type_ + '_' + name)
                img0 = Image.new("RGBA", (PX,PX),(0,0,0,256))
                img0.paste(img_o, (0,0), img_o.split()[3])
                self.Button_icon[id].append(img0)
                img1 = Image.new("RGBA", (PX,PX),(256,256,256,256))
                img1.paste(img_o, (2,2), img_o.split()[3])
                self.Button_icon[id].append(img1)
                mask = Image.new("RGBA", (PX,PX),(0,0,0,64))
                img2 = Image.new("RGBA", (PX,PX),(256,256,256,256))
                img2.paste(img_o, (0,0), img_o.split()[3])
                img2.paste(mask, (0,0), mask.split()[3])
                self.Button_icon[id].append(img2)
                img3 = Image.new("RGBA", (PX,PX),(0,0,0,0))
                self.Button_icon[id].append(img3)
                #ๅๅปบๆ้ฎ
                button = QtWidgets.QPushButton()
                button.setFixedWidth(px)
                button.setFixedHeight(px)
                button.setIcon(ICON(self.Button_icon[id][2])) #้ป่ฎคไฝๅ
                button.setIconSize(QtCore.QSize(px,px))
                #็ปๅฎgroup
                self.ButtonGroup.addButton(button, id)
                #gridไฝ็ฝฎ
                y = j/6
                x = j%6
                grid.addWidget(button, y, x)
            grid.setHorizontalSpacing(0)
            grid.setVerticalSpacing(0)
            self.ButtonWidget[i].setLayout(grid)
            layout_main.addWidget(self.ButtonWidget[i])
        self.setLayout(layout_main)
        '''
        #Old-style button setup
        #Brush button -> panel id mapping (grid order)
        Button_Brush_Int = [0,2,5,9,6,
                            10,3,20,4,8,
                            21,22,23,24,7,
                            25,1,18,26,27
                            ,28,31,32,33]
        #Mouse-tool sprite names
        Mouse_Name = ['Mouse', 'ArrowDelete', 'ArrowLine', 'ArrowLineDelete', 'OK', 'Cancel']
        #Transform-tool sprite names
        Transform_Name = ['Clock_test', 'AntiClock_test', 'Vertical_test', 'Horizonal_test', 'OK', 'Cancel']
        #Icon variants per button:
        #0 = pressed
        self.Button_0 = []
        #1 = highlighted
        self.Button_1 = []
        #2 = normal
        self.Button_2 = []
        #3 = blank
        self.Button_3 = []
        panel_count = len(Panel_Int)
        button_count = 6
        transform_count = 6
        # Build the icon variants. NOTE(review): alpha/colour value 256 is out
        # of range for an 8-bit channel (max 255) — confirm intended rendering.
        for id in range(panel_count + button_count + transform_count):
            if id < panel_count:
                # Brush buttons: four icon variants each.
                img_o = graphics.get_image('Panel_' + Panel_Name[Panel_Int.index(Button_Brush_Int[id])])
                #Image.open(path0 + r'\panels\Panel_' + Panel_Name[Panel_Int.index(Button_Brush_Int[id])] + '.png')
                img0 = Image.new("RGBA", (PX,PX),(0,0,0,256))
                img0.paste(img_o, (0,0), img_o.split()[3])
                self.Button_0.append(img0)
                img1 = Image.new("RGBA", (PX,PX),(256,256,256,256))
                img1.paste(img_o, (2,2), img_o.split()[3])
                self.Button_1.append(img1)
                mask = Image.new("RGBA", (PX,PX),(0,0,0,64))
                img2 = Image.new("RGBA", (PX,PX),(256,256,256,256))
                img2.paste(img_o, (0,0), img_o.split()[3])
                img2.paste(mask, (0,0), mask.split()[3])
                self.Button_2.append(img2)
                img3 = Image.new("RGBA", (PX,PX),(0,0,0,0))
                self.Button_3.append(img3)
            elif id < panel_count + button_count:
                # Mouse-tool buttons: four icon variants each.
                id -= panel_count
                img_o = graphics.get_image('Button_' + Mouse_Name[id]) #Image.open(path0 + r'\panels\Button_' + Mouse_Name[id] + '.png')
                print(Mouse_Name[id], id)
                img0 = Image.new("RGBA", (PX,PX),(0,0,0,256))
                img0.paste(img_o, (0,0), img_o.split()[3])
                self.Button_0.append(img0)
                img1 = Image.new("RGBA", (PX,PX),(256,256,256,256))
                img1.paste(img_o, (2,2), img_o.split()[3])
                self.Button_1.append(img1)
                mask = Image.new("RGBA", (PX,PX),(0,0,0,64))
                img2 = Image.new("RGBA", (PX,PX),(256,256,256,256))
                img2.paste(img_o, (0,0), img_o.split()[3])
                img2.paste(mask, (0,0), mask.split()[3])
                self.Button_2.append(img2)
                img3 = Image.new("RGBA", (PX,PX),(0,0,0,0))
                self.Button_3.append(img3)
            elif id < panel_count + button_count + transform_count:
                # Transform buttons: only the pressed variant is created
                # (transform buttons always use Button_0 below).
                id -= panel_count + button_count
                img_o = graphics.get_image('Transform_' + Transform_Name[id]) #Image.open(path0 + r'\panels\Transform_' + Transform_Name[id] + '.png')
                print(Transform_Name[id], id)
                img0 = Image.new("RGBA", (PX,PX),(0,0,0,256))
                img0.paste(img_o, (0,0), img_o.split()[3])
                self.Button_0.append(img0)
        #All buttons, indexed by global button id
        self.Button_List = []
        button_grid_all = QtWidgets.QVBoxLayout()
        #Brush button grid
        brush_grid = QtWidgets.QGridLayout()
        # Early-binding closure: captures ind by value so each button
        # reports its own id (avoids the late-binding lambda pitfall).
        def wrapper(ind):
            def q():
                self.Button_Click(ind)
            return q
        for id in range(panel_count):
            self.Button_List.append(QtWidgets.QPushButton())
            self.Button_List[id].setFixedWidth(px)
            self.Button_List[id].setFixedHeight(px)
            self.Button_List[id].setIcon(ICON(self.Button_2[id]))
            self.Button_List[id].setIconSize(QtCore.QSize(px,px))
            self.Button_List[id].setStatusTip('Draw ' + Panel_Name[Panel_Int.index(Button_Brush_Int[id])] + ' Panel')
            self.Button_List[id].clicked.connect(wrapper(id))
        brush_grid.setHorizontalSpacing(0)
        brush_grid.setVerticalSpacing(0)
        #Mouse-tool buttons: select / force-arrow / draw-arrow / erase-arrow / OK / Cancel
        mouse_grid = QtWidgets.QGridLayout()
        print("INIT", len(self.Button_List), panel_count)
        for id in range(6):
            buttonid = panel_count + id
            self.Button_List.append(QtWidgets.QPushButton())
            self.Button_List[buttonid].setFixedWidth(px)
            self.Button_List[buttonid].setFixedHeight(px)
            self.Button_List[buttonid].setIcon(ICON(self.Button_2[buttonid]))
            self.Button_List[buttonid].setIconSize(QtCore.QSize(px,px))
            self.Button_List[buttonid].setStatusTip(Mouse_Name[id])
            self.Button_List[buttonid].clicked.connect(wrapper(buttonid))
        #Transform buttons
        transform_grid = QtWidgets.QGridLayout()
        CONST = panel_count + button_count
        for id in range(6):
            buttonid = CONST + id
            self.Button_List.append(QtWidgets.QPushButton())
            self.Button_List[buttonid].setFixedWidth(px)
            self.Button_List[buttonid].setFixedHeight(px)
            self.Button_List[buttonid].setIcon(ICON(self.Button_0[buttonid]))
            self.Button_List[buttonid].setIconSize(QtCore.QSize(px,px))
            # NOTE(review): this reuses Mouse_Name[id]; it probably should be
            # Transform_Name[id] — status tips on transform buttons show the
            # mouse-tool names. Confirm before changing.
            self.Button_List[buttonid].setStatusTip(Mouse_Name[id])
            self.Button_List[buttonid].clicked.connect(wrapper(buttonid))
        # Place every button into its grid.
        for id in range(CONST + transform_count):
            y, x = id // 6, id % 6
            if (id < panel_count):
                brush_grid.addWidget(self.Button_List[id], y, x)
            elif (id < CONST):
                mouse_grid.addWidget(self.Button_List[id], 0, id - panel_count)
            else:
                transform_grid.addWidget(self.Button_List[id], 0, id - CONST)
        button_grid_all.addLayout(brush_grid)
        button_grid_all.addLayout(mouse_grid)
        button_grid_all.addLayout(transform_grid)
        #Overall layout
        self.setLayout(button_grid_all)
        #Initialise the button icons
        self.Button_Icon_Change()
        #Keyboard shortcuts: Delete = void brush, Return = transform-OK,
        #Esc = mouse-Cancel / transform-Cancel (only one of the two Esc
        #targets is visible at a time).
        self.Button_List[0].setShortcut('Delete')
        self.Button_List[panel_count + 10].setShortcut('Return')
        self.Button_List[panel_count + 5].setShortcut('Esc')
        self.Button_List[panel_count + 11].setShortcut('Esc')
    #Click handler for every tool button.
    def Button_Click(self, id):
        #debug print
        print(id)
        # NOTE(review): id_old/id_new are captured but never used —
        # presumably leftovers from change-detection logic.
        id_old = self.PARAMETER['Command']['Button']
        #Forward the click to the application
        self.App['Button'].emit(id)
        id_new = self.PARAMETER['Command']['Button']
        #A_Command: refresh status and button icons
        a_command = {}
        a_command['Status'] = {}
        a_command['Button'] = {'Icon':{}}
        #Broadcast
        self.App['Command'].emit(a_command)
    def A_Button(self, command):
        # Dispatch an incoming button-refresh command.
        if 'Zoom' in command:
            self.Button_Zoom_Change()
        if 'Icon' in command:
            self.Button_Icon_Change(command['Icon'])
    def Button_Icon_Change(self, command = {'Type': 'None'}):
        """Recompute every button's icon variant for the given mode.

        Variant codes: 0 pressed, 1 highlighted, 2 normal, 3 blank.
        NOTE: the default dict is never mutated here, so the mutable
        default is harmless.
        """
        magic = len(Panel_Int)
        #Variant list while a selection exists: everything pressed-style,
        #mouse tool highlighted, arrow tools blanked.
        def Selected_Button_Icon():
            list = []
            for i in range(magic + 6):
                list.append(0)
            list[magic] = 1
            list[magic + 2] = 3
            list[magic + 3] = 3
            return list
        #Variant list in the normal state: everything normal, OK/Cancel blanked.
        def Init_Button_Icon():
            list = []
            for i in range(magic + 6):
                list.append(2)
            list[magic + 4] = 3
            list[magic + 5] = 3
            return list
        button_icon = []
        #Selection active
        if command['Type'] == 'Selected':
            button_icon = Selected_Button_Icon()
        #Normal state: additionally highlight the active tool
        elif command['Type'] == 'None':
            button_icon = Init_Button_Icon()
            button_id = self.PARAMETER['Command']['Button']
            button_icon[button_id] = 1
        #Apply the variants (empty list -> no-op, e.g. in Transform mode)
        for i, type in enumerate(button_icon):
            if type == 0:
                self.Button_List[i].setIcon(ICON(self.Button_0[i]))
            if type == 1:
                self.Button_List[i].setIcon(ICON(self.Button_1[i]))
            if type == 2:
                self.Button_List[i].setIcon(ICON(self.Button_2[i]))
            if type == 3:
                self.Button_List[i].setIcon(ICON(self.Button_3[i]))
        #In Transform mode, show only the transform buttons; otherwise hide them.
        if command['Type'] == 'Transform':
            for i in range(magic + 6):
                self.Button_List[i].hide()
            for i in range(magic + 6, magic + 12):
                self.Button_List[i].show()
        else:
            for i in range(magic + 6):
                self.Button_List[i].show()
            for i in range(magic + 6, magic + 12):
                self.Button_List[i].hide()
    #Resize all buttons to the configured zoom.
    def Button_Zoom_Change(self):
        PX = 128
        Button_Zoom = self.PARAMETER['Img_parameter']['Button_Zoom']
        px = int(PX * Button_Zoom)
        for button in self.Button_List:
            button.setFixedWidth(px)
            button.setFixedHeight(px)
            button.setIconSize(QtCore.QSize(px,px))
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,079
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Files.py
|
import sys, os
import zipfile
from PIL import Image
from PIL.ImageQt import ImageQt
from PyQt5 import QtWidgets, QtCore, QtGui
from OFE.OFE_Field import OFE_Field
from OFE.OFE_Image import OFE_Image
import tempfile
import shutil
#Remove the named members from a pak/zip archive, in place.
def remove_from_zip(zipfname, *filenames):
    """Rewrite *zipfname* without the entries listed in *filenames*.

    The archive is copied member-by-member into a scratch zip (skipping the
    removed names) and then moved back over the original.
    """
    scratch_dir = tempfile.mkdtemp()
    try:
        rewritten = os.path.join(scratch_dir, 'new.zip')
        with zipfile.ZipFile(zipfname, 'r') as src:
            with zipfile.ZipFile(rewritten, 'w') as dst:
                for info in src.infolist():
                    if info.filename in filenames:
                        continue
                    dst.writestr(info, src.read(info.filename))
        shutil.move(rewritten, zipfname)
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(scratch_dir)
#Package root directory
#(os.path.dirname is defined as os.path.split(p)[0], so this is equivalent).
path0 = os.path.split(__file__)[0]
class OFE_Upload(QtWidgets.QDialog):
def __init__(self, game_path, field, parent = None):
super(OFE_Upload, self).__init__()
self.game_path = game_path
self.field_now = field
#ๅๅงๅ
self.setWindowTitle("Upload Manager")
self.setGeometry(600, 100, 900, 600)
#ๆฌๅฐๅฐๅพๅ่กจ
self.Local_Field_Dict = self.Open_Fields(path = path0 + '/'+ 'fields.pak')
#ๆธธๆๅฐๅพๅ่กจ
self.Game_Field_Dict = self.Open_Fields(path = game_path)
def ICON(img):
ImgQt = ImageQt(img)
pixmap = QtGui.QPixmap.fromImage(ImgQt)
icon = QtGui.QIcon(pixmap)
return icon
#ไธปๆกๆถ
layout_main = QtWidgets.QVBoxLayout()
#ไปๆฌๅฐๆไปถไธญ่ทๅพๆๅบ็ๆไปถๅๅ่กจ
self.Name_List = []
for name in self.Local_Field_Dict:
self.Name_List.append(name)
self.Name_List.sort()
#grid็layout
grid_layout = QtWidgets.QGridLayout()
#ๅ็ง็ป
self.label_img_list = []
self.label_size_list = []
self.label_state_list = []
self.reset_group = QtWidgets.QButtonGroup()
self.reset_group.buttonClicked.connect(self.Reset)
self.upload_group = QtWidgets.QButtonGroup()
self.upload_group.buttonClicked.connect(self.Upload)
# upload_group = QtWidgets.QButtonGroup()
#ๅผๅงๅGridใๅฝๅๅพๆ ใๆฌๅฐๆไปถๅใๅฝๅๅคงๅฐใ็ถๆ
for i, name in enumerate(self.Name_List):
#ๆฏๅฆๅญๅจ่ฏฅๆไปถ
Exist = 0
if name in self.Game_Field_Dict:
Exist = 1
#0ๆฌๅฐๆไปถๅ
label_name = QtWidgets.QLabel(name)
grid_layout.addWidget(label_name, i, 0)
#1ๅบ่ฏฅ็ๅคงๅฐ
size_o = self.Local_Field_Dict[name].size()
label_size_o = QtWidgets.QLabel(str(size_o[0])+'x'+str(size_o[1]))
grid_layout.addWidget(label_size_o, i, 1)
#2ๅพๆ
if Exist:
img = OFE_Image(self.Game_Field_Dict[name]).PX_Image()
else:
img = Image.open(path0 + '/'+ 'panels/Panel_Void.png')
SIZE = (32,32)
img = img.resize(SIZE, Image.BICUBIC)
def PIXMAP(img):
ImgQt = ImageQt(img)
pixmap = QtGui.QPixmap.fromImage(ImgQt)
return pixmap
label_img = QtWidgets.QLabel()
label_img.setPixmap(PIXMAP(img))
label_img.setFixedSize(SIZE[0],SIZE[1])
grid_layout.addWidget(label_img, i, 2)
self.label_img_list.append(label_img)
#3ๅฝๅ็ๅคงๅฐ
if Exist:
size = self.Game_Field_Dict[name].size()
else:
size = (0, 0)
if size == size_o:
label_size = QtWidgets.QLabel("<font color='green'>" + str(size[0])+'x'+str(size[1]) + "</font>")
else:
label_size = QtWidgets.QLabel("<font color='red'>" + str(size[0])+'x'+str(size[1]) + "</font>")
grid_layout.addWidget(label_size, i, 3)
self.label_size_list.append(label_size)
#4็ถๆ
if Exist:
if self.Game_Field_Dict[name].data == self.Local_Field_Dict[name].data:
text = 'Original'
else:
text = 'Custom'
else:
text = 'Lost'
label_state = QtWidgets.QLabel()
if text == 'Original':
label_state.setText("<font color='green'>Original</font>")
if text == 'Custom':
label_state.setText("<font color='blue'>Custom</font>")
if text == 'Lost':
label_state.setText("<font color='red'>Lost</font>")
grid_layout.addWidget(label_state, i, 4)
self.label_state_list.append(label_state)
#5 Resetๆ้ฎ
reset_button = QtWidgets.QPushButton('Reset')
self.reset_group.addButton(reset_button, i)
grid_layout.addWidget(reset_button, i, 5)
#6 Uploadๆ้ฎ
upload_button = QtWidgets.QPushButton('Upload')
self.upload_group.addButton(upload_button, i)
grid_layout.addWidget(upload_button, i, 6)
#ๆปๅจๆก
scroll_widget = QtWidgets.QWidget()
scroll_widget.setLayout(grid_layout)
scroll = QtWidgets.QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setAutoFillBackground(True)
scroll.setWidgetResizable(True)
layout_main.addWidget(scroll)
#ๆปๅธๅฑ
self.setLayout(layout_main)
#้็ฝฎ็ชๅฃๅคงๅฐ
width = self.sizeHint().width() + 20
height = self.sizeHint().height() + 200
self.resize(QtCore.QSize(width, height))
def Upload(self, button):
#ๅฝๅๆ้ฎidๅๅฏนๅบ็ๆไปถๅ
id = self.upload_group.id(button)
name = self.Name_List[id]
####ๅฐๆฌๅฐๆไปถๆฟๆขๅฐๆธธๆๆไปถ
if self.field_now:
if self.field_now.data:
##็ๆไธไธชไธดๆถๆไปถ
#ไธดๆถ็ฎๅฝ
path_temporary = path0 + '/'+ 'temporary'
file_temporary = open(path_temporary, 'wb')
file_temporary.write(self.field_now.get_bin())
file_temporary.close()
#ๅๅ
ฅ
remove_from_zip(self.game_path, name)
with zipfile.ZipFile(self.game_path, 'a') as pak_file:
pak_file.write(path_temporary, arcname=name)
#ๆดๆฐ
self.Update()
def Reset(self, button):
#ๅฝๅๆ้ฎidๅๅฏนๅบ็ๆไปถๅ
id = self.reset_group.id(button)
name = self.Name_List[id]
####ๅฐๆฌๅฐๆไปถๆฟๆขๅฐๆธธๆๆไปถ
##็ๆไธไธชไธดๆถๆไปถ
#ไธดๆถ็ฎๅฝ
path_temporary = path0 + '/'+ 'temporary'
file_temporary = open(path_temporary, 'wb')
file_temporary.write(self.Local_Field_Dict[name].get_bin())
file_temporary.close()
#ๅๅ
ฅ
remove_from_zip(self.game_path, name)
with zipfile.ZipFile(self.game_path, 'a') as pak_file:
pak_file.write(path_temporary, arcname=name)
#ๆดๆฐ
self.Update()
def Update(self):
#็ๆไปถไธชๆฐ
with zipfile.ZipFile(self.game_path) as pak_file:
name_list_o = pak_file.namelist()
count = 0
for name in name_list_o:
if name[-4:] == '.fld':
count += 1
print(count)
#ๆธธๆๅฐๅพๅ่กจ้ๆฐๅ ่ฝฝ
self.Game_Field_Dict = self.Open_Fields(path = self.game_path)
for i, name in enumerate(self.Name_List):
#ๆฏๅฆๅญๅจ่ฏฅๆไปถ
Exist = 0
if name in self.Game_Field_Dict:
Exist = 1
#2ๅพๆ
if Exist:
img = OFE_Image(self.Game_Field_Dict[name]).PX_Image()
else:
img = Image.open(path0 + '/'+ 'panels/Panel_Void.png')
SIZE = (32,32)
img = img.resize(SIZE, Image.BICUBIC)
def PIXMAP(img):
ImgQt = ImageQt(img)
pixmap = QtGui.QPixmap.fromImage(ImgQt)
return pixmap
self.label_img_list[i].setPixmap(PIXMAP(img))
#3ๅฝๅ็ๅคงๅฐ
size_o = self.Local_Field_Dict[name].size()
if Exist:
size = self.Game_Field_Dict[name].size()
else:
size = (0, 0)
if size == size_o:
self.label_size_list[i].setText("<font color='green'>" + str(size[0])+'x'+str(size[1]) + "</font>")
else:
self.label_size_list[i].setText("<font color='red'>" + str(size[0])+'x'+str(size[1]) + "</font>")
#4็ถๆ
if Exist:
if self.Game_Field_Dict[name].data == self.Local_Field_Dict[name].data:
text = 'Original'
else:
text = 'Custom'
else:
text = 'Lost'
if text == 'Original':
self.label_state_list[i].setText("<font color='green'>Original</font>")
if text == 'Custom':
self.label_state_list[i].setText("<font color='blue'>Custom</font>")
if text == 'Lost':
self.label_state_list[i].setText("<font color='red'>Lost</font>")
    def Upload_Main(app, game_path = '', field = None, parent = None):
        """Top-level upload flow (called as OFE_Upload.Upload_Main).

        Verifies the local fields.pak, locates/validates the game's own
        fields.pak (prompting the user and re-checking once on error),
        then runs the upload dialog.  Returns the validated game pak
        path so the caller can remember it, or None on failure/cancel.
        """
        # Check that the bundled local pak exists and is readable.
        path = path0 + '/'+ 'fields.pak'
        path = QtCore.QFileInfo(path).absoluteFilePath()
        try:
            pak_file = zipfile.ZipFile(path)
        except:
            QtWidgets.QMessageBox.critical(app, 'Error','Can not find fields.pak in '+path0, QtWidgets.QMessageBox.Ok)
            return
        else:
            # Validate the game's pak path; returns it, or None after two
            # failed attempts.
            def Get_Game_Pak(app, game_path):
                game_path = QtCore.QFileInfo(game_path).absoluteFilePath()
                # The file must be named exactly fields.pak.
                file_name = QtCore.QFileInfo(game_path).fileName()
                if file_name == 'fields.pak':
                    Name_Error = False
                else:
                    Name_Error = True
                # It must be a readable zip archive.
                try:
                    game_zip = zipfile.ZipFile(game_path)
                except:
                    Pak_Error = True
                else:
                    Pak_Error = False
                # It must not be the editor's own local pak.
                if path == game_path:
                    Path_Error = True
                else:
                    Path_Error = False
                # On any error, let the user pick the path again (once).
                if Name_Error or Path_Error or Pak_Error:
                    options = QtWidgets.QFileDialog.Options()
                    game_path, _ = QtWidgets.QFileDialog.getOpenFileName(app,"Open the fields.pak in game data", path0 ,"Pak (*.pak);;All Files (*)", options=options)
                    # Re-run the same three checks on the new choice.
                    # Name check.
                    file_name = QtCore.QFileInfo(game_path).fileName()
                    if file_name == 'fields.pak':
                        Name_Error = False
                    else:
                        Name_Error = True
                    # Zip-archive check.
                    try:
                        game_zip = zipfile.ZipFile(game_path)
                    except:
                        Pak_Error = True
                    else:
                        Pak_Error = False
                    # Local-path check.
                    if path == game_path:
                        Path_Error = True
                    else:
                        Path_Error = False
                # Report the first remaining error, or accept the path.
                if Name_Error or Path_Error or Pak_Error:
                    if Name_Error:
                        QtWidgets.QMessageBox.critical(app, 'Error','You must find pak with name fields.pak', QtWidgets.QMessageBox.Ok)
                    elif Pak_Error:
                        QtWidgets.QMessageBox.critical(app, 'Error','Not a pak file', QtWidgets.QMessageBox.Ok)
                    elif Path_Error:
                        QtWidgets.QMessageBox.critical(app, 'Error','You must find fields.pak in game data', QtWidgets.QMessageBox.Ok)
                else:
                    return game_path
            # Validate the game pak path; abort when the user gives up.
            game_path = Get_Game_Pak(app, game_path)
            if not game_path:
                return
            # Run the upload dialog.
            dialog = OFE_Upload(game_path, field, parent)
            result = dialog.exec_()
            pak_file.close()
            return game_path
    # Build a dict of fields from the local pak.
def Open_Fields(self, path):
with zipfile.ZipFile(path) as pak_file:
#ๆไปถๅ่กจ
name_list_o = pak_file.namelist()
name_fld = []
for name in name_list_o:
if name[-4:] == '.fld':
name_fld.append(name)
field_dict = {}
for name in name_fld:
fld_bin = pak_file.read(name)
field = OFE_Field('bin', fld_bin)
field_dict[name] = field
return field_dict
class OFE_New(QtWidgets.QDialog):
    """Dialog that lets the user pick a field size for a new field.

    The available sizes are collected from the official fields shipped
    in <path0>/fields.pak.
    """
    def __init__(self, parent = None):
        super(OFE_New, self).__init__()
        # Basic window setup.
        self.setWindowTitle("New")
        # self.setGeometry(300, 100, 400, 600)
        # All official fields, keyed by pak entry name.
        self.Field_Dict = self.Open_Fields()
        # Main layout.
        layout_main = QtWidgets.QVBoxLayout()
        # Title
        title_label = QtWidgets.QLabel('Select a field size:')
        layout_main.addWidget(title_label)
        # One radio button per distinct field size, sorted ascending.
        radio_layout = QtWidgets.QGridLayout()
        self.radio_group = QtWidgets.QButtonGroup()
        self.size_list = []
        for name in self.Field_Dict:
            size = self.Field_Dict[name].size()
            if size not in self.size_list:
                self.size_list.append(size)
        self.size_list.sort()
        for i, size in enumerate(self.size_list):
            radio = QtWidgets.QRadioButton(str(size[0])+'x'+str(size[1]))
            self.radio_group.addButton(radio, i)
            radio_layout.addWidget(radio, i, 0)
        layout_main.addLayout(radio_layout)
        # OK / Cancel buttons.
        ok_cancel = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, QtCore.Qt.Horizontal, self)
        ok_cancel.accepted.connect(self.accept)
        ok_cancel.rejected.connect(self.reject)
        layout_main.addWidget(ok_cancel)
        # Final layout.
        self.setLayout(layout_main)
    def Get_Size(app, parent = None):
        """Show the dialog and return the chosen (x, y) size, or None.

        Called as ``OFE_New.Get_Size(main_window)``; *app* is only used
        as the parent of the error message box.
        """
        path = path0 + '/'+ 'fields.pak'
        # Probe the pak first so a clear error can be shown up front.
        try:
            pak_file = zipfile.ZipFile(path)
        except Exception:
            QtWidgets.QMessageBox.critical(app, 'Error','Can not find fields.pak in '+path0, QtWidgets.QMessageBox.Ok)
            return
        # BUG FIX: the probe handle was previously leaked; close it.
        # Open_Fields() reopens the pak on its own.
        pak_file.close()
        dialog = OFE_New(parent)
        result = dialog.exec_()
        if result:
            id = dialog.radio_group.checkedId()
            if id >= 0:
                return dialog.size_list[id]
        return
    def Open_Fields(self):
        """Parse every .fld entry of the local fields.pak into a dict."""
        path = path0 + '/'+ 'fields.pak'
        with zipfile.ZipFile(path) as pak_file:
            field_dict = {}
            for name in pak_file.namelist():
                if name[-4:] == '.fld':
                    field_dict[name] = OFE_Field('bin', pak_file.read(name))
            return field_dict
class OFE_Files(QtWidgets.QDialog):
    """Dialog that lets the user pick one of the official fields
    shipped in <path0>/fields.pak, with a preview thumbnail and size."""
    def __init__(self, parent = None):
        super(OFE_Files, self).__init__()
        # Basic window setup.
        self.setWindowTitle("Open Official Field")
        self.setGeometry(300, 100, 350, 600)
        # All official fields, keyed by pak entry name.
        self.Field_Dict = self.Open_Fields()
        if self.Field_Dict:
            # Main layout.
            layout_main = QtWidgets.QVBoxLayout()
            # Title
            title_label = QtWidgets.QLabel('Select a official field:')
            layout_main.addWidget(title_label)
            # One row per field: radio (name), preview image, size text.
            radio_layout = QtWidgets.QGridLayout()
            self.radio_group = QtWidgets.QButtonGroup()
            self.Name_List = sorted(self.Field_Dict)
            for i, name in enumerate(self.Name_List):
                # Preview thumbnail.
                SIZE = (32, 32)
                img = OFE_Image(self.Field_Dict[name]).PX_Image()
                def PIXMAP(img):
                    # Wrap a PIL image as a QPixmap.
                    ImgQt = ImageQt(img)
                    pixmap = QtGui.QPixmap.fromImage(ImgQt)
                    return pixmap
                label_img = QtWidgets.QLabel()
                label_img.setPixmap(PIXMAP(img))
                label_img.setFixedSize(SIZE[0],SIZE[1])
                radio_layout.addWidget(label_img, i, 1)
                # Radio button carrying the field name.
                radio = QtWidgets.QRadioButton(name)
                self.radio_group.addButton(radio, i)
                radio_layout.addWidget(radio, i, 0)
                # Field size text.
                size = self.Field_Dict[name].size()
                label = QtWidgets.QLabel(str(size[0])+'x'+str(size[1]))
                radio_layout.addWidget(label, i, 2)
            # Scrollable list.
            scroll_widget = QtWidgets.QWidget()
            scroll_widget.setLayout(radio_layout)
            scroll = QtWidgets.QScrollArea()
            scroll.setWidget(scroll_widget)
            scroll.setAutoFillBackground(True)
            scroll.setWidgetResizable(True)
            layout_main.addWidget(scroll)
            # OK / Cancel buttons.
            ok_cancel = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, QtCore.Qt.Horizontal, self)
            ok_cancel.accepted.connect(self.accept)
            ok_cancel.rejected.connect(self.reject)
            layout_main.addWidget(ok_cancel)
            # Final layout.
            self.setLayout(layout_main)
            # Resize: the size hint plus room for the scroll area.
            width = self.sizeHint().width() + 20
            height = self.sizeHint().height() + 200
            self.resize(QtCore.QSize(width, height))
    def Get_Field(app, parent = None):
        """Show the dialog; return (field, name) for the chosen official
        field, or None on error/cancel.

        Called as ``OFE_Files.Get_Field(main_window)``; *app* is only
        used as the parent of the error message box.
        """
        path = path0 + '/'+ 'fields.pak'
        # Probe the pak first so a clear error can be shown up front.
        try:
            pak_file = zipfile.ZipFile(path)
        except Exception:
            QtWidgets.QMessageBox.critical(app, 'Error','Can not find fields.pak in '+path0, QtWidgets.QMessageBox.Ok)
            return
        # BUG FIX: the probe handle was previously leaked; close it.
        pak_file.close()
        dialog = OFE_Files(parent)
        result = dialog.exec_()
        if result:
            id = dialog.radio_group.checkedId()
            if id >= 0:
                name = dialog.Name_List[id]
                return dialog.Field_Dict[name], name
        return
    def Open_Fields(self):
        """Parse every .fld entry of the local fields.pak into a dict."""
        path = path0 + '/'+ 'fields.pak'
        # BUG FIX: use a context manager; the handle was never closed.
        with zipfile.ZipFile(path) as pak_file:
            field_dict = {}
            for name in pak_file.namelist():
                if name[-4:] == '.fld':
                    field_dict[name] = OFE_Field('bin', pak_file.read(name))
            return field_dict
if __name__ == '__main__':
    # Manual smoke test: show the official-field picker on its own.
    app = QtWidgets.QApplication(sys.argv)
    field = OFE_Files.Get_Field(app)
    print(field)
    sys.exit(app.exec_())
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,080
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Field.py
|
#OFE_Field
import struct
import math
class OFE_Field:
    """In-memory model of a 100% Orange Juice board field.

    ``self.data`` is a list of Y rows, each a list of X cells, each
    cell a two-item list ``[panel_id, arrow_bits]``.  ``arrow_bits``
    is an 8-bit mask: bits 0-3 are the normal movement arrows, bits
    4-7 the backtrack arrows (one bit per direction in each nibble).
    The binary form is the grid flattened row-major into pairs of
    machine-native 32-bit ints (panel, arrows).
    """
    def __init__(self, order = None, parameter = None):
        """Construct a field.

        order='new':    parameter is (X, Y) -> all-zero grid.
        order='open':   parameter is a file path -> read binary data.
        order='bin':    parameter is raw bytes -> decode binary data.
        order='create': parameter is a ready-made data grid (adopted
                        as-is, not copied).
        Any other order leaves ``self.data`` as None.
        """
        self.data = None
        # Create a new, empty grid.
        if order == 'new':
            X, Y = parameter
            self.data = [[[0, 0] for _ in range(X)] for _ in range(Y)]
        # Read from a file; shares the decoder with 'bin'.
        if order == 'open':
            with open(parameter, 'rb') as file:
                raw = file.read()
            self.data = self._parse_bin(raw)
        # Decode raw binary data.
        if order == 'bin':
            self.data = self._parse_bin(parameter)
        # Adopt an existing data grid.
        if order == 'create':
            self.data = parameter
    @staticmethod
    def _parse_bin(raw):
        """Decode raw field bytes into the nested data grid.

        The file stores no dimensions: a square layout is preferred,
        otherwise the largest height <= sqrt(cells) that divides the
        cell count evenly is used.
        """
        int_num = len(raw) // 4
        flat = list(struct.unpack('%di' % int_num, raw))
        # Work out the grid dimensions.
        num = len(flat) // 2
        side = int(math.sqrt(num))
        if side * side == num:
            x = y = side
        else:
            while num % side != 0:
                side -= 1
            y = side
            x = num // side
        data = []
        for i in range(y):
            row = []
            for j in range(x):
                base = 2 * (i * x + j)
                row.append([flat[base], flat[base + 1]])
            data.append(row)
        return data
    def get_bin(self):
        """Serialize the grid back to raw bytes (inverse of 'bin')."""
        int_list = []
        X, Y = self.size()
        for y in range(Y):
            for x in range(X):
                int_list.append(self.data[y][x][0])
                int_list.append(self.data[y][x][1])
        return struct.pack('%di' % len(int_list), *int_list)
    def Save(self, path = ''):
        """Write the binary form to *path*; no-op when the path is
        empty or the field holds no data."""
        if path != '' and self.data:
            with open(path, 'wb') as file:
                file.write(self.get_bin())
    def size(self):
        """Return (width, height), or None when the field has no data."""
        if self.data:
            return (len(self.data[0]), len(self.data))
    def has_value(self):
        """True when any cell carries a non-zero panel id."""
        for row in self.data:
            for cell in row:
                if cell[0] != 0:
                    return True
        return False
    def Get_Section(self, rec):
        """Return a copy of the inclusive rectangle
        rec = ((x1, y1), (x2, y2))."""
        (x1, y1), (x2, y2) = rec
        section = []
        for y in range(y1, y2 + 1):
            row = []
            for x in range(x1, x2 + 1):
                row.append([self.data[y][x][0], self.data[y][x][1]])
            section.append(row)
        return section
    def Cut(self, rec):
        """Copy the rectangle, then blank it (panels and arrows)."""
        section = self.Get_Section(rec)
        self.Fill(rec, 0)
        return section
    def Copy(self, rec):
        """Return a copy of the rectangle; the field is untouched."""
        return self.Get_Section(rec)
    def Paste(self, pos, data_new):
        """Write *data_new* with its top-left corner at *pos*; cells
        that would land outside the grid are silently skipped."""
        x1, y1 = pos
        I = len(data_new[0])
        J = len(data_new)
        X, Y = self.size()
        for j in range(J):
            y = j + y1
            for i in range(I):
                x = i + x1
                if 0 <= x < X and 0 <= y < Y:
                    self.data[y][x][0] = data_new[j][i][0]
                    self.data[y][x][1] = data_new[j][i][1]
    def Arrow_Transform(self, arrow_num, type = ''):
        """Map an 8-bit arrow mask through a mirror or 90-degree turn.

        Each nibble (normal: bits 0-3, backtrack: bits 4-7) holds one
        bit per direction.  *type* is one of 'horizonal', 'vertical',
        'clockwise', 'anticlockwise' (spelling kept for callers).
        Returns the transformed mask; None for an unknown *type*.
        """
        def mirror(num, odd):
            # Swap the two opposed directions in both nibbles: even bit
            # positions (odd=0) for a horizontal mirror, odd positions
            # (odd=1) for a vertical one.  Positions pair up as
            # 2i <-> 2i +/- 2 within the selected parity.
            present = [1 if num & (1 << (2 * i + odd)) else 0 for i in range(4)]
            num_new = num
            for i in range(4):
                offset = -2 if i % 2 else 2
                bit = 1 << (2 * i + odd + offset)
                if present[i]:
                    num_new = num_new | bit
                else:
                    num_new = num_new & (0xFF - bit)
            return num_new
        def rotate_nibble(nib, direction):
            # Rotate a 4-bit direction mask one step with wrap-around.
            if direction == 'clock':
                carry = nib >> 3
                return ((nib << 1) % 16) + carry
            carry = nib % 2
            return (nib >> 1) + carry * 8
        def rotate(num, direction):
            # Rotate the normal and backtrack nibbles independently.
            low = num % 16
            # BUG FIX: the high nibble used to be rotated without first
            # being shifted down, so bit 7 wrapped to 256 instead of
            # bit 4 (and similarly for the anticlockwise wrap).
            high = num // 16
            return rotate_nibble(high, direction) * 16 + rotate_nibble(low, direction)
        if type == 'horizonal':
            return mirror(arrow_num, 0)
        elif type == 'vertical':
            return mirror(arrow_num, 1)
        elif type == 'clockwise':
            return rotate(arrow_num, 'clock')
        elif type == 'anticlockwise':
            return rotate(arrow_num, 'anticlock')
    def Horizonal(self):
        """Mirror the whole field left-right (panels and arrows)."""
        X, Y = self.size()
        self.data = [
            [[row[X - x - 1][0],
              self.Arrow_Transform(row[X - x - 1][1], 'horizonal')]
             for x in range(X)]
            for row in self.data]
    def Vertical(self):
        """Mirror the whole field top-bottom (panels and arrows)."""
        X, Y = self.size()
        self.data = [
            [[self.data[Y - y - 1][x][0],
              self.Arrow_Transform(self.data[Y - y - 1][x][1], 'vertical')]
             for x in range(X)]
            for y in range(Y)]
    def Clockwise(self):
        """Rotate the whole field 90 degrees clockwise (size transposes)."""
        X, Y = self.size()
        # New grid is X rows by Y columns.
        self.data = [
            [[self.data[Y - x - 1][y][0],
              self.Arrow_Transform(self.data[Y - x - 1][y][1], 'clockwise')]
             for x in range(Y)]
            for y in range(X)]
    def AntiClockwise(self):
        """Rotate the whole field 90 degrees anticlockwise."""
        X, Y = self.size()
        # New grid is X rows by Y columns.
        self.data = [
            [[self.data[x][X - y - 1][0],
              self.Arrow_Transform(self.data[x][X - y - 1][1], 'anticlockwise')]
             for x in range(Y)]
            for y in range(X)]
    def Free(self, command):
        """Dispatch a whole-field transform by name ('clockwise',
        'anticlockwise', 'vertical', 'horizonal')."""
        transforms = {
            'clockwise': self.Clockwise,
            'anticlockwise': self.AntiClockwise,
            'vertical': self.Vertical,
            'horizonal': self.Horizonal,
        }
        if command in transforms:
            transforms[command]()
    def Point_IsVoid(self, pos):
        """True when the cell at pos=(x, y) holds panel id 0 or 18
        (both treated as empty throughout this class)."""
        x, y = pos
        return self.data[y][x][0] in (0, 18)
    def Point_Panel(self, pos, panel_id, setting = 'Default'):
        """Set the panel id at *pos*; with the default setting, placing
        id 0 or 18 also clears the cell's arrows.

        Returns True when the cell actually changed.
        """
        x, y = pos
        old_panel = self.data[y][x][0]
        old_arrow = self.data[y][x][1]
        self.data[y][x][0] = panel_id
        if setting == 'Default':
            if panel_id == 0 or panel_id == 18:
                self.data[y][x][1] = 0
        return old_panel != self.data[y][x][0] or old_arrow != self.data[y][x][1]
    def Point_Arrow(self, pos, arrow_command = None, BackTrack = 0):
        """Apply per-direction arrow commands to the cell at *pos*.

        *arrow_command* has four entries (one per direction): 1 sets
        the arrow bit, -1 clears it, 0 leaves it alone.  When
        *BackTrack* is truthy the backtrack nibble (bits 4-7) is
        edited instead of the normal one.  Returns True when the mask
        changed.
        """
        # Avoid a shared mutable default argument.
        if arrow_command is None:
            arrow_command = [0, 0, 0, 0]
        x, y = pos
        old_arrow = self.data[y][x][1]
        new_arrow = old_arrow
        base = 4 if BackTrack else 0
        for i in range(4):
            index = i + base
            bit = 2 ** index
            has_bit = (new_arrow >> index) & 1
            if arrow_command[i] == 1 and not has_bit:
                new_arrow += bit
            elif arrow_command[i] == -1 and has_bit:
                new_arrow -= bit
        self.data[y][x][1] = new_arrow
        return old_arrow != new_arrow
    def Fill(self, rec, panel_id, BackTrack = 0):
        """Fill the inclusive rectangle *rec*.

        panel_id < 100: write that panel id everywhere (ids 0 and 18
        also clear the arrows).  panel_id == 101: special command that
        clears all arrows in the rectangle instead of writing panels.
        Always returns True.
        """
        (x1, y1), (x2, y2) = rec
        for y in range(y1, y2 + 1):
            for x in range(x1, x2 + 1):
                if panel_id < 100:
                    self.data[y][x][0] = panel_id
                    if panel_id == 0 or panel_id == 18:
                        self.data[y][x][1] = 0
                if panel_id == 101:
                    self.Point_Arrow((x, y), [-1, -1, -1, -1], BackTrack)
        return True
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,081
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_Status.py
|
from PyQt5 import QtGui, QtWidgets, QtCore
class StatusWindow(QtWidgets.QWidget):
    """Side-panel widget that displays the editor's current status.

    Holds a ``Status`` dict (history position, current selection,
    active button, backtrack flag, ...) and renders it as plain text
    into a single QLabel.
    """
    def __init__(self, parent = None):
        QtWidgets.QWidget.__init__(self, parent)
        # Status store.
        self.Status = {}
        # Defaults for every displayed entry.
        self.Status['History_Len'] = 0
        self.Status['History_Pos'] = 1
        self.Status['Last_Action'] = 'None'
        self.Status['Selected'] = []
        self.Status['Button'] = 18
        self.Status['BackTrack'] = 0
        self.Status['Test'] = ''
        # Main layout: a single label carrying the whole status text.
        layout_main = QtWidgets.QVBoxLayout()
        self.label_main = QtWidgets.QLabel(self)
        self.label_main.setText('test')
        layout_main.addWidget(self.label_main)
        self.setLayout(layout_main)
    def A_Status(self, command):
        """Merge *command* (a partial Status dict) in and re-render."""
        for key in command:
            self.Status[key] = command[key]
        self.Text_Refresh()
    def Status_Refresh(self):
        """Re-render the status text without changing any values."""
        self.Text_Refresh()
    def Text_Refresh(self):
        """Rebuild the full status text and push it into the label."""
        text = ''
        # text += '--------Status--------' + '\n'
        ### Command section
        text += '----Command----' + '\n'
        # Last executed action.
        text += 'Last Action : '
        last_action = self.Status['Last_Action']
        text += last_action
        text += '\n'
        # Current selection rectangle, shown as "W x H; (x1,y1)-(x2,y2)".
        text += 'Selected : '
        selected = self.Status['Selected']
        if selected == []:
            text += 'None'
        else:
            x = selected[1][0] - selected[0][0] +1
            y = selected[1][1] - selected[0][1] +1
            text += str(x) + ' x ' + str(y)
            text += '; '
            text += str(selected[0]) + '-' + str(selected[1])
        text += '\n'
        # Currently active tool button, displayed by name.
        text += 'Button : '
        button_id = self.Status['Button']
        Button_Name = ['Void', 'Check', 'Bonus', 'Bonus_2', 'Drop', 'Drop_2', 'Encounter', 'Encounter_2', 'Draw', 'Draw_2',
                       'Move', 'Move_2', 'WarpMove', 'WarpMove_2', 'Warp', 'Snow', 'Neutral', 'Deck'] + ['Mouse', 'ArrowDelete', 'ArrowLine', 'ArrowLineDelete', 'OK', 'Cancel']
        text += Button_Name[button_id]
        text += '\n'
        ### View section
        text += '----View----' + '\n'
        # Backtrack display toggle.
        text += 'BackTrack : '
        backtrack = self.Status['BackTrack']
        if backtrack:
            text += 'On'
        else:
            text += 'Off'
        text += '\n'
        ### Parameter section
        text += '----Parameter----' + '\n'
        # Undo-history position; absolute length shown when they differ.
        text += 'History : '
        history_now = self.Status['History_Len']-self.Status['History_Pos']
        history_abs = self.Status['History_Len'] - 1
        text += str(history_now)
        if history_now != history_abs:
            text += '('+str(history_abs)+')'
        text += '\n'
        ### Test section (free-form debug text)
        text += '----Test----' + '\n'
        test = self.Status['Test']
        text += test
        text += '\n'
        self.label_main.setText(text)
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,082
|
zirconium-n/OFE
|
refs/heads/master
|
/OFE/OFE_main.py
|
import sys, os
import re, struct
from PyQt5 import QtGui, QtWidgets, QtCore
from PIL import Image
from PIL.ImageQt import ImageQt
from OFE.OFE_Field import OFE_Field
from OFE import ButtonWindow
from OFE import StatusWindow
from OFE import Canvas_Tab
from OFE import OFE_Upload, OFE_New, OFE_Files
from OFE.OFE_Graphics import OFE_Graphics
# Package root directory (used to locate bundled resources).
path0 = os.path.dirname(__file__)
# Version string appended to the window title.
VERSION = ' v0.3'
class OFE_MainWindow(QtWidgets.QMainWindow):
    """Main editor window: canvas tabs, status panel, button panel, menus."""
    # Command signal: emitted with a dict of sub-commands whenever the
    # UI must reflect a state change; handled by A_Command.
    CommandEmitApp = QtCore.pyqtSignal(dict)
    # Button-press signal: emitted from ButtonWindow.Button_Click with
    # the pressed button's id.
    ButtonEmitApp = QtCore.pyqtSignal(int)
    def __init__(self):
        super(OFE_MainWindow, self).__init__()
        self.initUI() # All interface construction lives in initUI.
    def initUI(self):
        """Build the whole interface and wire up the signals."""
        # Title and icon.
        self.setWindowTitle("100oj Fields Editor" + VERSION)
        self.setWindowIcon(QtGui.QIcon(path0 + '/'+ 'panels/Panel_Check.png'))
        ## Load the global parameters.
        self.PARAMETER = self.Init_PARAMETER()
        # Window position (taken from the global parameters).
        window_pos = self.PARAMETER['Img_parameter']['Window_Pos']
        self.setGeometry(window_pos[0], window_pos[1], 1000, 600)
        ## Widget layout.
        main_ground = QtWidgets.QWidget(self)
        self.setCentralWidget(main_ground)
        self.layout_main = QtWidgets.QHBoxLayout(main_ground)
        layout_sub = QtWidgets.QVBoxLayout()
        # Canvas area.
        self.canvaswindow = Canvas_Tab(self.PARAMETER, App = {'Command':self.CommandEmitApp})
        # Side bar: status on top, buttons below.
        self.statuswindow = StatusWindow()
        verticalSpacer = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.buttonwindow = ButtonWindow(self.PARAMETER, App = {'Command':self.CommandEmitApp, 'Button':self.ButtonEmitApp})
        layout_sub.addWidget(self.statuswindow)
        layout_sub.addItem(verticalSpacer)
        layout_sub.addWidget(self.buttonwindow)
        # NOTE(review): the same QSpacerItem instance is added twice;
        # confirm this is intentional (Qt takes ownership on add).
        layout_sub.addItem(verticalSpacer)
        self.layout_main.addWidget(self.canvaswindow)
        self.layout_main.addLayout(layout_sub)
        main_ground.setLayout(self.layout_main)
        ## Menu bar.
        self.Set_Menu()
        ### Central command wiring ###
        self.CommandEmitApp.connect(self.A_Command)
        # Button-press signal.
        self.ButtonEmitApp.connect(self.Button_Click)
        # Initial full refresh.
        self.A_Command({'Menu': None, 'Status': {}, 'Resize': None})
    # Build the menu bar.
def Set_Menu(self):
menubar = self.menuBar()
self.Menu_All = {}
def set_menu(name, connect, shortcut = '', StatusTip = '', checkable=False):
menu = QtWidgets.QAction(name,self,checkable = checkable)
menu.setShortcut(shortcut)
menu.setStatusTip(StatusTip)
menu.triggered.connect(connect)
return menu
##ๆไปถ
file = menubar.addMenu("File")
#ๆฐๅปบ
new_menu = set_menu('New...', self.New, 'Ctrl+N', 'Open a new field')
file.addAction(new_menu)
self.Menu_All['New'] = new_menu
#ๆๅผ
open_menu = set_menu('Open...', self.Open, 'Ctrl+O', 'Open an existing field')
file.addAction(open_menu)
self.Menu_All['Open'] = open_menu
#ๆๅผ
open_official_menu = set_menu('Open Official', self.Open_Official, 'Open an official field')
file.addAction(open_official_menu)
self.Menu_All['Open_Official'] = open_official_menu
#ๅ
ณ้ญ
close_menu = set_menu('Close', self.Close, 'Ctrl+W', 'Close the current field')
file.addAction(close_menu)
self.Menu_All['Close'] = close_menu
#--
file.addSeparator()
#ไฟๅญ
save_menu = set_menu('Save Field', self.Save, 'Ctrl+S', 'Save the field in its current field name')
file.addAction(save_menu)
self.Menu_All['Save'] = save_menu
#ๅฆๅญไธบ
save_as_menu = set_menu('Save Field As...', self.Save_As, 'Save the field with a new name')
file.addAction(save_as_menu)
self.Menu_All['Save_As'] = save_as_menu
#--
file.addSeparator()
#ไธไผ
upload_menu = set_menu('Upload', self.Upload, 'Ctrl+U', 'Upload the field to the game')
file.addAction(upload_menu)
self.Menu_All['Upload'] = upload_menu
#--
file.addSeparator()
#้ๅบ
exit_menu = set_menu('Exit', QtWidgets.qApp.quit, "Alt+F4", "Exit")
file.addAction(exit_menu)
self.Menu_All['Exit'] = exit_menu
##็ผ่พ
edit = menubar.addMenu("Edit")
#ๆค้
undo_menu = set_menu('Undo', self.Undo, 'Ctrl+Z', 'Undo the last action')
edit.addAction(undo_menu)
self.Menu_All['Undo'] = undo_menu
#้ๅ
redo_menu = set_menu('Redo', self.Redo, 'Ctrl+Y', 'Redo the last action')
edit.addAction(redo_menu)
self.Menu_All['Redo'] = redo_menu
#--
edit.addSeparator()
#ๅชๅ
cut_menu = set_menu('Cut', self.Cut, 'Ctrl+X', 'Cut the section and put it on the Clipboard')
edit.addAction(cut_menu)
self.Menu_All['Cut'] = cut_menu
#ๅคๅถ
copy_menu = set_menu('Copy', self.Copy, 'Ctrl+C', 'Copy the section and put it on the Clipboard')
edit.addAction(copy_menu)
self.Menu_All['Copy'] = copy_menu
#็ฒ่ดด
paste_menu = set_menu('Paste', self.Paste, 'Ctrl+V', 'Insert Clipboard contents')
edit.addAction(paste_menu)
self.Menu_All['Paste'] = paste_menu
#--
edit.addSeparator()
#ๅๆข
transform_menu = set_menu('Transform', self.Transform, 'Ctrl+T', 'Transform the section')
edit.addAction(transform_menu)
self.Menu_All['Transform'] = transform_menu
#duplicate
duplicate_menu = set_menu('Duplicate', self.Duplicate, 'Ctrl+D', 'Duplicate and transform the section')
edit.addAction(duplicate_menu)
self.Menu_All['Duplicate'] = duplicate_menu
##่งๅพ
view = menubar.addMenu("View")
#ๆนๅ่ๆฏ้ข่ฒ
background_menu = set_menu('Background color', self.Background, StatusTip = 'Set background color')
view.addAction(background_menu)
self.Menu_All['Background'] = background_menu
#--
view.addSeparator()
#็้ข็ผฉๆพๅคงๅฐ
zoom_level_menu = view.addMenu("Zoom Level")
zoom_level_menu.setStatusTip('Change Zoom Level')
self.zoom_group = QtWidgets.QActionGroup(self, exclusive=True)
for zoom in self.PARAMETER['Img_parameter']['Zoom_List']:
action = self.zoom_group.addAction(QtWidgets.QAction(str(zoom), self, checkable=True))
action.triggered.connect(self.Zoom_Level)
zoom_level_menu.addAction(action)
if zoom == self.PARAMETER['Img_parameter']['Zoom']:
action.setChecked(True)
#ๆ้ฎ็ผฉๆพๅคงๅฐ
button_zoom_level_menu = view.addMenu("Button Zoom Level")
button_zoom_level_menu.setStatusTip('Change Buttons Zoom Level')
self.button_zoom_group = QtWidgets.QActionGroup(self, exclusive=True)
for zoom in self.PARAMETER['Img_parameter']['Zoom_List']:
action = self.button_zoom_group.addAction(QtWidgets.QAction(str(zoom), self, checkable=True))
action.triggered.connect(self.Button_Zoom_Level)
button_zoom_level_menu.addAction(action)
if zoom == self.PARAMETER['Img_parameter']['Button_Zoom']:
action.setChecked(True)
#--
view.addSeparator()
#BackTrack
backtrack_menu = set_menu('BackTrack', self.BackTrack, StatusTip = 'Switch BackTrack', checkable = True)
view.addAction(backtrack_menu)
self.Menu_All['BackTrack'] = backtrack_menu
    # Initialize parameters.
    def Init_PARAMETER(self):
        """Build the global PARAMETER dict.

        Values are read from <path0>/user.dat when present, falling
        back to defaults; also loads the image assets and the
        button-id lookup tables.
        """
        parameter = {}
        # Read the saved parameter file (optional; defaults otherwise).
        try:
            file_para = open(path0 + '/'+ 'user.dat', 'r')
        except:
            text_para = ''
        else:
            text_para = file_para.read()
            print(text_para)
            file_para.close()
        # Find one "name=value" entry in the text, converting the raw
        # string value to the type of *default*.
        def find_parameter(text, name, default):
            try:
                text1 = re.search(name + '=.+', text).group()
            except:
                value = default
            else:
                pos = text1.find('=')
                value = text1[pos+1:]
                if type(default) == type(0.75):
                    value = float(value)
                elif type(default) == type(1):
                    value = int(value)
                elif type(default) == type((1,2,3)):
                    # Tuples are stored as "(a, b, ...)": strip the
                    # parentheses and split on commas.
                    p = re.compile(',')
                    value = tuple(map(int, (p.split(value[1:-1]))))
                elif type(default) == type('path'):
                    value = str(value)
            return value
        # File parameters.
        parameter['Clipboard'] = None
        parameter['Path_Save'] = find_parameter(text_para, 'Path_Save', path0)
        parameter['Path_Game'] = find_parameter(text_para, 'Path_Game', path0)
        # View parameters.
        parameter['Img_parameter'] = {}
        parameter['Img_parameter']['Window_Pos'] = find_parameter(text_para, 'Window_Pos', (600, 60))
        parameter['Img_parameter']['Zoom_List'] = (0.25, 0.375, 0.5, 0.625, 0.75, 1.0)
        parameter['Img_parameter']['Zoom'] = find_parameter(text_para, 'Zoom', 0.5)
        parameter['Img_parameter']['Background'] = find_parameter(text_para, 'Background', (52,52,52,256))
        parameter['Img_parameter']['Show_arrows'] = find_parameter(text_para, 'Show_arrows', 1)
        parameter['Img_parameter']['Button_Zoom'] = find_parameter(text_para, 'Button_Zoom', 0.5)
        parameter['Img_parameter']['BackTrack'] = 0
        parameter['Img_parameter']['Frame'] = find_parameter(text_para, 'Frame', 1)
        # Menu availability flags (consumed by Menu_Refresh).
        parameter['Menu_able'] = {}
        parameter['Menu_able']['Close'] = 1
        parameter['Menu_able']['Save'] = 1
        parameter['Menu_able']['Save_As'] = 1
        parameter['Menu_able']['Undo'] = 1
        parameter['Menu_able']['Redo'] = 1
        parameter['Menu_able']['Cut'] = 1
        parameter['Menu_able']['Copy'] = 1
        parameter['Menu_able']['Paste'] = 1
        parameter['Menu_able']['Transform'] = 1
        parameter['Menu_able']['Duplicate'] = 1
        # Parameters used by the command logic.
        parameter['Command'] = {}
        # Currently pressed button.
        parameter['Command']['Button'] = 18
        # Load the panel image assets.
        zoom_list = parameter['Img_parameter']['Zoom_List'] = (0.25, 0.375, 0.5, 0.625, 0.75, 1.0)
        parameter['Graphics'] = OFE_Graphics(zoom_list, path0 + '/'+ 'panels')
        # Button id tables: id <-> name, in blocks of 100 per button type.
        parameter['Button'] = {}
        parameter['Button']['Type'] = ['Panel', 'Mouse', 'Transform']
        parameter['Button']['Specific'] = [['Void', 'Check', 'Bonus', 'Bonus_2', 'Drop', 'Drop_2', 'Encounter', 'Encounter_2',
                                            'Draw', 'Draw_2', 'Move', 'Move_2', 'WarpMove', 'WarpMove_2', 'Warp', 'Snow', 'Neutral', 'Deck'],
                                           ['Mouse', 'ArrowDelete', 'ArrowLine', 'ArrowLineDelete', 'OK', 'Cancel'],
                                           ['Clock_test', 'AntiClock_test', 'Vertical_test', 'Horizonal_test', 'OK', 'Cancel']]
        parameter['Button']['Id'] = {}
        parameter['Button']['Name'] = {}
        for i, type_ in enumerate(parameter['Button']['Type']):
            for j, specific in enumerate(parameter['Button']['Specific'][i]):
                id = 100*i + j
                parameter['Button']['Id'][id] = specific
                parameter['Button']['Name'][specific] = id
        print(parameter['Button']['Id'])
        print(parameter['Button']['Name'])
        return parameter
    # Override the window close event.
def closeEvent(self, event):
#ๅๅ
ฅๅๆฐๆไปถ
file_para = open(path0 + '/'+ 'user.dat', 'w')
text = ''
def write_parameter(text, name, value):
text += name + '=' + str(value) + '\n'
return text
text = write_parameter(text, 'Path_Save', self.PARAMETER['Path_Save'])
text = write_parameter(text, 'Path_Game', self.PARAMETER['Path_Game'])
text = write_parameter(text, 'Window_Pos', self.PARAMETER['Img_parameter']['Window_Pos'])
text = write_parameter(text, 'Zoom', self.PARAMETER['Img_parameter']['Zoom'])
text = write_parameter(text, 'Background', self.PARAMETER['Img_parameter']['Background'])
text = write_parameter(text, 'Show_arrows', self.PARAMETER['Img_parameter']['Show_arrows'])
text = write_parameter(text, 'Button_Zoom', self.PARAMETER['Img_parameter']['Button_Zoom'])
text = write_parameter(text, 'Frame', self.PARAMETER['Img_parameter']['Frame'])
file_para.write(text)
file_para.close()
    # Override the window move event.
def moveEvent(self, event):
pos = (event.pos().x(), event.pos().y())
self.PARAMETER['Img_parameter']['Window_Pos'] = pos
    ### Central command dispatcher ###
    def A_Command(self, command):
        """Dispatch a command dict to the interested sub-widgets.

        Recognized keys: 'Paint', 'Button', 'Resize', 'Menu',
        'Status', 'Tab'; each key's presence triggers the matching
        refresh below.
        """
        # Paint: redraw on the canvas; command['Paint'] = {}
        if 'Paint' in command:
            self.canvaswindow.A_Paint(command['Paint'])
        # Button: change button appearance; command['Button'] = {}
        if 'Button' in command:
            # Forward the icon part to the canvas first;
            # command['Button']['Icon'] = {'Type': str}
            if 'Icon' in command['Button']:
                self.canvaswindow.A_Button(command['Button']['Icon'])
            self.buttonwindow.A_Button(command['Button'])
        # Resize the main window.
        if 'Resize' in command:
            self.Resize()
        # Refresh the menus and window title.
        if 'Menu' in command:
            self.Menu_Refresh()
        # Status: forward to both the canvas and the status panel.
        if 'Status' in command:
            self.canvaswindow.A_Status(command['Status'])
            self.statuswindow.A_Status(command['Status'])
        # Tab: refresh the canvas tab bar.
        if 'Tab' in command:
            self.canvaswindow.Tab_Refresh()
    # Window size adjustment.
    def Resize(self):
        """Resize the window to fit the canvas, capped to the screen."""
        # Usable screen size.
        screen = QtWidgets.QDesktopWidget().screenGeometry()
        MaxWidth = screen.width()-200
        MaxHeight = screen.height()-100
        # Suggested window size, derived from the canvas...
        commandwidth = self.canvaswindow.width() + 86
        commandheight = self.canvaswindow.height() + 150
        # ...plus room for the button panel (6 columns of scaled 128px icons).
        PX = 128
        button_zoom = self.PARAMETER['Img_parameter']['Button_Zoom']
        px = int(PX * button_zoom)
        commandwidth += 6 * px
        self.resize(min(commandwidth, MaxWidth), min(commandheight,MaxHeight))
    def Menu_Refresh(self):
        """Refresh menu enabled-states and the window title."""
        # Let the canvas update its menu-related state first.
        self.canvaswindow.Menu_Change()
        # Enable/disable menu entries from PARAMETER['Menu_able'].
        # NOTE(review): a truthy Menu_able value DISABLES the action
        # here, which reads inverted for a key named "able" — confirm
        # the intended meaning before changing it.
        for key in self.PARAMETER['Menu_able']:
            if self.PARAMETER['Menu_able'][key]:
                self.Menu_All[key].setEnabled(False)
            else:
                self.Menu_All[key].setEnabled(True)
        # Window title: append the current file path when one exists.
        id = self.canvaswindow.currentIndex()
        if id >= 0:
            if self.canvaswindow.Canvas_List[id].Is_Field():
                text = "100oj Fields Editor" + VERSION
                file_full = self.canvaswindow.Canvas_List[id].file_path()
                if file_full != '':
                    text += " - " + file_full
                self.setWindowTitle(text)
            else:
                self.setWindowTitle("100oj Fields Editor" + VERSION)
        else:
            self.setWindowTitle("100oj Fields Editor" + VERSION)
    # Status update.
    def Button_Click(self, id):
        """Forward a toolbar button press to the active canvas."""
        print("Main Button Click", id)
        self.canvaswindow.Button_Click(id)
    def New(self):
        """Create a new field after asking the user for a size."""
        Size = OFE_New.Get_Size(self)
        if Size:
            # Create the field and insert it as a new canvas tab.
            field = OFE_Field('new', Size)
            self.canvaswindow.Insert_Canvas(field, 'Untitled')
            # Broadcast the status update via the command signal.
            a_command = {}
            # Last Action
            a_command['Status'] = {}
            a_command['Status']['Last_Action'] = '[New]'
            self.CommandEmitApp.emit(a_command)
    def Open(self):
        """Open an existing .fld file chosen by the user."""
        options = QtWidgets.QFileDialog.Options()
        file_full, _ = QtWidgets.QFileDialog.getOpenFileName(self,"Open a field", self.PARAMETER['Path_Save'],"Fields (*.fld);;All Files (*)", options=options)
        if file_full:
            file_name = QtCore.QFileInfo(file_full).fileName()
            file_path = QtCore.QFileInfo(file_full).absolutePath()
            # Remember the directory as the new default save path.
            self.PARAMETER['Path_Save'] = file_path
            # Load the file into a new canvas tab.
            field = OFE_Field('open', file_full)
            self.canvaswindow.Insert_Canvas(field, file_name, file_full)
            # Broadcast: resize window, refresh menus, update status.
            a_command = {}
            # Resize
            a_command['Resize'] = None
            # Menu refresh
            a_command['Menu'] = None
            # Last Action
            a_command['Status'] = {}
            a_command['Status']['Last_Action'] = '[Open] ' + file_name
            self.CommandEmitApp.emit(a_command)
    def Open_Official(self):
        """Open one of the official fields bundled in fields.pak."""
        Field_and_Name = OFE_Files.Get_Field(self)
        if Field_and_Name:
            field = Field_and_Name[0]
            name = Field_and_Name[1]
            self.canvaswindow.Insert_Canvas(field, name)
            # Broadcast: resize window, refresh menus, update status.
            a_command = {}
            # Resize
            a_command['Resize'] = None
            # Menu refresh
            a_command['Menu'] = None
            # Last Action
            a_command['Status'] = {}
            a_command['Status']['Last_Action'] = '[Open] ' + name
            self.CommandEmitApp.emit(a_command)
    def Close(self):
        """Close the current canvas tab."""
        file_name = self.canvaswindow.Remove_Canvas()
        if file_name:
            # Broadcast: resize window, refresh menus, update status.
            a_command = {}
            # Resize
            a_command['Resize'] = None
            # Menu refresh
            a_command['Menu'] = None
            # Last Action
            a_command['Status'] = {}
            a_command['Status']['Last_Action'] = '[Close] '+file_name
            self.CommandEmitApp.emit(a_command)
        else:
            print('Error: Can not close.')
def Save(self):
    """Save the active canvas; falls back to Save As for untitled fields."""
    need_save = self.canvaswindow.Need_Save()
    if need_save:
        file_path = self.canvaswindow.file_path()
        if file_path == '':
            # Never saved before: ask the user for a destination.
            self.Save_As()
        else:
            file_full = file_path
            # Initialization: split the full path into name and directory.
            file_name = QtCore.QFileInfo(file_full).fileName()
            file_path = QtCore.QFileInfo(file_full).absolutePath()
            # Reset the default save path.
            self.PARAMETER['Path_Save'] = file_path
            # Write the field to file.
            self.canvaswindow.Save(file_full)
            #A_Command
            a_command = {}
            #Last Action
            a_command['Status'] = {}
            a_command['Status']['Last_Action'] = '[Save] ' + file_name
            # Emit the A_Command signal.
            self.CommandEmitApp.emit(a_command)
def Save_As(self):
    """Ask for a destination path and save the active canvas there."""
    options = QtWidgets.QFileDialog.Options()
    file_full, _ = QtWidgets.QFileDialog.getSaveFileName(self,"Save Field", self.PARAMETER['Path_Save'],"Fields (*.fld);;All Files (*)", options=options)
    if file_full:
        # Initialization: split the full path into name and directory.
        file_name = QtCore.QFileInfo(file_full).fileName()
        file_path = QtCore.QFileInfo(file_full).absolutePath()
        # Reset the default save path.
        self.PARAMETER['Path_Save'] = file_path
        # Write the field to file.
        self.canvaswindow.Save(file_full)
        #A_Command
        a_command = {}
        #Last Action
        a_command['Status'] = {}
        a_command['Status']['Last_Action'] = '[Save] ' + file_name
        # Emit the A_Command signal.
        self.CommandEmitApp.emit(a_command)
def Upload(self):
    """Upload the current field into the game via the upload dialog."""
    # Get the current field.
    id = self.canvaswindow.currentIndex()
    if id >= 0:
        field_now = self.canvaswindow.Field()
        # Open the dialog; it returns the new path of the game file.
        path_new = OFE_Upload.Upload_Main(self, self.PARAMETER['Path_Game'], field_now)
        if path_new:
            self.PARAMETER['Path_Game'] = path_new
def Undo(self):
    # Delegate undo to the canvas window.
    self.canvaswindow.Undo()
def Redo(self):
    # Delegate redo to the canvas window.
    self.canvaswindow.Redo()
def Cut(self):
    # Delegate cut to the canvas window.
    self.canvaswindow.Cut()
def Copy(self):
    # Delegate copy to the canvas window.
    self.canvaswindow.Copy()
def Paste(self):
    # Delegate paste to the canvas window.
    self.canvaswindow.Paste()
def Transform(self):
    # Delegate the transform action to the canvas window.
    self.canvaswindow.Transform()
def Duplicate(self):
    # Delegate duplicate to the canvas window.
    self.canvaswindow.Duplicate()
def Background(self):
    """Let the user pick a background colour and store it as an RGBA tuple."""
    col = QtWidgets.QColorDialog.getColor()
    if col.isValid():
        # BUG FIX: colour channels are 0-255; the original stored 256 as the
        # alpha component, which is out of range for an RGBA value.
        self.PARAMETER['Img_parameter']['Background'] = (col.red(), col.green(), col.blue(), 255)
def Zoom_Level(self):
    """Apply the zoom factor chosen in the zoom action group."""
    action_this = self.zoom_group.checkedAction()
    # The checked action's text is the numeric zoom factor.
    zoom = float(action_this.text())
    self.PARAMETER['Img_parameter']['Zoom'] = zoom
    #A_Command
    a_command = {}
    # Repaint everything and redraw the transform overlay.
    a_command['Paint'] = {'All':None, 'Transform_Redraw': None}
    # Canvas size.
    a_command['Resize'] = None
    # Emit the A_Command signal.
    self.CommandEmitApp.emit(a_command)
def Button_Zoom_Level(self):
    """Apply the button zoom factor chosen in the button-zoom action group."""
    action_this = self.button_zoom_group.checkedAction()
    # The checked action's text is the numeric zoom factor.
    zoom = float(action_this.text())
    self.PARAMETER['Img_parameter']['Button_Zoom'] = zoom
    #A_Command
    a_command = {}
    # Reset button size.
    a_command['Button'] = {'Zoom': None}
    # Canvas size.
    a_command['Resize'] = None
    # Emit the A_Command signal.
    self.CommandEmitApp.emit(a_command)
def BackTrack(self, state):
    """Toggle backtrack rendering and request a full repaint and resize."""
    # Persist the toggle as an int flag: 1 when enabled, 0 otherwise.
    self.PARAMETER['Img_parameter']['BackTrack'] = 1 if state else 0
    a_command = {
        # Repaint everything and redraw the transform overlay.
        'Paint': {'All': None, 'Transform_Redraw': None},
        # Recompute the canvas size.
        'Resize': None,
    }
    # Emit the A_Command signal.
    self.CommandEmitApp.emit(a_command)
def run():
    """Start the Qt application and show the main editor window."""
    app = QtWidgets.QApplication(sys.argv)
    ex = OFE_MainWindow()
    ex.show()
    # Block until the Qt event loop exits, then propagate its exit code.
    sys.exit(app.exec_())
# Allow launching the editor directly as a script.
if __name__ == '__main__':
    run()
|
{"/OFE/OFE_Canvas.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py", "/OFE/__init__.py"], "/OFE/__init__.py": ["/OFE/OFE_Panels.py", "/OFE/OFE_Field.py", "/OFE/OFE_Buttoms.py", "/OFE/OFE_Status.py", "/OFE/OFE_Canvas.py", "/OFE/OFE_Files.py", "/OFE/OFE_Graphics.py"], "/OFE/OFE_Image.py": ["/OFE/__init__.py"], "/OFE/OFE_Buttoms.py": ["/OFE/__init__.py"], "/OFE/OFE_Files.py": ["/OFE/OFE_Field.py", "/OFE/OFE_Image.py"], "/OFE/OFE_main.py": ["/OFE/OFE_Field.py", "/OFE/__init__.py", "/OFE/OFE_Graphics.py"]}
|
22,099
|
dschien/shareapp
|
refs/heads/master
|
/api/models.py
|
from django.db import models
from django.contrib.auth.models import User
from django_extensions.db.models import TimeStampedModel
class UserProfile(models.Model):
    """
    Extra profile data attached one-to-one to the Django auth User.
    """
    user = models.OneToOneField(User, verbose_name="django authentication user", related_name='user_profile')
    # NOTE(review): a ForeignKey models many-profiles-to-one-user here, not a
    # set of peers per profile; a ManyToManyField may have been intended — confirm.
    peers = models.ForeignKey(User, related_name='peers')
    def __unicode__(self):
        return "%s " % self.user.username
# Create your models here.
class Item(TimeStampedModel):
    """
    An Item offered for sharing; created/modified timestamps come from
    TimeStampedModel.
    """
    name = models.CharField(max_length=200)
    description = models.TextField(null=True)
    # The user offering this item.
    provider = models.ForeignKey(User, related_name='offered_items')
class Transaction(TimeStampedModel):
    """A request by a consumer for an item; timestamps from TimeStampedModel."""
    item = models.ForeignKey(Item, related_name='items')
    # Consumer may be unset until the transaction is claimed.
    consumer = models.ForeignKey(User, null=True, blank=True)
|
{"/api/views.py": ["/api/models.py"], "/shareapp/urls.py": ["/api/models.py"], "/api/tests.py": ["/api/models.py"]}
|
22,100
|
dschien/shareapp
|
refs/heads/master
|
/shareapp/local_settings_template.py
|
import sys
__author__ = 'schien'
# MySQL connection for normal runs; overridden with SQLite under `manage.py test`.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'shareapp',
        'USER': 'shareapp',
        'HOST': 'localhost',
        'PORT': '',
        'PASSWORD': '',
    }
}
# disable south for testing
SOUTH_TESTS_MIGRATE = False  # To disable migrations and use syncdb instead
SKIP_SOUTH_TESTS = True  # To disable South's own unit tests
# - See more at: http://www.celerity.com/blog/2013/04/29/how-write-speedy-unit-tests-django-part-1-basics/#sthash.9vDnOgRl.dpuf
if 'test' in sys.argv: DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'}
ALLOWED_HOSTS = ['fritz', 'localhost', 'dgd', '127.0.0.1']
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Development defaults: cookies over plain HTTP, sessions survive browser close.
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
# for development without authentication
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# # for web auth
# # for oauth
# #'rest_framework.authentication.OAuth2Authentication',
# # 'rest_framework.authentication.BasicAuthentication',
# ),
# 'DEFAULT_PERMISSION_CLASSES': (
# #'rest_framework.permissions.IsAuthenticated',
# )
#
# # 'PAGINATE_BY': 10
#
# }
try:
from development_settings import *
except ImportError, e:
print 'Unable to load local_settings.py:', e
|
{"/api/views.py": ["/api/models.py"], "/shareapp/urls.py": ["/api/models.py"], "/api/tests.py": ["/api/models.py"]}
|
22,101
|
dschien/shareapp
|
refs/heads/master
|
/api/views.py
|
import json
from django.contrib.auth.models import User
from django.http import HttpResponse
# Create your views here.
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from api.models import Item, Transaction
@api_view(['POST'])
# @authentication_classes((OAuth2Authentication,))
@permission_classes((AllowAny,))
def requestItem(request, ):
    """Create a Transaction linking an item to a consumer.

    Expects POST data: 'id' (Item pk) and 'c_id' (consumer User pk).
    Returns JSON "1" on success; 404 if either object is missing.
    """
    # BUG FIX: managers have no get_object_or_404 method — the original
    # `Item.objects.get_object_or_404(...)` raised AttributeError. Use the
    # django.shortcuts helper, which also turns DoesNotExist into Http404.
    from django.shortcuts import get_object_or_404
    item_id = request.DATA['id']
    consumer_id = request.DATA['c_id']
    item = get_object_or_404(Item, id=item_id)
    consumer = get_object_or_404(User, id=consumer_id)
    t = Transaction(item=item, consumer=consumer)
    t.save()
    return HttpResponse(json.dumps(1))
@api_view(['POST'])
# @authentication_classes((OAuth2Authentication,))
@permission_classes((AllowAny,))
def addItem(request, ):
    """Create an Item offered by the requesting user.

    Expects POST data: 'name' and 'description'. Returns JSON "1".
    """
    name = request.DATA['name']
    description = request.DATA['description']
    # NOTE(review): with AllowAny this may be an AnonymousUser, which cannot
    # be saved as a ForeignKey — confirm authentication is enforced upstream.
    user = request.user
    item = Item(name=name, description=description, provider=user)
    item.save()
    return HttpResponse(json.dumps(1))
|
{"/api/views.py": ["/api/models.py"], "/shareapp/urls.py": ["/api/models.py"], "/api/tests.py": ["/api/models.py"]}
|
22,102
|
dschien/shareapp
|
refs/heads/master
|
/shareapp/urls.py
|
# from django.conf.urls.defaults import url, patterns, include
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, Group
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework import viewsets, routers
from api import views
from api.models import Item
admin.autodiscover()
# ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
    # CRUD API over the auth User model.
    model = User
class GroupViewSet(viewsets.ModelViewSet):
    # CRUD API over the auth Group model (currently not routed).
    model = Group
# ViewSets define the view behavior.
class ItemViewSet(viewsets.ModelViewSet):
    # CRUD API over the Item model.
    model = Item
# class GroupViewSet(viewsets.ModelViewSet):
# model = Group
# Routers provide an easy way of automatically determining the URL conf
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
# router.register(r'groups', GroupViewSet)
router.register(r'items', ItemViewSet)
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'shareapp.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Browsable DRF API plus the two function-based endpoints from api.views.
    url(r'^apibrowse/', include(router.urls)),
    url(r'^api/additem$', views.addItem, name='additem'),
    url(r'^api/requestitem$', views.requestItem, name='requestitem'),
    # Home page requires login; auth comes from social + registration apps.
    url(r'^$', login_required(TemplateView.as_view(template_name="index.html")), name="home"),
    url(r'^social_login/', include('social.apps.django_app.urls', namespace='social')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/', include('registration.backends.default.urls')),
)
|
{"/api/views.py": ["/api/models.py"], "/shareapp/urls.py": ["/api/models.py"], "/api/tests.py": ["/api/models.py"]}
|
22,103
|
dschien/shareapp
|
refs/heads/master
|
/api/tests.py
|
from django.test import TestCase
from rest_framework.test import APITestCase
from django.core.urlresolvers import reverse
# Create your tests here.
from api.models import Item
class ShareappTests(APITestCase):
    """
    API functions for app login
    """
    fixtures = ['test_data.json']
    def test_anon_logging(self):
        # NOTE(review): this test posts to 'app_log_message' (no such route in
        # the visible urls.py) and reads Item.message / Item.user, fields the
        # visible Item model does not declare — looks copied from another app;
        # confirm it is expected to pass.
        self.assertTrue(Item.objects.count() == 0)
        data = {'message': 'Some message'}
        response = self.client.post(reverse('app_log_message'), data)
        self.assertTrue(response.status_code == 200)
        self.assertTrue(response.content == "1")
        self.assertTrue(Item.objects.count() == 1)
        self.assertTrue(Item.objects.all()[0].message == "Some message")
        self.assertTrue(Item.objects.all()[0].user is None)
|
{"/api/views.py": ["/api/models.py"], "/shareapp/urls.py": ["/api/models.py"], "/api/tests.py": ["/api/models.py"]}
|
22,108
|
xiaolin1529/pythonspider
|
refs/heads/master
|
/demoSpider/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class DemospiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class HuanQiuItem(scrapy.Item):
    """One HuanQiu news entry scraped from the list API."""
    title = scrapy.Field()  # headline
    summary = scrapy.Field()  # keywords / summary
    source_url = scrapy.Field()  # URL of the full article
    source_name = scrapy.Field()  # source site name
    display_date = scrapy.Field()  # publication time
    cover_url = scrapy.Field()  # cover image URL
|
{"/demoSpider/spiders/demo_Spider.py": ["/demoSpider/items.py"]}
|
22,109
|
xiaolin1529/pythonspider
|
refs/heads/master
|
/demoSpider/spiders/demo_Spider.py
|
import scrapy as scrapy
import json
from demoSpider.items import HuanQiuItem
class demo_Spider(scrapy.Spider):
    """Spider for the HuanQiu china-news list API (JSON endpoint)."""
    name = 'demo_Spider1'
    allowed_domains = ['china.huanqiu.com']
    start_urls = ['https://china.huanqiu.com/api/list2?node=/e3pmh1nnq/e7tl4e309&offset=0&limit=25']
    # Spider-local settings: route items through HuanQiuPipeline
    # (the number is the pipeline's priority order).
    custom_settings = {
        'ITEM_PIPELINES': {
            'demoSpider.pipelines.HuanQiuPipeline': 300,
        }
    }
    # no.1 Fetch the API endpoint; it returns JSON.
    def start_requests(self):
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse_detail, method='GET', headers=None,
                             errback=None)
    def parse(self, response):
        # Unused: all parsing happens in parse_detail.
        pass
    def parse_detail(self, response: scrapy.http.Response):
        """Decode the JSON list payload and yield one item per entry."""
        news_data_list = response.text
        json_news = json.loads(news_data_list)
        for i in json_news['list']:
            item = HuanQiuItem()
            item['title'] = i['title']
            item['summary'] = i['summary']
            item['source_url'] = i['source']['url']
            item['source_name'] = i['source']['name']
            item['display_date'] = i['ext_displaytime']
            item['cover_url'] = i['cover']
            yield item
|
{"/demoSpider/spiders/demo_Spider.py": ["/demoSpider/items.py"]}
|
22,110
|
xiaolin1529/pythonspider
|
refs/heads/master
|
/demoSpider/pipelines.py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from twisted.enterprise import adbapi
import settings
class DemospiderPipeline(object):
    # Default no-op pipeline: passes every item through unchanged.
    def process_item(self, item, spider):
        return item
class HuanQiuPipeline(object):
    """Persist HuanQiu news items into the MySQL table scrapy.huanqiu_web."""

    def __init__(self):
        # Connect to MySQL using credentials from the project settings module.
        self.connect = pymysql.connect(
            host=settings.MYSQL_HOST,
            db=settings.MYSQL_DBNAME,
            user=settings.MYSQL_USER,
            passwd=settings.MYSQL_PASSWD,
            charset='utf8',
            use_unicode=True)
        # Cursor used for all inserts.
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        """Insert one scraped item; failures are printed, never raised."""
        try:
            # BUG FIX: the original built the SQL with %-string formatting
            # from scraped text, which breaks on quotes and is SQL-injectable.
            # Pass values as parameters so the driver escapes them.
            sql = ('insert into scrapy.huanqiu_web '
                   '(title,summary,source_url,source_name,display_time,cover_url) '
                   'values (%s,%s,%s,%s,%s,%s)')
            self.cursor.execute(sql, (
                item['title'], item['summary'], item['source_url'],
                item['source_name'], item['display_date'], item['cover_url']))
            self.connect.commit()
        except Exception as e:
            print('err:', e)

    def close_spider(self, spider):
        """Close the database connection when the spider finishes."""
        self.connect.close()
|
{"/demoSpider/spiders/demo_Spider.py": ["/demoSpider/items.py"]}
|
22,124
|
mawei1191546352/Commerce-Full-Stack-Web-App-using-Django
|
refs/heads/master
|
/auctions/views.py
|
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django import forms
from .models import User, Listing, Watchlist, Bids, Comments
class CreateListingForm(forms.Form):
    """Form for creating a new auction listing."""
    create_title = forms.CharField(label="Item Title", max_length=50)
    create_description = forms.CharField(label="Item Description", max_length=1000, widget=forms.Textarea)
    create_size = forms.CharField(label="Item Size (Number or Letters)", max_length=10, required=False)
    # max_value matches the PositiveSmallIntegerField limit on Listing.price.
    create_price = forms.IntegerField(label="Starting Price in USD ($)", max_value=32767, min_value=0)
    choose_gender = forms.ChoiceField(label="Select Gender", choices=Listing.GENDER_CHOICES, required=False)
    choose_category = forms.ChoiceField(label="Choose Clothing Category", choices=Listing.CATEGORY_CHOICES, required=False)
    create_picture = forms.URLField(label="Link to a Photo", max_length=500, initial="https://images.unsplash.com/photo-1517502166878-35c93a0072f0?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=934&q=80", required=False)
class CreateCommentForm(forms.Form):
    """Form for posting a comment on a listing."""
    create_comment = forms.CharField(label="Post a Comment", max_length=500, widget=forms.Textarea)
def index(request):
    """Render the active-listings index with navbar counters."""
    return render(request, "auctions/index.html", {
        "active_items": Listing.objects.filter(active=True),
        # Navbar badge counts for the current user.
        "watchlist": len(Watchlist.objects.filter(user_id=request.user.id)),
        "total_items": len(Listing.objects.filter(user_id=request.user.id)),
        "genders": Listing.GENDER_CHOICES,
        "categories": Listing.CATEGORY_CHOICES
    })
def landing_page(request):
    """Render the public landing page with the category/gender menus."""
    return render(request, "auctions/landing.html", {
        "genders": Listing.GENDER_CHOICES,
        "categories": Listing.CATEGORY_CHOICES
    })
def login_view(request):
    """Log a user in (POST) or show the login form (GET)."""
    if request.method == "POST":
        # Attempt to sign user in
        username = request.POST["username"]
        password = request.POST["password"]
        user = authenticate(request, username=username, password=password)
        # Check if authentication successful
        if user is not None:
            login(request, user)
            return HttpResponseRedirect(reverse("index"))
        else:
            return render(request, "auctions/login.html", {
                "error_message": "Invalid username and/or password."
            })
    else:
        return render(request, "auctions/login.html")
def logout_view(request):
    """Log the user out and return to the index."""
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Register a new account (POST) or show the registration form (GET)."""
    if request.method == "POST":
        username = request.POST["username"]
        email = request.POST["email"]
        # Ensure password matches confirmation
        password = request.POST["password"]
        confirmation = request.POST["confirmation"]
        if password != confirmation:
            return render(request, "auctions/register.html", {
                "username": username,
                "email": email,
                "password": password,
                "passwords_unmatched": "Passwords must match."
            })
        # Attempt to create new user
        try:
            user = User.objects.create_user(username, email, password)
            user.save()
        except IntegrityError:
            # Duplicate username.
            return render(request, "auctions/register.html", {
                "username": username,
                "email": email,
                "password": password,
                "confirmation": confirmation,
                "user_taken": "Username already taken."
            })
        # Log the new user straight in.
        login(request, user)
        return HttpResponseRedirect(reverse("index"))
    else:
        return render(request, "auctions/register.html")
def listing_page(request, itemid):
    """Listing detail view: handles watch/unwatch, bidding, closing, comments."""
    # Handle four different forms submissions
    if request.method == "POST":
        # User adds listing to Watchlist
        if "watch" in request.POST:
            # If entry for listing and user exists, then update existing entry
            try:
                watchlist = Watchlist.objects.get(user_id=request.user.id, listing_id=itemid)
                watchlist.active = True
                watchlist.save(update_fields=["active"])
                return HttpResponseRedirect(reverse("listing", args=(itemid,)))
            # If no entry for listing and user, then create new entry
            except:
                watchlist = Watchlist(user_id=request.user.id, listing_id=itemid, active=True)
                watchlist.save()
                return HttpResponseRedirect(reverse("listing", args=(itemid,)))
        # User removes listing from Watchlist
        if "unwatch" in request.POST:
            watchlist = Watchlist.objects.get(user_id=request.user.id, listing_id=itemid)
            watchlist.active = False
            watchlist.save(update_fields=["active"])
            return HttpResponseRedirect(reverse("listing", args=(itemid,)))
        # Buyer bids on auction
        if "bid" in request.POST:
            listing = Listing.objects.get(pk=itemid)
            price = listing.price
            bid = int(request.POST.get("amount"))
            # BUG FIX: the original instantiated a Bids object and called
            # .last() on it (an AttributeError silently swallowed by a bare
            # except), so `highest` was always 0 and every bid was accepted.
            # Query the table for this user's latest bid instead.
            last_bid = Bids.objects.filter(user_id=request.user.id, listing_id=itemid).last()
            highest = last_bid.offer if last_bid is not None else 0
            # NOTE(review): rejecting only when the bid is below BOTH the price
            # AND the highest bid looks suspicious (`or` may have been
            # intended) — preserved as-is; confirm the business rule.
            if bid < price and bid < highest:
                return render(request, "auctions/listing.html", {
                    "error_message": "Bidding price must be greater than current price"
                })
            else:
                bids = Bids(user_id=request.user.id, listing_id=itemid, offer=bid)
                bids.save()
                highestbid = Listing.objects.get(pk=itemid)
                highestbid.highestbid = bid
                highestbid.save(update_fields=["highestbid"])
                return HttpResponseRedirect(reverse("listing", args=(itemid,)))
        # Seller closes an auction
        if "close" in request.POST:
            listing = Listing.objects.get(pk=itemid)
            listing.active = False
            listing.save(update_fields=["active"])
            return HttpResponseRedirect(reverse("inventory"))
        # User posts comment
        if "comment" in request.POST:
            input = CreateCommentForm(request.POST)
            if input.is_valid():
                comment = Comments(user=request.user, comment=(input.cleaned_data["create_comment"]), timestamp=timezone.now(), listing=Listing(pk=itemid))
                comment.save()
                return HttpResponseRedirect(reverse("listing", args=(itemid,)))
            else:
                return render(request, "auctions/listing.html", {
                    "comment_form": input,
                    "error_message": "Sorry, the form was not valid"
                })
    else:
        # GET: gather everything the detail template needs.
        try:
            watching = Watchlist.objects.get(user_id=request.user.id, listing_id=itemid)
        except:
            watching = None
        try:
            bids = Bids.objects.filter(listing_id=itemid)
        except:
            bids = None
        try:
            listing = Listing.objects.get(pk=itemid)
            winner = Bids.objects.filter(listing_id=itemid).last()
        except:
            winner = None
        return render(request, "auctions/listing.html", {
            "listing": Listing.objects.get(pk=itemid),
            "watching": watching,
            "total_bids": len(bids),
            "bid": bids,
            "winner": winner,
            "comments": Comments.objects.filter(listing_id=itemid),
            "comment_form": CreateCommentForm(),
            "watchlist": len(Watchlist.objects.filter(user_id=request.user.id)),
            "total_items": len(Listing.objects.filter(user_id=request.user.id)),
            "genders": Listing.GENDER_CHOICES,
            "categories": Listing.CATEGORY_CHOICES
        })
@login_required
def create_page(request):
    """Create a new listing from the form (POST) or show the form (GET)."""
    if request.method == "POST":
        input = CreateListingForm(request.POST)
        if input.is_valid():
            title = (input.cleaned_data["create_title"])
            description = (input.cleaned_data["create_description"])
            size = (input.cleaned_data["create_size"])
            price = (input.cleaned_data["create_price"])
            # The starting price doubles as the initial highest bid.
            highestbid = (input.cleaned_data["create_price"])
            gender = (input.cleaned_data["choose_gender"])
            category = (input.cleaned_data["choose_category"])
            picture = (input.cleaned_data["create_picture"])
            user = request.user
            l = Listing(user=user, title=title, description=description, size=size, price=price, highestbid=highestbid, gender=gender, category=category, photo_url=picture, timestamp=timezone.now())
            l.save()
            return HttpResponseRedirect(reverse("inventory"))
        else:
            return render(request, "auctions/create.html", {
                "create_form": input,
                "error_message": "Sorry, the form was not valid"
            })
    else:
        return render(request, "auctions/create.html", {
            "create_form": CreateListingForm(),
            "watchlist": len(Watchlist.objects.filter(user_id=request.user.id)),
            "total_items": len(Listing.objects.filter(user_id=request.user.id)),
            "genders": Listing.GENDER_CHOICES,
            "categories": Listing.CATEGORY_CHOICES
        })
@login_required
def bids_page(request):
    """Show all bids placed by the current user."""
    return render(request, "auctions/bids.html", {
        "bids": Bids.objects.filter(user_id=request.user.id),
        "watchlist": len(Watchlist.objects.filter(user_id=request.user.id)),
        "total_items": len(Listing.objects.filter(user_id=request.user.id)),
        "genders": Listing.GENDER_CHOICES,
        "categories": Listing.CATEGORY_CHOICES
    })
@login_required
def inventory_page(request):
    """Show the listings created by the current user."""
    return render(request, "auctions/inventory.html", {
        "inventory": Listing.objects.filter(user=request.user),
        "watchlist": len(Watchlist.objects.filter(user_id=request.user.id)),
        "total_items": len(Listing.objects.filter(user_id=request.user.id)),
        "genders": Listing.GENDER_CHOICES,
        "categories": Listing.CATEGORY_CHOICES
    })
@login_required
def watchlist_page(request):
    """Show the listings on the current user's watchlist."""
    try:
        # Resolve watchlist rows to their Listing objects.
        watchlist = Watchlist.objects.filter(user_id=request.user.id).values_list("listing_id")
        watching = Listing.objects.filter(id__in = watchlist)
    except:
        watching = 0
    return render(request, "auctions/watchlist.html", {
        "watching": watching,
        "watchlist": len(Watchlist.objects.filter(user_id=request.user.id)),
        "total_items": len(Listing.objects.filter(user_id=request.user.id)),
        "genders": Listing.GENDER_CHOICES,
        "categories": Listing.CATEGORY_CHOICES
    })
def category_page(request, selection):
    """Show active listings matching a category or gender code."""
    # `selection` may be either a category code or a gender code; the
    # template decides which queryset to display.
    try:
        category = Listing.objects.filter(category=selection, active=True)
    except:
        category = None
    try:
        gender = Listing.objects.filter(gender=selection, active=True)
    except:
        gender = None
    return render(request, "auctions/category.html", {
        "selection": selection,
        "category": category,
        "gender": gender,
        "watchlist": len(Watchlist.objects.filter(user_id=request.user.id)),
        "total_items": len(Listing.objects.filter(user_id=request.user.id)),
        "genders": Listing.GENDER_CHOICES,
        "categories": Listing.CATEGORY_CHOICES
    })
|
{"/auctions/views.py": ["/auctions/models.py"], "/auctions/admin.py": ["/auctions/models.py"]}
|
22,125
|
mawei1191546352/Commerce-Full-Stack-Web-App-using-Django
|
refs/heads/master
|
/auctions/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import Listing, Bids, Comments
# Expose the auction models in the Django admin site.
admin.site.register(Listing)
admin.site.register(Bids)
admin.site.register(Comments)
|
{"/auctions/views.py": ["/auctions/models.py"], "/auctions/admin.py": ["/auctions/models.py"]}
|
22,126
|
mawei1191546352/Commerce-Full-Stack-Web-App-using-Django
|
refs/heads/master
|
/auctions/migrations/0002_auto_20210201_0447.py
|
# Generated by Django 3.1.5 on 2021-02-01 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens bids.offer and listing.price to
    # PositiveSmallIntegerField. Generated files should not be hand-edited.
    dependencies = [
        ('auctions', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bids',
            name='offer',
            field=models.PositiveSmallIntegerField(),
        ),
        migrations.AlterField(
            model_name='listing',
            name='price',
            field=models.PositiveSmallIntegerField(),
        ),
    ]
|
{"/auctions/views.py": ["/auctions/models.py"], "/auctions/admin.py": ["/auctions/models.py"]}
|
22,127
|
mawei1191546352/Commerce-Full-Stack-Web-App-using-Django
|
refs/heads/master
|
/auctions/urls.py
|
from django.urls import path
from . import views
# URL routes for the auctions app; names are used by reverse() in views.py.
urlpatterns = [
    path("", views.index, name="index"),
    path("landing", views.landing_page, name="landing"),
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),
    path("listing/<int:itemid>", views.listing_page, name="listing"),
    path("category/<str:selection>", views.category_page, name="category"),
    path("add-listing", views.create_page, name="create"),
    path("my-listings", views.inventory_page, name="inventory"),
    path("my-bids", views.bids_page, name="bids"),
    path("my-watchlist", views.watchlist_page, name="watchlist")
]
|
{"/auctions/views.py": ["/auctions/models.py"], "/auctions/admin.py": ["/auctions/models.py"]}
|
22,128
|
mawei1191546352/Commerce-Full-Stack-Web-App-using-Django
|
refs/heads/master
|
/auctions/models.py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    # Custom user model; no extra fields beyond Django's AbstractUser.
    pass
class Listing(models.Model):
    """An auction listing for a clothing item."""
    title = models.CharField(max_length=50)
    description = models.TextField(max_length=1000)
    size = models.CharField(max_length=10, blank=True)
    price = models.PositiveSmallIntegerField()
    # Current highest bid; initialized to the starting price by create_page.
    highestbid = models.PositiveSmallIntegerField(blank=True)
    photo_url = models.URLField(max_length=500, blank=True, default="https://images.unsplash.com/photo-1517502166878-35c93a0072f0?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=934&q=80")
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=False)
    # False once the seller closes the auction.
    active = models.BooleanField(default=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    NEUTRAL = 'NRL'
    WOMEN = 'WMN'
    MEN = 'MEN'
    GENDER_CHOICES = [
        (NEUTRAL, 'Neutral'),
        (WOMEN, 'Women'),
        (MEN, 'Men')
    ]
    gender = models.CharField(max_length=3, choices=GENDER_CHOICES, blank=True)
    UNKNOWN = 'UNK'
    ACCESSORIES = 'AC'
    TOPS = 'TPS'
    JACKETS = 'JKT'
    SWEATERS = 'SWT'
    SHIRTS = 'SRT'
    SUITS = 'ST'
    DRESSES = 'DRS'
    PANTS = 'PN'
    JEANS = 'JN'
    SHORTS = 'SHR'
    SWIM = 'SWM'
    SHOES = 'SHO'
    CATEGORY_CHOICES = [
        (UNKNOWN, 'Unknown'),
        (ACCESSORIES, 'Accessories'),
        (TOPS, 'Tops'),
        (JACKETS, 'Jackets'),
        (SWEATERS, 'Sweaters'),
        (SHIRTS, 'Shirts'),
        (SUITS, 'Suits'),
        (DRESSES, 'Dresses'),
        (PANTS, 'Pants'),
        (JEANS, 'Jeans'),
        (SHORTS, 'Shorts'),
        (SWIM, 'Swim'),
        (SHOES, 'Shoes'),
    ]
    category = models.CharField(max_length=3, choices=CATEGORY_CHOICES, blank=True)
    def __str__(self):
        return f"{self.user.username} created listing for '{self.title}' at ${self.price}"
class Watchlist(models.Model):
    """Marks a listing as watched by a user; `active` toggles watching."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    listing = models.ForeignKey(Listing, on_delete=models.CASCADE)
    active = models.BooleanField(default=False)
class Bids(models.Model):
    """A single bid (offer) placed by a user on a listing."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    offer = models.PositiveSmallIntegerField()
    listing = models.ForeignKey(Listing, on_delete=models.CASCADE, related_name="listing_bids")
    def __str__(self):
        return f"{self.user.username} placed bid for ${self.offer} for '{self.listing.title}'"
class Comments(models.Model):
    """A user comment attached to a listing."""
    comment = models.TextField(max_length=500)
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=False)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    listing = models.ForeignKey(Listing, on_delete=models.CASCADE, related_name="listing_comments")
    def __str__(self):
        # BUG FIX: removed a stray doubled apostrophe after the comment text.
        return f"{self.user.username} wrote '{self.comment}' on {self.listing.title}"
|
{"/auctions/views.py": ["/auctions/models.py"], "/auctions/admin.py": ["/auctions/models.py"]}
|
22,131
|
Casey-S/CS1
|
refs/heads/master
|
/Gradebook/oop_classroom.py
|
from oop_students import Student
class Classroom(Student):
    # Start of classroom class.
    # NOTE(review): inheriting from Student looks unintended — a Classroom
    # *has* students rather than *is* one, and super().__init__ is never
    # called; consider plain composition.
    def __init__(self, class_name, teacher_name):
        # Create class with class name, teacher name, and roster dict
        # mapping student name -> ID.
        self.class_name = class_name
        self.teacher_name = teacher_name
        self.roster = {}
    def add_student(self, student_name, ID):
        # Add student to roster array.
        # self.roster[student_name] = super(Classroom, self).__init__(student_name, ID)
        self.roster[student_name] = ID
    def get_student_roster(self, roster):
        # Print all currently enrolled students.
        # NOTE(review): the `roster` parameter is unused; the instance
        # attribute is printed instead.
        print(self.roster)
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,132
|
Casey-S/CS1
|
refs/heads/master
|
/pythonIO/pythonIO.py
|
# Read the file; use a context manager so the handle is closed even on
# error (the original used bare open()/read()/close()).
with open('example.txt') as f:
    text = f.read()

# Automatically close text file once done
with open('example.txt', 'w') as f:
    f.write("Test words")
with open('example.txt') as f:
    text = f.read()
    print(text)
with open("example.txt", 'a') as f:
    f.write('line 1 \n')
    f.write('line 2 \n')
with open('example.txt') as f:
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,133
|
Casey-S/CS1
|
refs/heads/master
|
/pythonIO/sales_data.py
|
# Count the number of lines in the sales data file.
with open('sales_data.txt') as f:
    index = 0
    for index, line in enumerate(f):
        index += 1
    print(index)

# Collect February lines: the char before the first '/' is '2' and the one
# before that is not '1' (which would make the month 12).
with open('sales_data.txt') as f:
    feb_list = []
    for index, line in enumerate(f):
        slash_pos = line.index('/')
        # BUG FIX: `is` compares object identity, not string equality, and
        # only works by accident via small-string interning — use ==/!=.
        if line[slash_pos - 1] == "2" and line[slash_pos - 2] != "1":
            feb_list.append(line)
# print(feb_list)
# print(len(feb_list))
# remove /t
# remove /n
# remove $
# .split = array of strings
# .replace = maintain string
raw_data = open('sales_data.txt')
def clean_up(raw_data):
    """Normalize raw sales lines.

    For each line: strip '$' and the trailing newline, split on tabs, and
    convert the fourth column to a float. Returns a list of field lists.
    """
    def _clean(line):
        fields = line.replace('$', '').replace('\n', '').split('\t')
        fields[3] = float(fields[3])
        return fields

    return [_clean(line) for line in raw_data]
cleaned_data = clean_up(raw_data)
# First nine rows are assumed to be the Philadelphia block — TODO confirm
# against the data file.
cleaned_data_phillies = cleaned_data[:9]
phillies_sales = [i for i in cleaned_data_phillies if i[0] == 'Philadelphia']
print(phillies_sales)
# Sum the numeric sales column across all rows.
total_sales = sum([i[3] for i in cleaned_data])
print(total_sales)
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,134
|
Casey-S/CS1
|
refs/heads/master
|
/Gradebook/oop_students.py
|
class Student(object):
    """A student with a name, an ID, and a dict of assignment scores."""

    def __init__(self, name, ID):
        # Public attributes kept as in the original API.
        self.name = name
        self.ID = ID
        self.assignments = {}

    def add_assignment(self, assignment_name, score):
        """Record a score under the given assignment name."""
        self.assignments[assignment_name] = score

    def remove_assignment(self, assignment_name):
        """Delete the given assignment entry."""
        del self.assignments[assignment_name]

    def update_assignment(self, assignment_name, updated_score):
        """Overwrite the score stored for an assignment."""
        self.assignments[assignment_name] = updated_score

    def get_score(self, assignment_name):
        """Return the stored score, or None if the assignment is unknown."""
        return self.assignments.get(assignment_name)

    def get_GPA(self, assignments):
        """Return the mean score over all recorded assignments.

        The `assignments` parameter is unused (as in the original); the
        average always comes from self.assignments.
        """
        return sum(self.assignments.values()) / len(self.assignments)
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,135
|
Casey-S/CS1
|
refs/heads/master
|
/Gradebook/test_students.py
|
from oop_students import Student
def setup_student():
    # Create a new student entry.
    student = Student("Jeffrey Lebowski", 42)
    return student
def setup_student_assignments():
    # Add assignments to new student entry.
    student = setup_student()
    student.assignments = {"Retrieve Rug": 0, "Bowl": 70, "Abide": 100}
    return student
def test_student():
    # Test student creation.
    student = setup_student()
    assert student.name == "Jeffrey Lebowski"
    assert student.ID == 42
def test_add_assignment():
    # Test adding an assignment with score to assignments dict.
    student = setup_student()
    student.add_assignment("Defeat Nihilists", 20)
    assert student.assignments["Defeat Nihilists"] == 20
def test_remove_assignment():
    # Test removing entry from assignment dict.
    student = setup_student_assignments()
    student.remove_assignment("Retrieve Rug")
    assert student.assignments == {"Bowl": 70, "Abide": 100}
def test_update_assignment():
    # Test updating the score value in an assignment dict entry.
    student = setup_student_assignments()
    student.update_assignment("Bowl", 90)
    assert student.assignments["Bowl"] == 90
def test_get_assignment_score():
    """get_score returns the stored value for the named assignment."""
    fixture = setup_student_assignments()
    fixture.get_score("Abide")
    assert fixture.get_score("Abide") == 100
def test_get_GPA():
    """GPA is the mean score: (0 + 70 + 100) / 3.

    NOTE(review): 56 only holds under Python 2 integer division;
    Python 3 would yield 56.66... — confirm the target interpreter.
    """
    fixture = setup_student_assignments()
    fixture.get_GPA(fixture.assignments)
    assert fixture.get_GPA(fixture.assignments) == 56
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,136
|
Casey-S/CS1
|
refs/heads/master
|
/oop_test.py
|
# Implement the Animal superclass here
class Animal(object):
    """Base class for zoo animals; tracks a global population count."""

    population = 0  # class-level count of every Animal ever constructed

    def __init__(self, name):
        Animal.population += 1
        self.name = name

    @classmethod
    def populationCount(cls):
        # BUG FIX: the original returned the bare name `population`,
        # which raises NameError — the counter lives on the class.
        return cls.population

    def sleep(self):
        print("%s sleeps for 8 hours" % self.name)

    def eat(self, food):
        # NOTE: favoriteFood is assigned by subclasses, not by Animal.
        print("%s eats %s" % (self.name, food))
        if food == self.favoriteFood:
            print("YUM! %s wants more %s" % (self.name, food))
# Implement the Tiger class here as a subclass of Animal
# Hint: Implement the initializer method only
class Tiger(Animal):
    """A meat-loving Animal; only the initializer is specialized."""

    def __init__(self, name):
        super(Tiger, self).__init__(name)
        self.favoriteFood = "meat"
        self.name = name  # redundant with the super call, kept for parity
# Implement the Bear class and its initializer, sleep and eat methods here
class Bear(Animal):
    """A fish-loving Animal that hibernates instead of sleeping."""

    def __init__(self, name):
        super(Bear, self).__init__(name)
        self.favoriteFood = "fish"
        self.name = name  # redundant with the super call, kept for parity

    # Overrides Animal.sleep with hibernation behavior.
    def sleep(self):
        print("%s hibernates for 4 months" % self.name)
# Implement the Unicorn class here as a subclass of Animal
# Hint: Implement the initializer method and override the sleep method
class Unicorn(Animal):
    """A marshmallow-loving Animal with a custom sleeping arrangement."""

    def __init__(self, name):
        super(Unicorn, self).__init__(name)
        self.favoriteFood = "marshmallows"
        self.name = name  # redundant with the super call, kept for parity

    def sleep(self):
        print("%s sleeps in a cloud" % self.name)
# Implement the Giraffe class here as a subclass of Animal
# Hint: Implement the initializer method and override the eat method
class Giraffe(Animal):
    """A leaf-loving Animal that rejects any other food."""

    def __init__(self, name):
        super(Giraffe, self).__init__(name)
        self.favoriteFood = "leaves"
        self.name = name  # redundant with the super call, kept for parity

    def eat(self, food):
        print("%s eats %s" % (self.name, food))
        # Pick the reaction template, then format once.
        if food == self.favoriteFood:
            reaction = "YUM! %s wants more %s"
        else:
            reaction = "YUCK! %s spits out %s"
        print(reaction % (self.name, food))
# Implement the Bee class here as a subclass of Animal
# Hint: Implement the initializer method and override the sleep and eat methods
class Bee(Animal):
    """A pollen-loving Animal that never sleeps."""

    def __init__(self, name):
        super(Bee, self).__init__(name)
        self.favoriteFood = "pollen"
        self.name = name  # redundant with the super call, kept for parity

    def eat(self, food):
        print("%s eats %s" % (self.name, food))
        # Pick the reaction template, then format once.
        if food == self.favoriteFood:
            reaction = "YUM! %s wants more %s"
        else:
            reaction = "YUCK! %s spits out %s"
        print(reaction % (self.name, food))

    def sleep(self):
        print("%s never sleeps" % self.name)
# Implement the Zookeeper class here
class Zookeeper(object):
    """Feeds a group of animals and announces the feeding."""

    # Implement the initializer method here
    def __init__(self, name):
        self.name = name

    # Implement the feedAnimals method here
    def feedAnimals(self, animals, food):
        header = "%s is feeding %s to %i of %i total animals"
        print(header % (self.name, food, len(animals), Animal.population))
        for critter in animals:
            critter.eat(food)
            critter.sleep()
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,137
|
Casey-S/CS1
|
refs/heads/master
|
/fizzbuzz.py
|
def fizzbuzz():
user_number = input("Enter a number: ")
if user_number % 3 == 0:
print("fizz")
if user_number % 5 == 0:
print("buzz")
if user_number % 3 != 0 and user_number % 5 != 0:
print(user_number)
fizzbuzz()
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,138
|
Casey-S/CS1
|
refs/heads/master
|
/roulette.py
|
# Build a working roulette game. At minimum, this script should
# Complete one round of roulette - but if you're up to the challenge,
# feel free to build a full command line interface through which
import random
# Seed the RNG. NOTE(review): seeding with the `random` module object is
# almost certainly unintended — random.seed() with no argument is the
# conventional form; confirm intent.
random.seed(random)
bank_account = 1000  # player starts with 1000 credits
# bet_amount = 0
bet_color = None
bet_number = None
# Wheel layout: 0 and 37 are the green pockets (37 standing in for "00").
green = [0, 37]
red = [1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30, 32, 34, 36]
black = [2, 4, 6, 8, 10, 11, 13, 15, 17, 20, 22, 24, 26, 28, 29, 31, 33, 35]
# NOTE(review): indentation was lost in this copy; the defs below appear
# to live inside this loop and get re-defined every round — confirm
# against the original file.
while True:
def take_bet():
# Prompt for a bet color and wager; deducts the wager from the balance
# immediately and returns (color, amount). Python 2 I/O (raw_input/input).
global bank_account
Bet_color = raw_input("Enter bet color: ")
if Bet_color in ["red", "green", "black"]:
print("Color accepted")
# return Bet_color
else:
print("Invalid color")
# NOTE(review): an invalid color is only reported — the flow continues
# and the invalid value is still returned below.
pass
# input() (not raw_input) so Python 2 evaluates the amount to a number.
bet_amount = input("Bet amount: ")
bank_account = bank_account - bet_amount
print(bank_account)
return Bet_color, bet_amount
# bet_number = number
# bet_amount = amount
def roll_ball():
'''returns a random number between 0 and 37'''
# randint is inclusive on both ends; 37 is the second green pocket
# (see the `green` list above).
Number_rolled = random.randint(0, 37)
return Number_rolled
def check_results():
'''Compares bet_color to color rolled.'''
'''Compares bet_number to number_rolled.'''
# Rolls first, then takes the bet; returns (color_match, bet_amount).
number_rolled = roll_ball()
bet_color = take_bet()
# Despite the name, bet_color is the (color, amount) tuple from take_bet.
bet_amount = bet_color[1]
if number_rolled in red:
ball_color = "red"
elif number_rolled in black:
ball_color = "black"
elif number_rolled in green:
ball_color = "green"
print("Ball landed on %s" % ball_color)
if bet_color[0] == ball_color:
print("Color match!")
color_match = True
return color_match, bet_amount
else:
color_match = False
return color_match, bet_amount
def payout():
# returns total amount won or lost by user based on results of roll.
global bank_account
# bet_net is the (color_match, bet_amount) pair from check_results.
bet_net = check_results()
if bet_net[0] is True:
# Winning color bet pays even money: stake back plus an equal amount.
bet_amount = bet_net[1] * 2
bank_account = bank_account + bet_amount
print("You won %s" % bet_amount)
else:
# bank_account = bank_account - bet_net
# Nothing more to deduct here: take_bet already subtracted the stake.
print("You lost your wager of %s" % bet_net[1])
print("Your account balance is now %s" % bank_account)
def play_game():
"""This is the main function for the game.
When this function is called, one full iteration of roulette,
including:
Take the user's bet.
Roll the ball.
Determine if the user won or lost.
Pay or deduct money from the user accordingly.
"""
pass
# NOTE(review): indentation was lost in this copy; it is unclear whether
# the payout() call and the replay prompt below sit inside play_game or
# at the enclosing loop level — confirm against the original file.
payout()
# Python 2 syntax below (print statements, raw_input).
while True:
answer = raw_input('Run again? (y/n): ')
if answer in ('y', 'n'):
break
print 'Invalid input.'
if answer == 'y':
continue
else:
print 'Goodbye'
break
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,139
|
Casey-S/CS1
|
refs/heads/master
|
/hangman.py
|
import random
def loadWord():
    '''
    Pick and return a random secret word.

    Reads words.txt, takes its first line, splits it on single spaces,
    and returns one word chosen at random.
    '''
    with open('words.txt', 'r') as handle:
        first_line = handle.readlines()[0]
    candidates = first_line.split(' ')
    return random.choice(candidates)
def getGuessedLetter(secretWord, letterArray, incorrectArray):
    '''
    secretWord: string, the random word the user is trying to guess.
    letterArray: list of underscores/letters the length of secretWord,
        mutated in place as correct guesses are revealed.
    incorrectArray: list of incorrect guessed letters, mutated in place.

    Prompts for one guess, reveals any matching positions, prints the
    current progress and the incorrect guesses so far, and exits the
    process when the whole word has been guessed.
    '''
    userGuess = raw_input("Guess a letter: ")
    if userGuess == secretWord:
        print("WINNER")
        exit()
    if userGuess in secretWord:
        print("%s is correct." % userGuess)
        for i, letter in enumerate(secretWord):
            # BUG FIX: the original used `is` (identity) here; use equality
            # so matching does not depend on CPython string interning.
            if userGuess == letter:
                letterArray[i] = letter
    else:
        print("Incorrect, try again.")
        incorrectArray.append(userGuess)
    # Rebinding locally does not affect the caller's list; it is only
    # joined for display and the win check.
    progress = ''.join(letterArray)
    print(progress)
    if progress == secretWord:
        print("WINNER")
        exit()
    # join all elements of incorrectArray into one element and print.
    print(''.join([i + " " for i in incorrectArray]))
def hangman(secretWord):
    '''
    Run a command-line game of Hangman for *secretWord*.

    Announces the word length, then gives the player 10 rounds of
    single-letter guesses via getGuessedLetter (which shows progress and
    wrong guesses after each round), and finally reveals the word.
    '''
    blanks = ['_'] * len(secretWord)
    misses = []
    print("The word has %s letters" % len(secretWord))
    remaining = 10
    while remaining > 0:
        print("You have %s guesses left" % remaining)
        getGuessedLetter(secretWord, blanks, misses)
        remaining -= 1
    print('The word was "%s!"' % secretWord)
# Script entry point: pick a word and start the game.
secretWord = loadWord()
hangman(secretWord)
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,140
|
Casey-S/CS1
|
refs/heads/master
|
/Gradebook/test_classroom.py
|
from oop_classroom import Classroom
def setup_classroom():
    """Build a Classroom fixture for the tests below."""
    return Classroom("CS1", "Yo Mamma")
def test_add_student_to_roster():
    """add_student records the student id under their name in the roster."""
    room = setup_classroom()
    room.add_student("Test Student", 22)
    assert room.roster == {"Test Student": 22}
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,141
|
Casey-S/CS1
|
refs/heads/master
|
/pythonIO/nasa_api.py
|
import requests
# Date window for the NASA NeoWs (Near Earth Object) feed query.
start_date = '2017-10-21'
end_date = '2017-10-22'
# NOTE(review): DEMO_KEY is heavily rate-limited; use a real API key for
# anything beyond a smoke test.
nasa_response = requests.get('https://api.nasa.gov/neo/rest/v1/feed?start_date={}&end_date={}&api_key=DEMO_KEY'.format(start_date, end_date))
print(nasa_response.text)
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,142
|
Casey-S/CS1
|
refs/heads/master
|
/test_hangman.py
|
import hangman
import pytest
'''
def isWordGuessed(secretWord, correctGuesses):
secretWord: string, the random word the user is trying to guess.
This is selected on line 9.
correctGuesses: list of letters that have been guessed correctly so far.
returns: boolean, True if all letters of secretWord are in correctGuesses;
False otherwise
if correctGuesses in secretWord:
return True
else:
return False
def testisWordGuessed():
x = isWordGuessed('cat', ['c', 't', 'a'])
assert x is True
x = isWordGuessed('cat', [])
assert x is False
'''
# NOTE(review): despite the test_ name, this takes parameters — pytest
# would treat them as fixtures and error out; it also loops forever on
# interactive raw_input, so it cannot run unattended. Confirm whether it
# was meant to be a helper rather than a test.
def test_getGuessedLetter(secretWord, correctGuesses):
'''
secretWord: string, the random word the user is trying to guess.
This is selected on line 9.
correctGuesses: list of letters that have been guessed correctly so far.
returns: string, of letters and underscores. For letters in the word that
the user has guessed correctly,
the string should contain the letter at the correct position.
For letters in the word that the user has not yet guessed,
shown an _ (underscore) instead.
'''
output = ['_'] * len(secretWord)
# Infinite prompt loop: there is no break or win condition below.
while True:
userGuess = raw_input("Guess a letter: ")
if userGuess in secretWord:
print("%s is correct." % userGuess)
for i, letter in enumerate(secretWord):
# NOTE(review): `is` compares identity, not equality — works for
# interned 1-char strings in CPython only; `==` is the safe form.
if userGuess is letter:
output[i] = letter
else:
print("Incorrect, try again.")
# Rebinds the parameter to the joined progress string for display.
correctGuesses = ''.join([x + "" for x in output])
print(correctGuesses)
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,143
|
Casey-S/CS1
|
refs/heads/master
|
/algo.py
|
# beginning_number = input("Enter beginning number: ")
# ending_number = input("Enter ending number: ")
# def is_palindrome(input_string):
# split_str = list(input_string)
#
# if "" in split_str:
# print("Space")
# if split_str[0] == split_str[-1] and split_str[1] == split_str[-2]:
# print("palindrome!")
# else:
# print("Not palindrome")
#
#
# is_palindrome(raw_input("Enter test word: "))
def fib(n):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=fib(2)=1).

    Iterative form: the original naive recursion was O(2**n) in time;
    this runs in O(n) with identical results for all n >= 0.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
print(fib(10))
|
{"/test_hangman.py": ["/hangman.py"]}
|
22,148
|
megaturbo/timbreuse
|
refs/heads/master
|
/timbreuse.py
|
from flask import Flask, \
render_template
from flask_sqlalchemy import SQLAlchemy
from config import DevelopmentConfig as Config
from flask.ext.login import LoginManager
from flask import Flask, session, request, flash, url_for, redirect, render_template, abort, g
from flask.ext.login import login_user, logout_user, current_user, login_required
import datetime
# Application bootstrap: Flask app, config, ORM, and Flask-Login wiring.
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
# Imported after `db` exists because models.py imports db back from this
# module (deliberate circular import).
from models import *
login_manager = LoginManager()
login_manager.init_app(app)
# Unauthenticated requests to @login_required views redirect here.
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(id):
# Flask-Login callback: rehydrate the session user from its primary key.
return User.query.get(int(id))
@app.route('/')
def index():
# Dashboard: authenticated users see their active project, its tasks and
# the currently running timeslot; anonymous users get the landing page.
if current_user.is_authenticated:
current_project_id = current_user.current_project_id
current_project = Project.query.filter_by(id=current_project_id).first()
if current_project_id is not None:
tasks = Task.query.filter_by(project_id=int(current_project_id)).all()
current_timeslot = active_timeslot()
if current_timeslot is not None:
current_task = Task.query.filter_by(id=current_timeslot.task_id).first().name
# **locals() hands every local above to the template wholesale.
return render_template('home.html', **locals())
else:
return render_template('index.html')
# ============================================================
# Authentication shit
# ============================================================
@app.route('/register', methods=['GET', 'POST'])
def register():
# GET: show the signup form. POST: create the account if the name is free.
if request.method == 'GET':
return render_template('register.html')
# NOTE(review): this loads every user to check uniqueness; a filter_by
# query would avoid the full table scan.
if request.form['username'] in (u.username for u in User.query.all()):
flash('username invalid')
return redirect(request.referrer)
# User.__init__ hashes the password (see models.User.set_password).
user = User(request.form['username'], request.form['password'])
db.session.add(user)
db.session.commit()
flash('User successfully registered')
return redirect(url_for('login'))
@app.route('/login', methods=['GET','POST'])
def login():
# GET renders the form; POST validates credentials and opens a
# Flask-Login session.
if request.method == 'GET':
return render_template('login.html')
username = request.form['username']
password = request.form['password']
remember_me = False
if 'remember_me' in request.form:
remember_me = True
registered_user = User.query.filter_by(username=username).first()
# Same message for unknown user and bad password (no username probing).
if registered_user is None or not registered_user.check_password(password):
flash('Username or Password is invalid' , 'error')
return redirect(url_for('login'))
login_user(registered_user, remember=remember_me)
flash('Logged in successfully')
# NOTE(review): redirecting to the unvalidated 'next' query parameter is
# an open-redirect risk — confirm it is same-origin before shipping.
return redirect(request.args.get('next') or url_for('index'))
@app.route('/logout')
def logout():
# End the Flask-Login session and return to the landing page.
logout_user()
return redirect(url_for('index'))
# ============================================================
# Project shit
# ============================================================
@app.route('/new', methods=['GET', 'POST'])
@login_required
def new_project():
# GET: project creation form. POST: validate the name and persist it
# under the current user.
if request.method == 'GET':
return render_template('projects/new.html')
elif request.method == 'POST':
# Mirrors the Project.name column limit of 50 characters.
if len(request.form['project_name']) > 50:
flash('The name for your project is too long. 50 chars max.')
return redirect(request.referrer)
project = Project(request.form['project_name'])
current_user.projects.append(project)
db.session.add(project)
db.session.commit()
return redirect(url_for('index'))
@app.route('/project/<project_id>')
@login_required
def project(project_id):
# Project detail page; 404s unless the project belongs to the caller.
project = Project.query.filter_by(id=project_id, user_id=current_user.id).first_or_404()
tasks = Task.query.filter_by(project_id=project.id).all()
# Flatten every task's timeslots, sort oldest first, then pair each slot
# with its owning Task for the template.
timeslots = []
for t in tasks:
timeslots[len(timeslots):] = [x for x in t.timeslots]
timeslots = sorted(timeslots, key=lambda x: x.started_at)
timeslots = [(t, Task.query.filter_by(id=int(t.task_id)).first()) for t in timeslots]
return render_template('projects/show.html', **locals())
@app.route('/select', methods=['POST'])
@login_required
def select_shit():
# Switch the user's active project, then close any running timeslot
# (via the end_timeslot view's response).
current_project = request.form['current_project']
projects = current_user.projects
# cuz maybe user edited the html in the hidden input value
# so we still check values, yo
if int(current_project) not in (int(p.id) for p in projects):
flash('Don\'t fuck with us')
return redirect(request.referrer)
current_user.current_project_id = current_project
db.session.commit()
project = Project.query.filter_by(id=current_project).first().name
flash(u'Now working on {}'.format(project))
return end_timeslot()
# ============================================================
# Task shit
# ============================================================
@app.route('/newtask', methods=['GET', 'POST'])
@login_required
def new_task():
# GET: task creation form. POST: validate and attach the task to the
# user's currently active project.
if request.method == 'GET':
return render_template('tasks/new.html')
elif request.method == 'POST':
# Mirrors the Task.name column limit of 50 characters.
if len(request.form['task_name']) > 50:
flash('The name for your task is too long. 50 chars max.')
return redirect(request.referrer)
task = Task(request.form['task_name'], request.form['task_comment'])
project = Project.query.filter_by(id=int(current_user.current_project_id)).first()
project.tasks.append(task)
db.session.add(task)
db.session.commit()
return redirect(url_for('index'))
@app.route('/task/<task_id>')
@login_required
def show_task(task_id):
# Task detail page; forcibly logs out callers who request another
# user's task.
task = Task.query.filter_by(id=task_id).first_or_404()
project = Project.query.filter_by(id=task.project_id).first()
if project.user_id != current_user.id:
flash('You fucker won\'t spy')
logout_user()
return redirect(url_for('index'))
timeslots = TimeSlot.query.filter_by(task_id=task.id).all()
return render_template('tasks/show.html', **locals())
@app.route('/newshit', methods=['POST'])
@login_required
def new_shit():
# Start a new timeslot on a task of the active project, creating the
# task on the fly when the submitted id matches no existing task.
if current_user.current_project_id is None:
flash('Please activate a project')
return redirect(url_for('index'))
task_id = request.form['select_task']
task = Task.query.filter_by(project_id=int(current_user.current_project_id)).filter_by(id=task_id).first()
if task is None:
# NOTE(review): `taskname` is never defined in this function, so this
# branch raises NameError as written — presumably it should come from
# the form. Confirm against the original file/templates.
task = Task(taskname, '')
project = Project.query.filter_by(id=int(current_user.current_project_id)).first()
project.tasks.append(task)
db.session.add(task)
flash(u'Added task {} to project {}'.format(taskname, project.name))
# Close whichever timeslot is still open before starting a new one.
lasttime = TimeSlot.query.filter_by(ended_at=None).first()
if lasttime is not None:
lasttime.ended_at = datetime.datetime.now()
flash(u'Previous time slot ended')
now = TimeSlot(request.form['comment'], datetime.datetime.now())
task.timeslots.append(now)
db.session.commit()
flash(u'Time slot added to task {}'.format(task.name))
return redirect(request.referrer)
@app.route('/edittaskcomment/<task_id>', methods=['POST'])
@login_required
def edit_task_comment(task_id):
# Update a task's description after verifying the task belongs to the
# caller; ownership violation forces logout.
task = Task.query.filter_by(id=task_id).first_or_404()
# Collect the ids of every task owned by the current user.
tasks = []
for p in current_user.projects:
tasks[len(tasks):] = [int(t.id) for t in p.tasks]
if int(task_id) not in tasks:
flash('I don\'t like you')
logout_user()
return redirect(url_for('index'))
task.description = request.form['description']
db.session.commit()
flash('Updated description')
return redirect(request.referrer)
@app.route('/edittimeslotcomment/<timeslot_id>', methods=['POST'])
@login_required
def edit_timeslot_comment(timeslot_id):
# Update a timeslot's comment after verifying the slot belongs to the
# caller; ownership violation forces logout.
timeslot = TimeSlot.query.filter_by(id=timeslot_id).first_or_404()
# Collect the ids of every timeslot owned by the current user.
timeslots = []
for p in current_user.projects:
for t in p.tasks:
timeslots[len(timeslots):] = [int(x.id) for x in t.timeslots]
if int(timeslot_id) not in timeslots:
flash('GTFO you hacker')
logout_user()
return redirect(url_for('index'))
timeslot.comment = request.form['comment']
db.session.commit()
flash('Updated comment')
return redirect(request.referrer)
@app.route('/endtimeslot', methods=['POST'])
@login_required
def end_timeslot():
# Stamp the caller's open timeslot (if any) with an end time.
current_timeslot = active_timeslot()
if current_timeslot is None:
return redirect(url_for('index'))
current_timeslot.ended_at = datetime.datetime.now()
db.session.commit()
flash('Timeslot ended')
return redirect(url_for('index'))
# ============================================================
# random
# ============================================================
def active_timeslot():
# Return the current user's still-open TimeSlot (ended_at is NULL),
# or None when no owned slot is open.
current_timeslots = TimeSlot.query.filter_by(ended_at=None).all()
# Collect the ids of every timeslot owned by the current user.
timeslots = []
for p in current_user.projects:
for t in p.tasks:
timeslots[len(timeslots):] = [int(x.id) for x in t.timeslots]
for t in current_timeslots:
for u in timeslots:
if int(t.id) == u:
return t
# NOTE(review): if this `else` is a for-else on the inner loop (layout
# was lost in this copy), the function returns None after checking only
# the first open timeslot — confirm intent in the original file.
else:
return None
# Run the Flask development server when executed directly.
if __name__ == '__main__':
app.run()
|
{"/timbreuse.py": ["/models.py"], "/models.py": ["/timbreuse.py"]}
|
22,149
|
megaturbo/timbreuse
|
refs/heads/master
|
/models.py
|
from timbreuse import db
from werkzeug.security import generate_password_hash, \
check_password_hash
class User(db.Model):
# Application account; satisfies the (older, method-based) Flask-Login
# user protocol via the is_* methods below.
id = db.Column(db.Integer , primary_key=True)
username = db.Column('username', db.String(20), unique=True , index=True)
# Werkzeug password hash. NOTE(review): String(66) assumes a fixed hash
# length — confirm it fits the output of the werkzeug version in use.
pw_hash = db.Column('pw_hash' , db.String(66))
# Id of the project the user is currently tracking time against.
current_project_id = db.Column('current_project_id', db.Integer)
projects = db.relationship('Project', backref='user', lazy='dynamic')
def __init__(self, username, password):
self.username = username
self.set_password(password)
def set_password(self, password):
# Store only the salted hash, never the plaintext.
self.pw_hash = generate_password_hash(password)
print(self.pw_hash)
def check_password(self, password):
return check_password_hash(self.pw_hash, password)
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
# Python 2 only: `unicode` does not exist on Python 3.
return unicode(self.id)
def __repr__(self):
return '<User {}>'.format(self.username)
class Project(db.Model):
# A user's project; owns tasks and remembers its active task id.
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
current_task_id = db.Column('current_task_id', db.Integer)
tasks = db.relationship('Task', backref='project', lazy='dynamic')
def __init__(self, name):
self.name = name
class Task(db.Model):
# A unit of work inside a project; owns the timeslots booked against it.
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
description = db.Column(db.Text)
project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
timeslots = db.relationship('TimeSlot', backref='task', lazy='dynamic')
def __init__(self, name, description):
self.name = name
self.description = description
class TimeSlot(db.Model):
# A span of tracked time on a task; ended_at stays NULL while running.
id = db.Column(db.Integer, primary_key=True)
comment = db.Column(db.Text)
started_at = db.Column(db.DateTime)
ended_at = db.Column(db.DateTime)
task_id = db.Column(db.Integer, db.ForeignKey('task.id'))
def __init__(self, comment, started_at):
self.comment = comment
self.started_at = started_at
|
{"/timbreuse.py": ["/models.py"], "/models.py": ["/timbreuse.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.