index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
35,486
|
open-pythons/lottedfs
|
refs/heads/master
|
/com/proxies.py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import threading
import requests
import sqlite3
import asyncio
import aiohttp
import random
import atexit
import yaml
import time
import sys
import os
# Module bootstrap: concurrency limit, YAML config, local imports, shared sqlite.
sem = asyncio.Semaphore(50)  # throttles coroutines so we do not crawl too fast
yamlPath = 'config.yaml'
# FIX: close the config file deterministically instead of leaking the handle.
with open(yamlPath, 'r', encoding='utf-8') as _yaml:
    cont = _yaml.read()
yaml_data = yaml.load(cont, Loader=yaml.FullLoader)
# Make the package parent importable whether run as a script or a module.
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
from com.ConnectSqlite import ConnectSqlite
from com.headers import getheaders
# Shared connection for the whole module; closed by the atexit handler below.
conn = ConnectSqlite("./.SqliteData.db")
@atexit.register
def exit_handle():
    # Runs at interpreter exit: release the module-level sqlite connection
    # and report that proxy extraction has finished.
    conn.close_con()
    print('代理Ip提取结束')
class Proxies:
    """Collects free proxies from xicidaili.com, validates each against
    icanhazip.com, and stores working addresses in the shared sqlite table.

    Relies on module globals: ``sem`` (coroutine throttle), ``conn`` (sqlite
    wrapper) and ``getheaders`` (random User-Agent factory).
    """

    def __init__(self, count=2, url='http://www.xicidaili.com', step=9, timeout=10):
        self.count = count          # pages per section: range(1, count) -> count-1 pages
        self.url = url              # proxy-list site root
        self.step = step            # batch size used by slice()
        self.urls = []              # flat URL list; list of batches after slice()
        self.tkList = []            # validator threads spawned by request()
        self.timeout = timeout      # seconds, for both aiohttp and requests calls
        self.targeturl = 'http://icanhazip.com/'  # echoes the caller's public IP

    def get_urls(self):
        """Build page URLs for the nn / nt / wt proxy-list sections."""
        self.urls = ['{0}/{1}/{2}'.format(self.url, section, page)
                     for section in ('nn', 'nt', 'wt')
                     for page in range(1, self.count)]

    def slice(self):
        """Group self.urls into batches of self.step URLs for throttled crawling."""
        self.urls = [self.urls[i:i + self.step]
                     for i in range(0, len(self.urls), self.step)]

    def checkip(self, ip):
        """Return True if the 'host:port' proxy actually relays our request
        (the echo service must report the proxy's own host)."""
        headers = getheaders()
        proxies = {"http": "http://" + ip, "https": "http://" + ip}  # proxy both schemes
        requests.adapters.DEFAULT_RETRIES = 3
        proxy_host = "".join(ip.split(":")[0:1])  # host part of 'host:port'
        try:
            # FIX: honour self.timeout instead of a hard-coded 10 seconds.
            response = requests.get(
                url=self.targeturl, proxies=proxies, headers=headers,
                timeout=self.timeout, verify=False)
            return proxy_host in response.text
        except Exception:
            print('代理Ip: {0} 已失效'.format(ip))
            return False

    def findip(self, html):
        """Parse one proxy-list page and insert every working proxy into sqlite."""
        if html is None:
            # FIX: get() returns None on fetch failure; BeautifulSoup(None)
            # would raise inside the worker thread.
            return
        soup = BeautifulSoup(html, 'lxml')
        rows = soup.find_all('tr', class_='odd')  # FIX: 'all' shadowed the builtin
        for row in rows:
            cells = row.find_all('td')
            ip = cells[1].text + ':' + cells[2].text
            if self.checkip(ip):
                # NOTE(review): string-built SQL; values come from parsed table
                # cells, but a parameterized insert would be preferable.
                sql = """INSERT INTO proxyip VALUES ('{0}');""".format(ip)
                print('代理Ip: {0} 插入成功'.format(ip)
                      if conn.insert_update_table(sql)
                      else '代理Ip: {0} 插入失败'.format(ip))

    async def get(self, u):
        """Fetch one page; return the body text, or None on any failure."""
        headers = getheaders()
        async with sem:  # bound the number of concurrent fetches
            async with aiohttp.ClientSession(headers=headers) as session:
                try:
                    async with session.get(u, timeout=self.timeout) as resp:
                        return await resp.text()
                except Exception:
                    print('异常数据跳过')

    async def request(self, u):
        """Fetch a page and hand its HTML to a validator worker thread."""
        result = await self.get(u)
        tk = threading.Thread(target=self.findip, args=(result,))
        tk.start()
        self.tkList.append(tk)

    def process(self):
        """Crawl URL batches sequentially, then wait for every validator thread."""
        loop = asyncio.get_event_loop()  # hoisted: same loop for every batch
        for batch in self.urls:
            tasks = [asyncio.ensure_future(self.request(u)) for u in batch]
            loop.run_until_complete(asyncio.wait(tasks))
        for tk in self.tkList:
            tk.join()
if __name__ == "__main__":
    # Pages per proxy section; fall back to 2 when COUNT is absent or falsy.
    count = yaml_data.get('COUNT')
    count = count if count else 2
    # Single-column table of 'ip:port' strings; PRIMARY KEY de-duplicates.
    sql = '''CREATE TABLE `proxyip` (
        `ip_port` VARCHAR(25) DEFAULT NULL PRIMARY KEY
    )'''
    # NOTE: 'create_tabel' spelling matches the method name on ConnectSqlite.
    print('创建代理表成功' if conn.create_tabel(sql) else '创建代理表失败')
    p = Proxies(count=count, step=5)
    p.get_urls()
    p.slice()      # batches of 5 URLs per event-loop run
    p.process()
|
{"/com/processxlsx.py": ["/com/ConnectSqlite.py"], "/com/processdata.py": ["/com/ConnectSqlite.py"], "/com/proxies.py": ["/com/ConnectSqlite.py"], "/com/test.py": ["/com/ConnectSqlite.py"]}
|
35,487
|
open-pythons/lottedfs
|
refs/heads/master
|
/com/main.py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import threading
from bs4 import BeautifulSoup
import random
import sys
import os
import re
import sqlite3
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.styles import Alignment
import yaml
import atexit
import time
import win32api
import signal
def signal_handler(signum, frame):
    """Deliberately ignore SIGINT (Ctrl+C).

    Shutdown and persistence are handled by the win32 console control
    handler registered elsewhere in this module, so the POSIX-style
    interrupt is suppressed here.

    FIX: parameter renamed from ``signal`` to ``signum`` so it no longer
    shadows the imported ``signal`` module (handlers are called
    positionally, so the rename is caller-compatible).
    """
    pass


signal.signal(signal.SIGINT, signal_handler)
# Search endpoint; {0} is filled with the product SKU being looked up.
search_url = 'http://chn.lottedfs.cn/kr/search?comSearchWord={0}&comCollection=GOODS&comTcatCD=&comMcatCD=&comScatCD=&comPriceMin=&comPriceMax=&comErpPrdGenVal_YN=&comHsaleIcon_YN=&comSaleIcon_YN=&comCpnIcon_YN=&comSvmnIcon_YN=&comGiftIcon_YN=&comMblSpprcIcon_YN=&comSort=RANK%2FDESC&comListCount=20&txtSearchClickCheck=Y'
targeturl = 'http://icanhazip.com/'  # echo service used to validate proxy IPs
# Make the package parent importable whether run as a script or a module.
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
yamlPath = 'config.yaml'
# FIX: close the config file deterministically instead of leaking the handle.
with open(yamlPath, 'r', encoding='utf-8') as _yaml:
    cont = _yaml.read()
yaml_data = yaml.load(cont, Loader=yaml.FullLoader)
pattern = re.compile('[0-9]+')  # extracts digit runs (e.g. prices) from text
# 返回一个随机的请求头 headers
def getheaders():
    """Return a requests-style headers dict with a random User-Agent.

    FIX: the pool is now a tuple literal of constant strings; CPython folds
    it into a code-object constant, so it is no longer rebuilt on every call
    (the original built a fresh list each time).
    """
    user_agent_list = (
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
        'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2309.372 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1866.237 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36 Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.517 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36',
        'Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36',
        'Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
        'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.15 (KHTML, like Gecko) Chrome/24.0.1295.0 Safari/537.15',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/24.0.1292.0 Safari/537.14',
        'Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16',
        'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
        'Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14',
        'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',
        'Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00',
        'Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00',
        'Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00',
        'Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00',
        'Mozilla/5.0 (Windows NT 5.1) Gecko/20100101 Firefox/14.0 Opera/12.0',
        'Opera/9.80 (Windows NT 6.1; WOW64; U; pt) Presto/2.10.229 Version/11.62',
        'Opera/9.80 (Windows NT 6.0; U; pl) Presto/2.10.229 Version/11.62',
        'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52',
        'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; de) Presto/2.9.168 Version/11.52',
        'Opera/9.80 (Windows NT 5.1; U; en) Presto/2.9.168 Version/11.51',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; de) Opera 11.51',
        'Opera/9.80 (X11; Linux x86_64; U; fr) Presto/2.9.168 Version/11.50',
        'Opera/9.80 (X11; Linux i686; U; hu) Presto/2.9.168 Version/11.50',
        'Opera/9.80 (X11; Linux i686; U; ru) Presto/2.8.131 Version/11.11',
        'Opera/9.80 (X11; Linux i686; U; es-ES) Presto/2.8.131 Version/11.11',
        'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/5.0 Opera 11.11',
        'Opera/9.80 (X11; Linux x86_64; U; bg) Presto/2.8.131 Version/11.10',
        'Opera/9.80 (Windows NT 6.0; U; en) Presto/2.8.99 Version/11.10',
        'Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10',
        'Opera/9.80 (Windows NT 6.1; Opera Tablet/15165; U; en) Presto/2.8.149 Version/11.1',
        'Opera/9.80 (X11; Linux x86_64; U; Ubuntu/10.10 (maverick); pl) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (X11; Linux i686; U; ja) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (X11; Linux i686; U; fr) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; zh-tw) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; sv) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; en-US) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; cs) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.0; U; pl) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 5.1; U;) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 5.1; U; cs) Presto/2.7.62 Version/11.01',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.13) Gecko/20101213 Opera/9.80 (Windows NT 6.1; U; zh-tw) Presto/2.7.62 Version/11.01',
        'Mozilla/5.0 (Windows NT 6.1; U; nl; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.01',
        'Mozilla/5.0 (Windows NT 6.1; U; de; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.01',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; de) Opera 11.01',
        'Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (X11; Linux i686; U; it) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.6.37 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; pl) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; ko) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; fi) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; en-GB) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1 x64; U; en) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.0; U; en) Presto/2.7.39 Version/11.00',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
        'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
        'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0',
        'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
        'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
        'Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0',
        'Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/23.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:23.0) Gecko/20131011 Firefox/23.0',
        'Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/22.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) Gecko/20130328 Firefox/22.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:22.0) Gecko/20130405 Firefox/22.0',
        'Mozilla/5.0 (Microsoft Windows NT 6.2.9200.0); rv:22.0) Gecko/20130405 Firefox/22.0',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:21.0.0) Gecko/20121011 Firefox/21.0.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (X11; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20130514 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.2; rv:21.0) Gecko/20130326 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130401 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130331 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130330 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130401 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130328 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130401 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.0; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64;) Gecko/20100101 Firefox/20.0',
        'Mozilla/5.0 (Windows x86; rv:19.0) Gecko/20100101 Firefox/19.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/19.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/18.0.1',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
        'Mozilla/4.0 (Compatible; MSIE 8.0; Windows NT 5.2; Trident/6.0)',
        'Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)',
        'Mozilla/1.22 (compatible; MSIE 10.0; Windows 3.1)',
        'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))',
        'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; InfoPath.3; MS-RTC LM 8; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; chromeframe/12.0.742.112)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; Tablet PC 2.0; InfoPath.3; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; yie8)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET CLR 1.1.4322; .NET4.0C; Tablet PC 2.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; FunWebProducts)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/13.0.782.215)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/11.0.696.57)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.1; SV1; .NET CLR 2.8.52393; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; chromeframe/11.0.696.57)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/4.0; GTB7.4; InfoPath.3; SV1; .NET CLR 3.1.76908; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.2; SV1; .NET CLR 3.3.69573; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.8.36217; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; .NET CLR 2.7.58687; SLCC2; Media Center PC 5.0; Zune 3.4; Tablet PC 3.6; InfoPath.3)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; Media Center PC 4.0; SLCC1; .NET CLR 3.0.04320)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; SLCC1; .NET CLR 1.1.4322)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 3.0.04506.30)',
        'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.0; Trident/4.0; FBSMTWB; .NET CLR 2.0.34861; .NET CLR 3.0.3746.3218; .NET CLR 3.5.33652; msn OptimizedIE8;ENUS)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; Media Center PC 6.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.3; .NET4.0C; .NET4.0E; .NET CLR 3.5.30729; .NET CLR 3.0.30729; MS-RTC LM 8)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 3.0)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
        'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
        'Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; de-at) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; da-dk) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; ko-KR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; fr-FR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; cs-CZ) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; zh-cn) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; zh-cn) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; sv-se) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; ko-kr) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; it-it) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; fr-fr) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; es-es) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-gb) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; sv-SE) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; de-DE) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; hu-HU) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; de-DE) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it-IT) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-us) AppleWebKit/534.16+ (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; fr-ch) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; de-de) AppleWebKit/534.15+ (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; ar) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Android 2.2; Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-HK) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; tr-TR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; nb-NO) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-TW) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; zh-cn) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5')
    headers = {'User-Agent': random.choice(user_agent_list)}
    return headers
class ConnectSqlite:
    """Thin convenience wrapper around sqlite3.

    Opens with isolation_level=None (autocommit semantics) and
    check_same_thread=False so the connection can be shared across the
    crawler's worker threads. Remember to call close_con() when done.
    """

    def __init__(self, dbName="./sqlite3Test.db"):
        """
        Open the connection — remember to close it when finished.
        :param dbName: database file name; note it should end with '.db'
        """
        import datetime  # local import: this module does not import datetime at top level
        self._conn = sqlite3.connect(dbName, timeout=3, isolation_level=None, check_same_thread=False)
        self._cur = self._conn.cursor()
        # Timestamp prefix for log lines.
        # FIX: was sqlite3.datetime.datetime.now(), which relies on sqlite3's
        # internal (undocumented) import of datetime.
        self._time_now = "[" + datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') + "]"

    def close_con(self):
        """
        Close the cursor and connection — call explicitly when finished.
        :return:
        """
        self._cur.close()
        self._conn.close()

    def create_tabel(self, sql):
        """
        Create a table.
        :param sql: CREATE TABLE statement
        :return: True on success, False on error
        """
        # NOTE: misspelled name kept for existing callers; prefer create_table.
        try:
            self._cur.execute(sql)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[CREATE TABLE ERROR]", e)
            return False

    # Correctly-spelled alias for new call sites (backward-compatible addition).
    create_table = create_tabel

    def drop_table(self, table_name):
        """
        Drop a table.
        :param table_name: name of the table to drop (trusted input only —
            it is interpolated directly into the statement)
        :return: True on success, False on error
        """
        try:
            self._cur.execute('DROP TABLE {0}'.format(table_name))
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[DROP TABLE ERROR]", e)
            return False

    def delete_table(self, sql):
        """
        Delete rows from a table; refuses statements that are not DELETEs.
        :param sql: DELETE statement
        :return: True or False
        """
        try:
            if 'DELETE' in sql.upper():
                self._cur.execute(sql)
                self._conn.commit()
                return True
            else:
                print(self._time_now, "[EXECUTE SQL IS NOT DELETE]")
                return False
        except Exception as e:
            print(self._time_now, "[DELETE TABLE ERROR]", e)
            return False

    def fetchall_table(self, sql, limit_flag=True):
        """
        Run a SELECT.
        :param sql: query text
        :param limit_flag: True -> fetchall (list of rows); False -> fetchone
            (single row tuple). An empty result returns a warning *string* —
            callers must type-check the return value.
        :return: rows / row / warning string, or None on error
        """
        try:
            self._cur.execute(sql)
            war_msg = self._time_now + ' The [{}] is empty or equal None!'.format(sql)
            if limit_flag is True:
                rows = self._cur.fetchall()
                return rows if len(rows) > 0 else war_msg
            elif limit_flag is False:
                row = self._cur.fetchone()
                # FIX: fetchone() returns None when empty; len(None) raised a
                # TypeError that was swallowed by the except clause below.
                return row if row is not None else war_msg
        except Exception as e:
            print(self._time_now, "[SELECT TABLE ERROR]", e)

    def insert_update_table(self, sql):
        """
        Execute a single INSERT/UPDATE statement.
        :param sql: statement text
        :return: True on success, False on error
        """
        try:
            self._cur.execute(sql)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[INSERT/UPDATE TABLE ERROR]", e)
            return False

    def insert_table_many(self, sql, value):
        """
        Execute a parameterized INSERT for many rows.
        :param sql: statement with placeholders
        :param value: list of parameter tuples: [(), ()]
        :return: True on success, False on error
        """
        try:
            self._cur.executemany(sql, value)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[INSERT MANY TABLE ERROR]", e)
            return False
# Output workbook path from config.yaml, with a default fallback.
out_path = yaml_data.get('OUT_FILE_PATH')
out_path = out_path if out_path else 'data/网上最新价格.xlsx'
try:
    # load_workbook / Workbook presumably come from openpyxl, imported above
    # this excerpt -- confirm in the full file.
    wb = load_workbook(out_path)
except FileNotFoundError as e:
    wb = Workbook()
conn = ConnectSqlite("./sqlite3Ip.db")
# Last spreadsheet row processed; persisted to the `notes` table on exit.
notes_row = 2
@atexit.register
def exit_handle():
    """On interpreter exit: persist the last processed row number (notes id
    '520') and the workbook, then close the database connection."""
    print('匹配到第 {} 件商品结束'.format(notes_row))
    conn.insert_update_table('''UPDATE notes SET number={0} WHERE id={1}'''.format(notes_row, '520'))
    wb.save(out_path)
    conn.close_con()
def on_close(sig):
    """Windows console-close handler: same persistence as exit_handle, then
    terminate the process."""
    conn.insert_update_table('''UPDATE notes SET number={0} WHERE id={1}'''.format(notes_row, '520'))
    wb.save(out_path)
    conn.close_con()
    sys.exit()

# NOTE(review): win32api is Windows-only and not imported in this excerpt --
# presumably imported above; confirm in the full file.
win32api.SetConsoleCtrlHandler(on_close, True)
class Main:
    """Driver that matches spreadsheet SKUs against online search results.

    Relies on module-level globals set up in the script preamble: ``conn``,
    ``wb``, ``out_path``, ``yaml_data``, ``notes_row``, plus ``search_url``,
    ``pattern``, ``targeturl`` and ``Alignment`` which are defined/imported
    outside this excerpt -- confirm in the full file.
    """

    def __init__(self):
        pass

    def getIp(self):
        """Return one random (ip_port,) row from the proxyip table.

        When the pool is empty or near-empty, kick off the threaded crawl
        (``getip``) and poll until at least 10 proxies exist.
        :raises RuntimeError: if no proxy row could be obtained
        """
        ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
        if isinstance(ip_list, list) and len(ip_list) <= 1:
            self.getip()
            while True:
                ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
                if len(ip_list) >= 10:
                    break
                time.sleep(10)
        elif isinstance(ip_list, str):
            # fetchall_table returns a warning string for an empty result.
            self.getip()
            while True:
                ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
                if len(ip_list) >= 10:
                    break
                time.sleep(10)
        if isinstance(ip_list, list) and len(ip_list) > 0:
            return random.choice(ip_list)
        else:
            raise RuntimeError('程序异常结束')

    def requests_process(self, row, sku, commodity_name):
        """Search one SKU on the site through a random proxy.

        :param row: spreadsheet row number (for logging only)
        :param sku: product SKU to search for
        :param commodity_name: product name (for logging only)
        :return: [status message, price]; price is 0 on failure
        On 403 or timeout the failed proxy is deleted from the pool and the
        call retries recursively (NOTE(review): no recursion depth limit).
        """
        headers = getheaders()  # randomized request headers
        ip_tuple = self.getIp()
        if not isinstance(ip_tuple, tuple):
            return ['Ip代理获取失败', 0]
        proxies = {"http": "http://" + ip_tuple[0], "https": "http://" + ip_tuple[0]}  # proxy ip
        requests.adapters.DEFAULT_RETRIES = 10
        s = requests.session()
        s.keep_alive = False
        try:
            response = requests.get(url=search_url.format(sku), proxies=proxies, headers=headers, timeout=10, verify=False)
            if int(response.status_code) == 200:
                soup = BeautifulSoup(response.text, 'lxml')
                all_span = soup.select('#searchTabPrdList .imgType .listUl .productMd .price span')
                if len(all_span) > 1:
                    return ['商品搜索条数错误', 0]
                elif len(all_span) == 1:
                    match = pattern.findall(all_span[0].get_text())
                    if match:
                        return ['搜索成功', re.search(r'\d+(\.\d+)?', all_span[0].get_text()).group()]
                    else:
                        # No plain price text -> fall back to the discount price.
                        all_strong = soup.select('#searchTabPrdList .imgType .listUl .productMd .discount strong')
                        return ['搜索成功', re.search(r'\d+(\.\d+)?', all_strong[0].get_text()).group()]
                else:
                    return ['商品没有搜到', 0]
            elif int(response.status_code) == 403:
                print('第 {0} 件商品搜索失败---sku号为:{1}--商品名称为:{2}--错误为:403错误'.format(row, sku, commodity_name))
                conn.delete_table('''DELETE FROM proxyip WHERE ip_port='{0}';'''.format(ip_tuple[0]))
                return self.requests_process(row, sku, commodity_name)
            else:
                return ['商品搜索失败', 0]
        except Exception:
            print('第 {0} 件商品匹配失败---sku号为:{1}--商品名称为:{2}--错误为:超时/代理错误'.format(row, sku, commodity_name))
            conn.delete_table('''DELETE FROM proxyip WHERE ip_port='{0}';'''.format(ip_tuple[0]))
            return self.requests_process(row, sku, commodity_name)

    def process(self, rs, ws):
        """Walk every row of the input sheet, look the price up online and
        append the result to the output sheet, saving after each row.

        :param rs: source worksheet (read)
        :param ws: destination worksheet (appended to and saved to out_path)
        Resumes from the row number stored in the notes table (id '520').
        """
        global notes_row
        start_row = 2
        start_row_list = conn.fetchall_table('''select number from notes where id = '520';''')
        # NOTE(review): if the query returned the warning *string*,
        # start_row_list[0][0] is a character and range() below would fail --
        # relies on the notes row always existing.
        if len(start_row_list) > 0 and start_row_list[0][0]:
            start_row = start_row_list[0][0]
        notes_row = start_row
        if start_row == 2:
            # Fresh run: clear the output sheet before writing the header.
            for n in range(1, ws.max_row + 1):
                ws.delete_rows(n)
            wb.save(out_path)
        if ws.max_row <= 1:
            data_list = ['序号', 'sku', '品牌', '名称', '原价', '搜索结果', '网上价格']
            ws.append(data_list)
            wb.save(out_path)
        for row in range(start_row, rs.max_row + 1):
            # Column indices are configurable via config.yaml, with defaults.
            sku_column = yaml_data.get('SKU_COLUMN')  # sku
            sku_column = sku_column if sku_column else 1
            brand_column = yaml_data.get('BRAND_COLUMN')  # brand
            brand_column = brand_column if brand_column else 2
            commodity_name_column = yaml_data.get('COMMODITY_NAME_COLUMN')  # product name
            commodity_name_column = commodity_name_column if commodity_name_column else 3
            original_price_column = yaml_data.get('ORIGINAL_PRICE_COLUMN')  # original price
            original_price_column = original_price_column if original_price_column else 4
            sku = rs.cell(row=row, column=sku_column).value
            brand = rs.cell(row=row, column=brand_column).value
            commodity_name = rs.cell(row=row, column=commodity_name_column).value
            original_price = rs.cell(row=row, column=original_price_column).value
            if sku and original_price:
                data = self.requests_process(row, sku, commodity_name)
                print('第 {0} 件商品处理结果为:{1}---sku号为:{2}---商品名称为:{3}---表格价格:{4}---网上价格为:{5}'.format(
                    row, data[0], sku, commodity_name, original_price, data[1]))
                data_list = [row, sku, brand, commodity_name, original_price, data[0], data[1]]
                ws.append(data_list)
                wb.save(out_path)
            else:
                print('第 {0} 件商品匹配失败---sku号为:{1}---商品名称为:{2} --->sku非法或者价格非法'.format(row, sku, commodity_name))
                data_list = [row, sku, brand, commodity_name, original_price, 'sku非法或者价格非法', 0]
                ws.append(data_list)
                wb.save(out_path)
            notes_row = row
        print('总共匹配了 {0} 件商品价格'.format(notes_row))

    # ----------------------------- check whether a proxy ip works -----------------------------
    def checkip(self, ip):
        """Return True if ``ip`` ('host:port') works as an HTTP proxy.

        Verified by fetching ``targeturl`` (module global, an IP-echo service)
        and checking the echoed text contains the proxy host.
        """
        headers = getheaders()  # randomized request headers
        proxies = {"http": "http://" + ip, "https": "http://" + ip}  # proxy ip
        requests.adapters.DEFAULT_RETRIES = 3
        thisIP = "".join(ip.split(":")[0:1])
        try:
            response = requests.get(url=targeturl, proxies=proxies, headers=headers, timeout=5)
            if thisIP in response.text:
                return True
            else:
                return False
        except Exception:
            return False

    # --------------------------------- proxy harvesting ---------------------------------
    # Free proxies from XiciDaili
    def findip(self, type, pagenum):  # proxy type, page number
        """Scrape one XiciDaili listing page, validate each proxy with
        ``checkip`` and insert working ones into the proxyip table.
        (``type`` and ``list`` shadow builtins -- kept for compatibility.)
        """
        list = {'1': 'http://www.xicidaili.com/wn/',  # domestic https proxies
                '2': 'http://www.xicidaili.com/nn/',  # domestic elite (high-anonymity) proxies
                '3': 'http://www.xicidaili.com/nt/',  # domestic transparent proxies
                '4': 'http://www.xicidaili.com/wt/'}  # foreign http proxies
        url = list[str(type)] + str(pagenum)  # build the page url
        headers = getheaders()  # randomized request headers
        try:
            html = requests.get(url=url, headers=headers, timeout=5).text
            soup = BeautifulSoup(html, 'lxml')
            all = soup.find_all('tr', class_='odd')
            for i in all:
                t = i.find_all('td')
                ip = t[1].text + ':' + t[2].text
                is_avail = self.checkip(ip)
                if is_avail:
                    sql = """INSERT INTO proxyip VALUES ('{0}');""".format(ip)
                    print('代理Ip: {0} 插入成功'.format(ip) if conn.insert_update_table(sql) else '代理Ip: {0} 插入失败'.format(ip))
        except Exception:
            print('代理Ip请求失败,可能Ip被禁止访问,请刷新网络Ip重启exe文件')

    # ----------------------------- multi-threaded crawl entry -----------------------------
    def getip(self):
        """Spawn one thread per (type, page) pair to refill the proxy pool.
        4 proxy types x first 5 pages -> 20 crawl threads (fire and forget)."""
        threads = []
        for type in range(4):
            for pagenum in range(5):
                t = threading.Thread(target=self.findip, args=(type + 1, pagenum + 1))
                threads.append(t)
        for s in threads:  # start the crawl threads
            s.start()

    # ------------------------------- read the excel file -------------------------------
    def readFile(self):
        """Prepare the output worksheet formatting, open the configured input
        workbook (FILE_PATH in config.yaml) and hand off to ``process``."""
        ws = wb.active
        ws.column_dimensions['A'].width = 12
        ws.column_dimensions['A'].alignment = Alignment(horizontal='center', vertical='center')
        ws.column_dimensions['C'].width = 36
        ws.column_dimensions['C'].alignment = Alignment(horizontal='center', vertical='center')
        file_path = yaml_data.get('FILE_PATH')
        file_path = file_path if file_path else 'data/欧美韩免原价.xlsx'
        rb = load_workbook(file_path)
        sheets = rb.sheetnames
        sheet = sheets[0]
        rs = rb[sheet]
        self.process(rs, ws)
# ----------------------------------------- startup -----------------------------------------
if __name__ == '__main__':
    # Bootstrap the two tables (create_tabel fails harmlessly if they already
    # exist), warm the proxy pool, then start matching.
    sql = '''CREATE TABLE `proxyip` (
    `ip_port` VARCHAR(25) DEFAULT NULL PRIMARY KEY
    )'''
    print('创建代理表成功' if conn.create_tabel(sql) else '创建代理表失败')
    sql1 = '''CREATE TABLE `notes` (
    `id` VARCHAR(5) DEFAULT NULL PRIMARY KEY,
    `number` int(6) DEFAULT NULL
    )'''
    if conn.create_tabel(sql1):
        print('创建记录表成功')
        # Seed the resume marker (row 2 = first data row).
        conn.insert_update_table('''INSERT INTO notes VALUES ('520', 2);''')
    else:
        print('创建记录表失败')
    ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
    m = Main()
    if isinstance(ip_list, list) and len(ip_list) <= 10:
        m.getip()
    elif isinstance(ip_list, str):
        # Warning string means the pool is empty.
        m.getip()
    # Poll until at least 5 proxies are available.
    while True:
        ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
        if isinstance(ip_list, list) and len(ip_list) >= 5:
            break
        time.sleep(3)
        if isinstance(ip_list, list):
            print('---Ip代理数量不够,正常5个,等待数量满足开始匹配,当前代理Ip个数为:({0})---'.format(len(ip_list)))
        else:
            print('---Ip代理数量不够,正常5个,等待数量满足开始匹配,当前代理Ip个数为:({0})---'.format(0))
    m.readFile()
    # Keep the console window open (exe usage); atexit handler persists state.
    input('点击右上角关闭')
    while True:
        time.sleep(60)
|
{"/com/processxlsx.py": ["/com/ConnectSqlite.py"], "/com/processdata.py": ["/com/ConnectSqlite.py"], "/com/proxies.py": ["/com/ConnectSqlite.py"], "/com/test.py": ["/com/ConnectSqlite.py"]}
|
35,488
|
open-pythons/lottedfs
|
refs/heads/master
|
/com/test.py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import re
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
from com.headers import getheaders
from com.ConnectSqlite import ConnectSqlite
if __name__ == "__main__":
    # Ad-hoc smoke test: fetch one known SKU from the scraped originaldata table.
    conn = ConnectSqlite('./.SqliteData.db')
    print(conn.fetchall_table('''select sku, original_price, code from originaldata where sku='2069802403';'''))
|
{"/com/processxlsx.py": ["/com/ConnectSqlite.py"], "/com/processdata.py": ["/com/ConnectSqlite.py"], "/com/proxies.py": ["/com/ConnectSqlite.py"], "/com/test.py": ["/com/ConnectSqlite.py"]}
|
35,490
|
Thomas-Nexus/Django_Image_API
|
refs/heads/main
|
/api/urls.py
|
from .views import *
from django.urls import path
# Route /id/<id> to the image-serving API view (api.views.ImageAPI).
urlpatterns = [
    path('id/<id>', ImageAPI.as_view()),
]
|
{"/api/urls.py": ["/api/views.py"], "/images/views.py": ["/images/models.py", "/images/forms.py"], "/images/admin.py": ["/images/models.py"], "/images/forms.py": ["/images/models.py"], "/images/urls.py": ["/images/views.py"], "/api/views.py": ["/images/models.py"]}
|
35,491
|
Thomas-Nexus/Django_Image_API
|
refs/heads/main
|
/images/views.py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth import authenticate, login, logout
from django.views.generic import ListView, View
from django.contrib import messages
from django.urls import reverse
from .models import *
from .forms import *
class all_(View):
    """Render the gallery page with every uploaded image."""

    def get(self, *args, **kwargs):
        gallery = Images.objects.all()
        return render(self.request, 'all_images.html', {'image': gallery})
def likes(request, pk):
    """Add one like to image ``pk`` and return to the main gallery."""
    image = get_object_or_404(Images, pk=pk)
    if image:
        image.likes = image.likes + 1
        image.save()
    return redirect("images:all")
class nature(View):
    """Render the nature page: the full image set plus the category-1 subset."""

    def get(self, *args, **kwargs):
        everything = Images.objects.all()
        nature_only = Images.objects.filter(category=1)
        ctx = {'image': everything, 'one': nature_only}
        return render(self.request, 'nature.html', ctx)
def likes_n(request, pk):
    """Add one like to image ``pk`` and return to the nature page."""
    image = get_object_or_404(Images, pk=pk)
    if image:
        image.likes = image.likes + 1
        image.save()
    return redirect("images:nature")
class space(View):
    """Render the space page: the full image set plus the category-2 subset."""

    def get(self, *args, **kwargs):
        everything = Images.objects.all()
        space_only = Images.objects.filter(category=2)
        ctx = {'image': everything, 'two': space_only}
        return render(self.request, 'space.html', ctx)
def likes_s(request, pk):
    """Add one like to image ``pk`` and return to the space page."""
    image = get_object_or_404(Images, pk=pk)
    if image:
        image.likes = image.likes + 1
        image.save()
    return redirect("images:space")
class wildlife(View):
    """Render the wildlife page: the full image set plus the category-3 subset."""

    def get(self, *args, **kwargs):
        everything = Images.objects.all()
        wildlife_only = Images.objects.filter(category=3)
        ctx = {'image': everything, 'three': wildlife_only}
        return render(self.request, 'wildlife.html', ctx)
def likes_w(request, pk):
    """Add one like to image ``pk`` and return to the wildlife page."""
    image = get_object_or_404(Images, pk=pk)
    if image:
        image.likes = image.likes + 1
        image.save()
    return redirect("images:wildlife")
class popular_sort(View):
    """List all images ordered by like count, most liked first."""

    def get(self, *args, **kwargs):
        ranked = Images.objects.all().order_by('-likes')
        return render(self.request, 'all_images_pop.html', {'popular': ranked})
class newest_sort(View):
    """List all images ordered by upload time, newest first."""

    def get(self, *args, **kwargs):
        by_recency = Images.objects.all().order_by('-time')
        return render(self.request, 'all_images_new.html', {'ordered': by_recency})
def upload(request):
    """Show the upload form; on a valid POST save the image and redirect back."""
    form = UploadForm()
    if request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if form.is_valid():
            # Flash message first, matching the original call order.
            messages.success(request, 'Successfully Submitted.')
            form.save()
            return redirect('/images/upload')
    return render(request, 'upload.html', {'form': form})
def register(request):
    """Show the registration form; create the account on a valid POST."""
    form = RegisterUserForm()
    if request.method == 'POST':
        form = RegisterUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Account Created. Welcome {username}')
            return redirect('/images/register')
    return render(request, 'register.html', {'form': form})
def login_p(request):
    """Authenticate a username/password POST; go home on success, otherwise
    flash an error and re-show the login form."""
    if request.method == 'POST':
        name = request.POST.get('username')
        pw = request.POST.get('password')
        account = authenticate(request, username=name, password=pw)
        if account is None:
            messages.info(request, 'Username Or Password Incorrect')
        else:
            login(request, account)
            return redirect('/')
    return render(request, 'login.html', {})
def logout_p(request):
    """Log the current user out and return to the login page."""
    logout(request)
    return redirect('/images/login')
|
{"/api/urls.py": ["/api/views.py"], "/images/views.py": ["/images/models.py", "/images/forms.py"], "/images/admin.py": ["/images/models.py"], "/images/forms.py": ["/images/models.py"], "/images/urls.py": ["/images/views.py"], "/api/views.py": ["/images/models.py"]}
|
35,492
|
Thomas-Nexus/Django_Image_API
|
refs/heads/main
|
/images/admin.py
|
from django.contrib import admin
from .models import *
from . import models

# Expose the Images model in the Django admin.
admin.site.register(Images)
class Author(admin.ModelAdmin):
    """Admin options (list columns, slug prefill) for a post-like model.

    NOTE(review): this ModelAdmin is never passed to admin.site.register()
    in this file -- confirm it is registered elsewhere.
    """
    list_display = ('title', 'id', 'status', 'slug', 'author')
    # BUG FIX: prepopulated_fields values must be a tuple/list of field
    # names; a bare string fails Django's admin system check (admin.E029).
    prepopulated_fields = {'slug': ('title',)}
# Expose the Category model in the Django admin.
admin.site.register(Category)
|
{"/api/urls.py": ["/api/views.py"], "/images/views.py": ["/images/models.py", "/images/forms.py"], "/images/admin.py": ["/images/models.py"], "/images/forms.py": ["/images/models.py"], "/images/urls.py": ["/images/views.py"], "/api/views.py": ["/images/models.py"]}
|
35,493
|
Thomas-Nexus/Django_Image_API
|
refs/heads/main
|
/images/forms.py
|
from .models import *
from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
class UploadForm(forms.ModelForm):
    """ModelForm for submitting a new image (title, category, file)."""
    class Meta:
        model = Images
        fields = ['title', 'category', 'image']
class RegisterUserForm(UserCreationForm):
    """Sign-up form exposing username, email and the two password fields."""
    # BUG FIX: error_messages must be a dict keyed by error code; the
    # original `{'Error.'}` was a *set* literal, which breaks
    # UserCreationForm's self.error_messages['password_mismatch'] lookup
    # when the two passwords differ. The message text is preserved.
    error_messages = {
        'password_mismatch': 'Error.',
    }

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
|
{"/api/urls.py": ["/api/views.py"], "/images/views.py": ["/images/models.py", "/images/forms.py"], "/images/admin.py": ["/images/models.py"], "/images/forms.py": ["/images/models.py"], "/images/urls.py": ["/images/views.py"], "/api/views.py": ["/images/models.py"]}
|
35,494
|
Thomas-Nexus/Django_Image_API
|
refs/heads/main
|
/images/urls.py
|
from .views import *
from django.urls import path, reverse
from django.views.generic import TemplateView, View
app_name = 'images'
# Gallery, per-category pages, like endpoints (one per page so each redirects
# back to its own view), sorting, upload and auth routes.
urlpatterns = [
    path('', TemplateView.as_view(template_name='home.html')),
    path('images/all', all_.as_view(), name='all'),
    path('likes/<int:pk>', likes, name='likes'),
    path('images/nature', nature.as_view(), name='nature'),
    path('likes_n/<int:pk>', likes_n, name='likes_n'),
    path('images/space', space.as_view(), name='space'),
    path('likes_s/<int:pk>', likes_s, name='likes_s'),
    path('images/wildlife', wildlife.as_view(), name='wildlife'),
    path('likes_w/<int:pk>', likes_w, name='likes_w'),
    path('images/popular', popular_sort.as_view(), name='popular'),
    path('images/new', newest_sort.as_view(), name='new'),
    path('images/upload', upload, name='upload'),
    path('images/login', login_p, name='login'),
    path('images/logout', logout_p, name='logout'),
    path('images/register', register, name='register'),
]
|
{"/api/urls.py": ["/api/views.py"], "/images/views.py": ["/images/models.py", "/images/forms.py"], "/images/admin.py": ["/images/models.py"], "/images/forms.py": ["/images/models.py"], "/images/urls.py": ["/images/views.py"], "/api/views.py": ["/images/models.py"]}
|
35,495
|
Thomas-Nexus/Django_Image_API
|
refs/heads/main
|
/api/views.py
|
from .custom import *
from images.models import *
from django.shortcuts import render
from rest_framework import generics
from rest_framework.response import Response
class ImageAPI(generics.RetrieveAPIView):
    """Serve the raw image file of the ``Images`` row matching ``id``.

    JPEGRenderer comes from the local ``custom`` module (star import above).
    """
    renderer_classes = [JPEGRenderer]

    def get(self, request, *args, **kwargs):
        # NOTE(review): Images.objects.get raises DoesNotExist -> HTTP 500
        # for unknown ids; get_object_or_404 would be friendlier. Confirm
        # intent before changing.
        queryset = Images.objects.get(id=self.kwargs['id']).image
        data = queryset
        return Response(data, content_type='image/jpg')
|
{"/api/urls.py": ["/api/views.py"], "/images/views.py": ["/images/models.py", "/images/forms.py"], "/images/admin.py": ["/images/models.py"], "/images/forms.py": ["/images/models.py"], "/images/urls.py": ["/images/views.py"], "/api/views.py": ["/images/models.py"]}
|
35,496
|
Thomas-Nexus/Django_Image_API
|
refs/heads/main
|
/images/models.py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
def dir_path(instance, filename):
    """Build the storage path (relative to MEDIA_ROOT) for an uploaded image.

    :param instance: model instance being saved (unused)
    :param filename: original upload file name
    :return: 'images/<filename>'
    """
    # BUG FIX: the original returned 'images/<filename>/' -- upload_to
    # callables must return the full *file* path, and the trailing slash
    # turned the stored file name into a directory-like path.
    return 'images/{0}'.format(filename)
class Category(models.Model):
    """Image category (e.g. nature/space/wildlife), referenced by Images."""
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
class Images(models.Model):
    """An uploaded image with its category, like counter and author."""
    title = models.CharField(max_length=200)
    image = models.ImageField(upload_to=dir_path, blank=False)
    # PROTECT: a Category/User cannot be deleted while images reference it.
    category = models.ForeignKey(Category, on_delete=models.PROTECT, default=1)
    likes = models.IntegerField(default=0)
    time = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey(User, on_delete=models.PROTECT, related_name='author', null=True)

    def __str__(self):
        return self.title
|
{"/api/urls.py": ["/api/views.py"], "/images/views.py": ["/images/models.py", "/images/forms.py"], "/images/admin.py": ["/images/models.py"], "/images/forms.py": ["/images/models.py"], "/images/urls.py": ["/images/views.py"], "/api/views.py": ["/images/models.py"]}
|
35,497
|
selimfirat/ocfpad
|
refs/heads/master
|
/plot_helpers.py
|
# Reference: https://gitlab.idiap.ch/biometric-resources/lab-pad/blob/master/notebook/plot.py
import numpy
import bob.measure
from matplotlib import pyplot
def plot_scores_distributions(scores_dev, scores_eval, path, title='Score Distribution', n_bins=50, threshold_height=1,
                              legend_loc='best'):
    """Plot dev/eval score histograms side by side and save the figure.

    Parameters
    ----------
    scores_dev : list
        [negative, positive] score arrays for the development set
    scores_eval : list
        [negative, positive] score arrays for the evaluation set
    path : str
        Output file passed to ``pyplot.savefig``
    title : string
        Title of the plot
    n_bins : int
        Number of bins in the histogram
    threshold_height : float
        Height of the dashed EER-threshold marker line
    legend_loc : str
        Legend placement (matplotlib ``loc``)
    """
    # The EER threshold is computed on the dev set only and drawn on both
    # panels. (Removed the unused neg_dev/pos_dev locals.)
    threshold = bob.measure.eer_threshold(scores_dev[0], scores_dev[1])
    f, ax = pyplot.subplots(1, 2, figsize=(15, 5))
    f.suptitle(title, fontsize=20)
    ax[0].hist(scores_dev[1], density=False, color='C1', bins=n_bins, label='Bona-fide')
    ax[0].hist(scores_dev[0], density=False, color='C7', bins=n_bins, alpha=0.4, hatch='\\\\',
               label='Presentation Attack')
    ax[0].vlines(threshold, 0, threshold_height, colors='r', linestyles='dashed', label='EER Threshold')
    ax[0].set_title('Development set')
    ax[0].set_xlabel("Score Value")
    ax[0].set_ylabel("Probability Density")
    ax[0].legend(loc=legend_loc)
    ax[1].hist(scores_eval[1], density=False, color='C1', bins=n_bins, label='Bona-fide')
    ax[1].hist(scores_eval[0], density=False, color='C7', bins=n_bins, alpha=0.4, hatch='\\\\',
               label='Presentation Attack')
    ax[1].vlines(threshold, 0, threshold_height, colors='r', linestyles='dashed', label='EER Threshold')
    ax[1].set_title('Evaluation set')
    ax[1].set_xlabel("Score Value")
    ax[1].set_ylabel("Probability Density")
    ax[1].legend(loc=legend_loc)
    pyplot.savefig(path)
    pyplot.clf()
def compare_dets(scores_neg, scores_pos, labels, ax_lim=(0.01, 90, 0.01, 90)):
    """Overlay DET curves for several systems on a single figure.

    Parameters
    ----------
    scores_neg : list
        Per-system negative (attack) score arrays
    scores_pos : list
        Per-system positive (bona-fide) score arrays
    labels : list
        One legend label per system
    ax_lim : sequence of 4 floats
        Axis limits for ``bob.measure.plot.det_axis``.
        (Default changed from a mutable list to an equivalent tuple to
        avoid the shared-mutable-default pitfall; callers may still pass
        a list.)
    """
    colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7']
    assert len(scores_neg) == len(labels)
    assert len(scores_pos) == len(labels)
    assert len(scores_neg) <= len(colors)  # one distinct color per curve
    n_points = 100
    pyplot.figure(figsize=(7, 5))
    pyplot.title('DET curves', fontsize=16, pad=10)
    for i in range(len(scores_neg)):
        bob.measure.plot.det(scores_neg[i], scores_pos[i], n_points, color=colors[i], linestyle='-', label=labels[i])
    bob.measure.plot.det_axis(ax_lim)
    pyplot.xlabel('APCER (%)')
    pyplot.ylabel('BPCER (%)')
    pyplot.legend()
    pyplot.grid(True)
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,498
|
selimfirat/ocfpad
|
refs/heads/master
|
/num_frames_hist.py
|
import numpy as np
import h5py
import os
import seaborn as sns
import matplotlib.pyplot as plt
from h5py import Dataset
import pandas as pd
sns.set(style="white", palette="muted", color_codes=True)
rs = np.random.RandomState(10)
# Set up the matplotlib figure (one axes, reused for both datasets).
fig, axes = plt.subplots(1, 1, figsize=(7, 7), sharex=True)
data = ["replay_attack", "replay_mobile"]
for datum in data:
    data_path = os.path.join("/mnt/storage2/pad/", datum, "vgg16_frames.h5")
    # BUG FIX: the HDF5 handle was previously also bound to `f`, silently
    # clobbering the figure created above and leaking the file handle; use
    # a distinct name and close it once counted.
    h5f = h5py.File(data_path, "r")
    num_frames = []
    # Walk client -> type -> video; some attack branches nest one group deeper.
    for fk, fv in h5f.items():
        for tidx, typ in fv.items():
            for vidx, vid in typ.items():
                if type(vid) is Dataset:
                    num_frames.append(vid.shape[0])
                else:
                    for viddx, vidd in vid.items():
                        num_frames.append(vidd.shape[0])
    h5f.close()
    ax = sns.distplot(num_frames, kde=False, color="b", ax=axes)
    ax.set(title = datum.replace("_", " ").title() + " - Number of Frames Histogram", ylabel = "Number of Videos", xlabel="Number of Frames")
    # NOTE(review): the axes are never cleared between datasets, so the
    # second PDF also contains the first histogram -- confirm whether the
    # overlay is intentional.
    plt.savefig(f"figures/num_frames_{datum}.pdf")
    df = pd.DataFrame(num_frames)
    print(datum)
    print(df.describe())
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,499
|
selimfirat/ocfpad
|
refs/heads/master
|
/convlstm_autoencoder.py
|
import argparse
import numpy as np
import os
import pandas as pd
import torch
from pyod.models.base import BaseDetector
from pyod.utils import invert_order
from sklearn.metrics import roc_curve, roc_auc_score
from torch import nn
from torch.autograd import Variable
from torch.optim import Adam
from tqdm import tqdm
import h5py
from convlstm_cell import ConvLSTMCell
import torch
np.random.seed(0)
torch.manual_seed(0)
class ConvLSTMAutoencoder():
    """Sequence autoencoder built around a single ConvLSTM cell.

    Each video is fed frame by frame; the hidden state is trained to
    reconstruct the input frame, and the per-video mean reconstruction error
    is used as the anomaly score. Requires CUDA: every tensor is moved to
    device ``cuda_idx``.
    """

    def __init__(self, num_epochs=1, lr=0.001, cuda_idx=0, reg=0.5, kernel_size=3):
        self.cuda_idx = cuda_idx
        # 3 channels in and out: the cell reconstructs RGB frames directly.
        self.model = ConvLSTMCell(in_channels=3, out_channels=3, kernel_size=kernel_size, cuda_idx=cuda_idx).cuda(self.cuda_idx)
        self.optimizer = Adam(self.model.parameters(), lr=lr)  # default lr: 1e-3
        self.num_epochs = num_epochs
        self.reg_coef = reg  # weight of the manual L2 penalty in partial_fit_video

    def initial_hidden(self):
        """Return zeroed (h, c) states sized for a batch of one 3x224x224 frame."""
        return (Variable(torch.zeros(1, 3, 224, 224)).cuda(self.cuda_idx),
                Variable(torch.zeros(1, 3, 224, 224)).cuda(self.cuda_idx))

    def partial_fit_video(self, X, y):
        """Run one optimizer step on a single video.

        :param X: frame tensor on GPU -- assumed (T, 1, 3, 224, 224); TODO confirm
        :param y: reconstruction target (a clone of X)
        :return: scalar loss value for this video
        """
        h, c = self.initial_hidden()
        self.model.zero_grad()
        self.optimizer.zero_grad()
        y_pred = torch.empty(X.shape).cuda(self.cuda_idx)
        for fidx in range(X.shape[0]):
            frame = X[fidx, :, :, :]
            h, c = self.model.forward(frame, h, c)
            y_pred[fidx, :, :, :] = h  # the hidden state doubles as the reconstruction
        loss = torch.mean((y_pred - y) ** 2)
        if self.reg_coef > 0:
            # Manual L2 regularization over every parameter of the cell.
            reg = 0
            for param in self.model.parameters():
                reg += (param ** 2).sum()
            loss += reg * self.reg_coef
        loss.backward(retain_graph=True)
        self.optimizer.step()
        return loss.item()
        # return h, c # to make stateful

    def fit(self, X):
        """Train on a list of videos (each a (T, 3, 224, 224) tensor),
        printing the mean per-video loss after each epoch."""
        self._classes = 2  # kept for pyod BaseDetector-style compatibility
        data = [x.unsqueeze(1) for x in X]  # add a batch dim -> (T, 1, 3, 224, 224)
        targets = [data[i].clone() for i in range(len(data))]
        for i in range(self.num_epochs):
            losses = []
            for X_vid, y_vid in tqdm(zip(data, targets)):
                X_vid = X_vid.cuda(self.cuda_idx)
                y_vid = y_vid.cuda(self.cuda_idx)
                loss = self.partial_fit_video(X_vid, y_vid)
                losses.append(loss)
            mean_loss = np.array(losses).mean()
            print(mean_loss)
        # self.decision_scores_ = invert_order(self.decision_function(X))
        # self._process_decision_scores()

    def decision_function(self, X):
        """Return per-video mean reconstruction errors, shape (len(X), 1).
        Higher error -> more anomalous."""
        data = [x.unsqueeze(1) for x in X]
        targets = [data[i].clone() for i in range(len(data))]
        reconstruction_errors = np.empty((len(X), 1))
        for idx, (X_vid, y_vid) in enumerate(zip(data, targets)):
            X_vid = X_vid.cuda(self.cuda_idx)
            y_vid = y_vid.cuda(self.cuda_idx)
            h, c = self.initial_hidden()
            y_pred = torch.empty(X_vid.shape).cuda(self.cuda_idx)
            for fidx in range(X_vid.shape[0]):
                frame = X_vid[fidx, :, :, :]
                h, c = self.model.forward(frame, h, c)
                y_pred[fidx, :, :, :] = h
            reconstruction_errors[idx] = torch.mean((y_pred - y_vid) ** 2).item()
        return reconstruction_errors
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,500
|
selimfirat/ocfpad
|
refs/heads/master
|
/extract_openface.py
|
import os
from tqdm import tqdm
# Host-side paths (videos in, aligned frames out) and the matching paths
# inside the running 'openface' docker container.
input_path = "/mnt/storage2/pad/videos"
output_path = "/mnt/storage2/pad/frames"
docker_path = "/home/openface-build/build/bin"
videos_path = "/home/openface-build/videos"
for root, dirs, files in tqdm(os.walk(input_path)):
    hpath = root.replace(input_path, "").strip("/")
    print(hpath)
    for file in tqdm(files):
        if not file.endswith(".mov"):
            continue
        fpath = os.path.join(videos_path, hpath, file)
        # Run OpenFace FeatureExtraction inside the container
        # (224x224 similarity-aligned face crops).
        exec_command = f"docker exec -it openface bash -c 'cd {docker_path}; ./FeatureExtraction -f {fpath} -simsize 224 -simalign'"
        os.system(exec_command)
        aligned_frames_path = os.path.join(docker_path, "processed", file.replace(".mov", "_aligned"))
        res_path = os.path.join(output_path, hpath)
        if not os.path.exists(res_path):
            os.makedirs(res_path)
        # Copy the aligned frames back out of the container to the host.
        cp_command = f"docker cp openface:{aligned_frames_path} {res_path}"
        os.system(cp_command)
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,501
|
selimfirat/ocfpad
|
refs/heads/master
|
/convlstm_cell.py
|
import torch
from torch import nn
from torch import tanh, sigmoid, zeros
from torch.autograd import Variable
class ConvLSTMCell(nn.Module):
    """Single convolutional LSTM cell with Hadamard peephole terms
    (Wi/Wf/Wo), hard-coded to 224x224 feature maps.
    """

    def __init__(self, in_channels, out_channels, kernel_size, cuda_idx):
        super().__init__()
        # 'same' padding so spatial size is preserved (odd kernel assumed).
        padding = int((kernel_size - 1) / 2)
        # Input-to-state convolutions (the only ones carrying a bias).
        self.Wxi = nn.Conv2d(in_channels, out_channels, kernel_size, 1, padding, bias=True)
        self.Wxf = nn.Conv2d(in_channels, out_channels, kernel_size, 1, padding, bias=True)
        self.Wxc = nn.Conv2d(in_channels, out_channels, kernel_size, 1, padding, bias=True)
        self.Wxo = nn.Conv2d(in_channels, out_channels, kernel_size, 1, padding, bias=True)
        # Hidden-to-state convolutions (no bias; the Wx* bias suffices).
        self.Whi = nn.Conv2d(out_channels, out_channels, kernel_size, 1, padding, bias=False)
        self.Whf = nn.Conv2d(out_channels, out_channels, kernel_size, 1, padding, bias=False)
        self.Whc = nn.Conv2d(out_channels, out_channels, kernel_size, 1, padding, bias=False)
        self.Who = nn.Conv2d(out_channels, out_channels, kernel_size, 1, padding, bias=False)
        # Peephole weights, elementwise-multiplied with the cell state.
        self.Wi = nn.Parameter(Variable(zeros(1, out_channels, 224, 224)).cuda(cuda_idx))
        self.Wf = nn.Parameter(Variable(zeros(1, out_channels, 224, 224)).cuda(cuda_idx))
        self.Wo = nn.Parameter(Variable(zeros(1, out_channels, 224, 224)).cuda(cuda_idx))

    def forward(self, x, h, c):
        """One recurrence step.

        :param x: input frame
        :param h: previous hidden state
        :param c: previous cell state
        :return: (new hidden state, new cell state)
        """
        i = sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wi)    # input gate
        f = sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wf)    # forget gate
        c_t = f * c + i * tanh(self.Wxc(x) + self.Whc(h))       # cell update
        o = sigmoid(self.Wxo(x) + self.Who(h) + c_t * self.Wo)  # output gate (peeps at new cell state)
        h_t = o * tanh(c_t)
        return h_t, c_t
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,502
|
selimfirat/ocfpad
|
refs/heads/master
|
/extract_features.py
|
import h5py
import PIL
import argparse
import skvideo.io
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from tqdm import tqdm
from bob.ip.qualitymeasure import galbally_iqm_features as iqm
import numpy as np
import os
class VideoFrames(Dataset):
    """Dataset over the frames of a single video tensor.

    Each item is one frame (indexed along dim 0 of ``tensors``), optionally
    passed through ``transform``.
    """

    def __init__(self, tensors, transform=None):
        self.tensors = tensors
        self.transform = transform

    def __getitem__(self, index):
        frame = self.tensors[index]
        return self.transform(frame) if self.transform else frame

    def __len__(self):
        return self.tensors.size(0)
# CLI: choose input/output paths, torch device, feature type and whether to
# operate on whole frames or face crops.
parser = argparse.ArgumentParser("Extract VGG Features From Videos")
parser.add_argument("--input", default="/mnt/storage2/pad/videos/replay_mobile/", type=str, help="Input directory to be extracted.")
parser.add_argument("--output", default="/mnt/storage2/pad/replay_mobile/image_quality.h5", help="Output file for frames to write.")
parser.add_argument("--device", default="cuda:1", type=str)
parser.add_argument("--feature", default="image_quality", type=str, choices=["vgg16", "raw", "vggface", "image_quality"])
parser.add_argument("--type", default="frame", type=str, choices=["frame", "face"])
args = vars(parser.parse_args())
input_path = args["input"]
output_path = args["output"]
device = torch.device(args["device"] if torch.cuda.is_available() else "cpu")
# VGG16
if args["feature"] == "vgg16":
    vgg16 = models.vgg16(pretrained=True).to(device)
    # Classifier truncated before its last two layers -> fc7-level features.
    vgg_extractor = torch.nn.Sequential(
        # stop at conv4
        *list(vgg16.classifier)[:-2]
    ).to(device)
elif args["feature"] == "vggface":
    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    # Keras VGGFace is imported lazily so the TF stack only loads when needed.
    from keras_vggface.vggface import VGGFace
    from keras.engine import Model
    from keras_vggface import utils
    vggface = VGGFace(model='vgg16')
    out = vggface.get_layer("fc7/relu").output
    vggface_new = Model(vggface.input, out)
def vgg16_features(x):
    """Extract fc7-level VGG16 features for a batch of frames.

    Uses the module globals ``vgg16``/``vgg_extractor``/``device`` created
    in the --feature vgg16 branch above.
    :param x: batch tensor of normalized frames
    :return: numpy array of per-frame feature vectors
    """
    x = x.to(device)
    x = vgg16.features(x)
    x = vgg16.avgpool(x)
    x = torch.flatten(x, 1)
    x = vgg_extractor(x)
    x = x.detach().cpu().numpy()
    return x
def vggface_features(x):
    """Extract VGGFace fc7/relu features via the Keras model built above.

    :param x: batch tensor of frames (channels-first)
    :return: numpy array of per-frame feature vectors
    """
    x = x.detach().cpu().numpy()
    ## VGGFACE DISCLAIMER: Note that when using TensorFlow, for best performance you should set `image_data_format="channels_last"` in your Keras config at ~/.keras/keras.json.
    x = utils.preprocess_input(x, data_format="channels_first", version=1)
    res_arr = vggface_new.predict(x)
    return res_arr
def image_quality_features(x):
    """Compute Galbally image-quality measures per frame
    (bob.ip.qualitymeasure), one feature vector per frame.

    :param x: batch tensor of frames
    :return: numpy array stacking the per-frame quality vectors
    """
    x = x.detach().cpu().numpy()
    res = []
    for i in range(x.shape[0]):
        rx = iqm.compute_quality_features(x[i, :, :, :])
        res.append(np.array(rx))
    res = np.array(res)
    return res
def raw_features(x):
    """Convert a float frame batch (values in [0, 1]) to uint8 in [0, 255].

    :param x: torch.Tensor of normalized frames
    :return: numpy uint8 array of the same shape
    """
    # BUG FIX: .cpu().numpy() on a tensor already on the CPU *shares* memory
    # with it, so the previous in-place `x *= 255` silently scaled the
    # caller's tensor. Multiply out-of-place to leave the input untouched.
    arr = x.detach().cpu().numpy() * 255
    arr = arr.astype(np.uint8)
    # print(arr.max(), arr.min(), arr.mean(), arr.std())
    return arr
def crop_faces(r, bboxes):
    """Crop each frame of ``r`` to its per-frame face bounding box.

    replay_attack .face files carry a leading frame-index column, hence the
    5-field unpack for that dataset.
    """
    res = []
    for i in range(r.shape[0]):
        if "replay_attack" in args["input"]:
            _, x, y, w, h = bboxes[i]
        else:
            x, y, w, h = bboxes[i]
        a = r[i, :, y:y+h+1, x:x+w+1]
        res.append(a)
    # NOTE(review): `res` (the actual crops) is built but never returned --
    # `return r` hands back the *uncropped* frames, so --type face currently
    # behaves like --type frame. `return res` was likely intended, but the
    # downstream torch.tensor / VideoFrames code would need to handle
    # variable-size crops; confirm before changing.
    return r
def extract_features(inp, feature_extractor, bboxes):
    """Read a video, optionally crop faces, resize to 224x224 and run the
    feature extractor over it in batches.

    :param inp: video file path
    :param feature_extractor: one of the *_features functions above
    :param bboxes: per-frame face boxes (used only when --type face)
    :return: numpy array of per-frame features, concatenated over batches
    """
    r = skvideo.io.vread(inp)
    r = r.transpose((0, 3, 1, 2))  # (T, H, W, C) -> (T, C, H, W)
    if args["type"] == "face":
        r = crop_faces(r, bboxes)
    r = torch.tensor(r)
    complst = [
        transforms.ToPILImage(),
        transforms.Resize((224, 224), interpolation=PIL.Image.BILINEAR),
        transforms.ToTensor()
    ]
    if args["feature"] == "vgg16":
        # ImageNet normalization, required by the pretrained VGG16.
        complst.append(
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        )
    comp = transforms.Compose(complst)
    dl = torch.utils.data.DataLoader(VideoFrames(r, transform=comp), batch_size=64, num_workers=8,
                                     shuffle=False, pin_memory=True)
    batches = []
    for i, inp in enumerate(dl):  # NOTE: rebinds the `inp` parameter to each batch
        res = feature_extractor(inp)
        batches.append(res)
    res_arr = np.concatenate(batches, axis=0)
    return res_arr
# Walk the input tree and append one HDF5 dataset per video, grouped by the
# relative directory path.
f = h5py.File(output_path, 'a')
for root, dirs, files in tqdm(os.walk(input_path)):
    hpath = root.replace(input_path, "")
    res_dir = os.path.join(output_path, hpath)
    print(hpath)
    for file in tqdm(files):
        if not file.endswith(".mov"):
            continue
        inp = os.path.join(root, file)
        bboxes = None
        if args["type"] == "face":
            # Face-location annotations ship in dataset-specific layouts.
            if "replay_mobile" in args["input"]:
                faces_path = os.path.join(args["input"], "faceloc", "rect", hpath, file.replace(".mov", ".face"))
            elif "replay_attack" in args["input"]:
                faces_path = os.path.join(args["input"], "face-locations", hpath, file.replace(".mov", ".face"))
            bboxes = open(faces_path, "r").readlines()
            for i in range(len(bboxes)):
                bboxes[i] = list(map(int, bboxes[i].split()))
            bboxes = np.array(bboxes)
        # Dispatch to the "<feature>_features" function selected on the CLI.
        res_arr = extract_features(inp, globals()[args["feature"] + "_features"], bboxes)
        g = f[hpath] if hpath in f else f.create_group(hpath)
        g.create_dataset(file, data=res_arr)
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,503
|
selimfirat/ocfpad
|
refs/heads/master
|
/boxplots.py
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib

# Render via pgf/LaTeX so fonts match the paper.
matplotlib.use("pgf")
matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
})

# One boxplot of dev EER per (grouping factor, dataset) pair, saved to
# figures/<factor>_<dataset>.pdf — identical output to the unrolled version.
results = pd.read_csv("results.csv")

# (column to group by, axis label, file-name prefix)
factors = [
    ("normalize", "Normalization", "normalization"),
    ("features", "Feature", "feature"),
    ("aggregate", "Aggregation", "aggregate"),
    ("model", "Model", "model"),
]
datasets = [
    # Replay-Mobile plots exclude the vgg16_normalized_faces runs.
    ("Replay-Mobile", "replaymobile",
     (results["data"] == "replay_mobile") & (results["features"] != "vgg16_normalized_faces")),
    ("Replay-Attack", "replayattack",
     results["data"] == "replay_attack"),
]

for column, axis_label, prefix in factors:
    for title, suffix, mask in datasets:
        results[mask].boxplot("dev_eer", by=[column], return_type='axes')
        plt.title(title)
        plt.suptitle("")
        plt.xlabel(axis_label)
        plt.ylabel("Dev EER Score")
        plt.ylim((0.25, 0.5))
        plt.savefig(f"figures/{prefix}_{suffix}.pdf")
        plt.clf()
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,504
|
selimfirat/ocfpad
|
refs/heads/master
|
/do_evaluation.py
|
import bob
import numpy as np
import os
import pickle
from bob.measure import eer, eer_threshold, plot
from sklearn.metrics import roc_curve, roc_auc_score
import matplotlib.pyplot as plt
from plot_helpers import plot_scores_distributions, compare_dets
pkl_path = "/mnt/storage2/pad/pkl/"
def plot_far_frr(negatives, positives, path, title):
    """Draw and save a FAR-vs-FRR (ROC) curve for the given score sets."""
    plot.roc(negatives, positives)
    plt.title(title)
    plt.xlabel("False Acceptance Rate (FAR)")
    plt.ylabel("False Rejection Rate (FRR)")
    plt.savefig(path)
    plt.clf()
def plot_det(negatives, positives, path, title):
    """Draw and save a DET curve, clipped to [-5, 5] on both axes."""
    plot.det(negatives, positives)
    for set_limit in (plt.xlim, plt.ylim):
        set_limit((-5, 5))
    plt.title(title)
    plt.xlabel("False Acceptance")
    plt.ylabel("False Rejection")
    plt.savefig(path)
    plt.clf()
def plot_epc(dev_negatives, dev_positives, test_negatives, test_positives, path, title):
    """Draw and save an Expected Performance Curve (cost vs. minimum HTER)."""
    plot.epc(dev_negatives, dev_positives, test_negatives, test_positives)
    plt.title(title)
    plt.xlabel("Cost")
    plt.ylabel("Minimum HTER (%)")
    plt.savefig(path)
    plt.clf()
def plot_det_comparison(y, y_pred, videos, fpath="figures/det_scenarios.pdf"):
    """Plot one DET curve per attack scenario (photo/video/print) against the
    bona-fide scores, and save the figure to ``fpath``.

    Videos are classified by substrings of their names: no "attack" => bona
    fide; otherwise "print"/"photo"/"video" pick the attack medium.
    """
    vid_bonafide = np.zeros(len(videos), dtype=bool)
    vid_mobile = np.zeros(len(videos), dtype=bool)   # "photo" attacks
    vid_highdef = np.zeros(len(videos), dtype=bool)  # "video" attacks
    vid_print = np.zeros(len(videos), dtype=bool)    # "print" attacks
    print(fpath, videos)
    for i, vid in enumerate(videos):
        if not "attack" in vid:
            vid_bonafide[i] = True
        elif "print" in vid:
            vid_print[i] = True
        elif "photo" in vid:
            vid_mobile[i] = True
        elif "video" in vid:
            vid_highdef[i] = True
    positives = y_pred[vid_bonafide]
    print(sum(vid_bonafide), sum(vid_highdef), sum(vid_print), sum(vid_mobile))
    # BUG FIX: negatives must line up with `types`. Previously the list was
    # [highdef, mobile, print], so "photo"-labeled curve showed video-attack
    # scores and vice versa.
    scores_neg = [y_pred[vid_mobile], y_pred[vid_highdef], y_pred[vid_print]]
    scores_pos = [positives, positives, positives]
    types = ["photo", "video", "print"]
    compare_dets(scores_neg, scores_pos, types, [10, 90, 10, 90])
    plt.savefig(fpath)
    plt.clf()
"""
for model_folder in reversed(os.listdir(pkl_path)):
scores_pkl = os.path.join(pkl_path, model_folder, "scores.pkl")
print(model_folder)
if not os.path.exists(scores_pkl):
continue
with open(scores_pkl, "rb") as m:
r = pickle.load(m)
if len(r) == 12:
y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = r
else:
y_dev, y_dev_pred, dev_videos, y_test, y_test_pred, test_videos = r
dev_negatives, dev_positives = y_dev_pred[y_dev==0], y_dev_pred[y_dev==1]
test_negatives, test_positives = y_test_pred[y_test==0], y_test_pred[y_test==1]
plot_far_frr(dev_negatives, dev_positives, "figures/test_roc.pdf", "FAR vs. FRR Curve")
plot_epc(dev_negatives, dev_positives, test_negatives, test_positives, "figures/test_epc.pdf", "Expected Performance Curve")
plot_det(dev_negatives, dev_positives, "figures/test_det.pdf", "Detection Error Trade-off Curve")
plot_scores_distributions([dev_negatives, dev_positives], [test_negatives, test_positives], path="figures/test_hists.pdf")
threshold = eer_threshold(dev_negatives, dev_positives)
eer, far, frr = eer(dev_negatives, dev_positives, also_farfrr=True)
print(eer, far, frr, threshold)
print(len(dev_videos))
plot_det_comparison(y_test, y_test_pred, test_videos)
# Replay-Attack
# attack_mobile
# attack_highdef
# attack_print
# Replay-Mobile
# attack_mobile
# attack_highdef
# attack_print
break
"""
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,505
|
selimfirat/ocfpad
|
refs/heads/master
|
/convlstm_main.py
|
import argparse
import pickle
import numpy as np
import os
from sklearn.metrics import roc_curve, roc_auc_score
from torch.autograd import Variable
from tqdm import tqdm
import h5py
import torch
from convlstm_autoencoder import ConvLSTMAutoencoder
def calculate_metrics(y, y_pred, threshold=None, test_videos=None):
    """Compute the error rate and ROC AUC for score array ``y_pred``.

    When ``threshold`` is None, the EER point is found from (y, y_pred)'s own
    ROC curve; otherwise the mean of FAR and FRR at the given threshold (HTER)
    is returned in its place. When ``test_videos`` is given, per-attack-type
    FARs are also computed and the resulting ACER is printed.

    Returns (eer_or_hter, roc_auc, threshold).
    """
    if threshold is None:  # `is None`, not `== None` (numpy-safe identity test)
        fpr, tpr, threshold = roc_curve(y, y_pred, pos_label=1)
        fnr = 1 - tpr
        threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
        eer = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
    else:
        # HTER at the supplied threshold: mean of FAR and FRR.
        eer = sum(np.ones_like(y)[np.argwhere(np.logical_and(y == 0, y_pred >= threshold))])/sum(1 - y) # far
        eer += sum(np.ones_like(y)[np.argwhere(np.logical_and(y == 1, y_pred < threshold))])/sum(y) # frr
        eer /= 2
        eer = eer[0]
    roc_auc = roc_auc_score(y, y_pred)
    if test_videos:
        # Classify each video by name substring to compute per-medium FARs.
        vid_bonafide = np.zeros(len(test_videos), dtype=bool)
        vid_mobile = np.zeros(len(test_videos), dtype=bool)
        vid_highdef = np.zeros(len(test_videos), dtype=bool)
        vid_print = np.zeros(len(test_videos), dtype=bool)
        for i, vid in enumerate(test_videos):
            if not "attack" in vid:
                vid_bonafide[i] = True
            elif "photo" in vid:
                vid_mobile[i] = True
            elif "video" in vid:
                vid_highdef[i] = True
            elif "print" in vid:
                vid_print[i] = True
        # BUG FIX: these previously read the module-level y_test_pred instead
        # of the y_pred parameter (it only worked because the sole caller
        # happened to pass y_test_pred).
        test_far_mobile = sum(y_pred[vid_mobile] >= threshold) / sum(vid_mobile)
        test_far_highdef = sum(y_pred[vid_highdef] >= threshold) / sum(vid_highdef)
        test_far_print = sum(y_pred[vid_print] >= threshold) / sum(vid_print)
        bpcer = sum(y_pred[vid_bonafide] < threshold) / sum(vid_bonafide)
        apcer = max(test_far_mobile, test_far_highdef, test_far_print)
        acer = (apcer + bpcer)/2
        print("ACER", acer)
    return eer, roc_auc, threshold
def get_eval_videos(f, split, data_name):
    """Load every attack and real video of ``split`` from HDF5 handle ``f``.

    Returns {video_id: {"label": 0|1, "features": tensor}} where pixel values
    are scaled to [0, 1]; label 0 means imposter (attack), 1 means genuine.
    """
    def _as_tensor(vid):
        # uint8 frames -> float32 in [0, 1]
        return torch.tensor(np.array(vid, dtype=np.float32) / 255)

    data = {}
    if data_name == "replay_attack":
        # Replay-Attack keeps attacks under fixed/hand sub-groups.
        for mount in ["fixed", "hand"]:
            group = f[split]["attack"][mount]
            for vid_idx in group:
                data[vid_idx] = {"label": 0, "features": _as_tensor(group[vid_idx])}
    else:
        group = f[split]["attack"]
        for vid_idx in group:
            data[vid_idx] = {"label": 0, "features": _as_tensor(group[vid_idx])}
    real = f[split]["real"]
    for vid_idx in real:
        data[vid_idx] = {"label": 1, "features": _as_tensor(real[vid_idx])}
    return data
if __name__ == "__main__":
    # CLI entry point: train (or reload) a ConvLSTM autoencoder on genuine
    # training videos, score dev/test videos, and append metrics to a CSV log.
    parser = argparse.ArgumentParser("Running ConvLSTM Autoencoder")
    parser.add_argument("--data", default="replay_attack")
    parser.add_argument("--cuda", default=0, type=int)
    parser.add_argument("--feature", default="raw_normalized_faces", type=str,
                        choices=["raw_faces", "raw_normalized_faces", "raw_frames"])
    parser.add_argument("--epochs", default=1, type=int)
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--reg", default=0.5, type=float)
    parser.add_argument("--kernel_size", default=3, type=int)
    parser.add_argument("--interdb", action="store_true", default=False)
    parser.add_argument("--log", default="convlstm_results.csv", type=str)
    args = vars(parser.parse_args())
    # Experiment name doubles as the on-disk cache directory for model/scores.
    experiment_name = f"{args['data']}_convlstm_{str(args['epochs'])}epochs_lr{str(args['lr'])}_reg{str(args['reg'])}_{args['feature']}"
    path = os.path.join("/mnt/storage2/pad/", args["data"], args["feature"]+".h5")
    f = h5py.File(path, "r")
    pkl_path = os.path.join("/mnt/storage2/pad/pkl/", experiment_name)
    if not os.path.exists(pkl_path):
        os.makedirs(pkl_path)
    model_path = os.path.join(pkl_path, "model.h5")
    if args["interdb"]:
        scores_path = os.path.join(pkl_path, "interdb_scores.pkl")
    else:
        scores_path = os.path.join(pkl_path, "scores.pkl")
    # Train only if no cached model exists; one-class training uses genuine
    # ("real") videos only, scaled from uint8 to [0, 1].
    if not os.path.exists(model_path):
        X = []
        for vid_idx, vid in tqdm(f["train"]["real"].items()):
            vid_arr = np.array(vid, dtype=np.float32) / 255
            vid_arr = torch.tensor(vid_arr)
            X.append(vid_arr)
        stae = ConvLSTMAutoencoder(num_epochs=args["epochs"], lr=args["lr"], cuda_idx=args["cuda"], reg=args["reg"], kernel_size=args["kernel_size"])
        stae.fit(X)
        torch.save(stae, model_path)
    stae = torch.load(model_path)
    if not os.path.exists(scores_path):
        # Score the development split; decision_function is negated so that
        # higher scores mean "more genuine".
        dev = get_eval_videos(f, "devel", args["data"])
        y_dev = np.zeros(len(dev.keys()))
        y_dev_pred = np.zeros(len(dev.keys()))
        dev_videos = []
        data = []
        for i, (name, vid) in tqdm(enumerate(dev.items())):
            vid_score = -stae.decision_function([vid["features"]])
            y_dev_pred[i] = vid_score
            y_dev[i] = vid["label"]
            dev_videos.append(name)
        dev_eer, dev_roc_auc, threshold = calculate_metrics(y_dev, y_dev_pred)
        print("Per-Video Results")
        print(f"Development EER: {np.round(dev_eer, 4)} ROC (AUC): {np.round(dev_roc_auc,4)}")
        # --interdb: cross-dataset evaluation, i.e. test on the *other* dataset.
        if args["interdb"]:
            other_data = "replay_attack" if args['data'] == "replay_mobile" else "replay_mobile"
            other_path = os.path.join("/mnt/storage2/pad/", other_data, args["feature"] + ".h5")
            other_f = h5py.File(other_path, "r")
            test = get_eval_videos(other_f, "test", other_data)
        else:
            test = get_eval_videos(f, "test", args["data"])
        y_test = np.zeros(len(test.keys()))
        y_test_pred = np.zeros(len(test.keys()))
        test_videos = []
        data = []
        for i, (name, vid) in tqdm(enumerate(test.items())):
            vid_score = -stae.decision_function([vid["features"]])
            y_test_pred[i] = vid_score
            y_test[i] = vid["label"]
            test_videos.append(name)
        # NOTE(review): no threshold is passed here, so calculate_metrics
        # returns the test split's own EER, yet it is printed as "Test HTER"
        # below — confirm which metric is intended.
        test_eer, test_roc_auc, _ = calculate_metrics(y_test, y_test_pred, test_videos=test_videos)
        print(f"Test HTER: {np.round(test_eer, 4)} ROC (AUC): {np.round(test_roc_auc,4)}")
        with open(scores_path, "wb+") as m:
            pickle.dump((y_dev, y_dev_pred, dev_videos, y_test, y_test_pred, test_videos), m)
    # Re-load cached scores and recompute metrics for the CSV log.
    with open(scores_path, "rb") as m:
        y_dev, y_dev_pred, dev_videos, y_test, y_test_pred, test_videos = pickle.load(m)
    dev_eer, dev_roc_auc, threshold = calculate_metrics(y_dev, y_dev_pred)
    test_hter, test_roc_auc, _ = calculate_metrics(y_test, y_test_pred)
    if not os.path.exists(args["log"]):
        with open(args["log"], 'w+') as fd:
            fd.write(",".join(
                ["data", "interdb", "model", "feature", "epochs", "lr", "reg", "kernel_size", "dev_eer", "dev_roc_auc",
                 "test_hter", "test_roc_auc"]) + "\n")
    res = [args["data"], str(args["interdb"]), "convlstm", args["feature"], str(args["epochs"]), str(args["lr"]), str(args["reg"]), str(args["kernel_size"]), str(dev_eer), str(dev_roc_auc), str(test_hter), str(test_roc_auc)]
    print("Per-Video Results")
    print(f"Development EER: {np.round(dev_eer, 4)} ROC (AUC): {np.round(dev_roc_auc,4)}")
    print(f"Test HTER: {np.round(test_hter, 4)} ROC (AUC): {np.round(test_roc_auc,4)}")
    with open(args["log"], 'a+') as fd:
        fd.write(",".join(res) + "\n")
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,506
|
selimfirat/ocfpad
|
refs/heads/master
|
/faces_to_video.py
|
import os
from tqdm import tqdm
# Re-encode per-frame aligned face crops (frame_det_00_*.bmp) back into videos,
# mirroring the directory layout of the source .mov files.
input_path = "/mnt/storage2/pad/frames"
videos_path = "/mnt/storage2/pad/videos"
output_path = "/mnt/storage2/pad/faces"
for root, dirs, files in tqdm(os.walk(videos_path)):
    hpath = root.replace(videos_path, "").strip("/")
    print(hpath)
    # Replay-Mobile was captured at 30 fps, the other dataset at 25 fps.
    if "replay_mobile" in hpath:
        fps = 30
    else:
        fps = 25
    for file in tqdm(files):
        if not file.endswith(".mov"):
            continue
        res_path = os.path.join(output_path, hpath)
        os.makedirs(res_path, exist_ok=True)  # race-free replacement for exists()+makedirs()
        frames_path = os.path.join(input_path, hpath, file.replace('.mov', "_aligned"))
        res_fpath = os.path.join(res_path, file)
        print(frames_path, res_fpath)
        # BUG FIX: the frame rate was hard-coded to 30 and `fps` was unused,
        # so non-Replay-Mobile videos were encoded at the wrong rate.
        os.system(f"ffmpeg -f image2 -r {fps} -i {frames_path}/frame_det_00_%06d.bmp {res_fpath}")
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,507
|
selimfirat/ocfpad
|
refs/heads/master
|
/normalized_model.py
|
from pyod.models.base import BaseDetector
from pyod.utils import invert_order
from sklearn.base import BaseEstimator
from sklearn.preprocessing import StandardScaler
# A normalization detector_ constructed due to the feedback of Prof. Arashloo during presentation.
# This class Takes detector_ in constructor and performs normalization during training and prediction.
class NormalizedModel(BaseDetector):
    """Wrap an arbitrary detector so features are standardized (zero mean,
    unit variance) before both fitting and scoring.

    Added due to Prof. Arashloo's feedback during the presentation; callers
    use it exactly like the wrapped detector.
    """

    def __init__(self, detector_):
        super().__init__()
        self.detector_ = detector_
        self.normalizer = StandardScaler()

    def fit(self, X, **kwargs):
        """Fit the scaler on X, then fit the wrapped detector on scaled X."""
        self._classes = 2
        scaled = self.normalizer.fit_transform(X)
        self.detector_.fit(scaled)
        # Training scores are inverted (see pyod.utils.invert_order) before
        # the base class derives its internal threshold from them.
        self.decision_scores_ = invert_order(
            self.detector_.decision_function(scaled))
        self._process_decision_scores()

    def decision_function(self, X, **kwargs):
        """Score X with the wrapped detector after applying the fitted scaler."""
        return self.detector_.decision_function(self.normalizer.transform(X))
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,508
|
selimfirat/ocfpad
|
refs/heads/master
|
/generate_tables.py
|
import numpy as np
import os
import pickle
import pandas as pd
from bob.measure._library import eer_threshold
from sklearn.metrics import roc_auc_score
from bob.measure import eer, eer_threshold
# Shared setup for the LaTeX table generators below: load the results CSV and
# define human-readable names for models, aggregations and image regions.
df = pd.read_csv("results.csv")
# NOTE(review): `df` is filtered here but the table loops below read their
# numbers from the score pickles, not from `df` — confirm this filter is
# still needed.
df = df[df["normalize"] == "True"]
model_names = {
    "iforest": "iForest",
    "ocsvm": "OC-SVM",
    "ae": "Autoencoder"
}
agg_names = {
    "mean": "Mean",
    "max": "Max"
}
region_names = {
    "normalized_faces": "Normalized Face", #\\begin{tabular}[c]{@{}c@{}}Normalized\\\\ Face\\end{tabular}",
    "faces": "Face",
    "frames": "Frame"
}
path = "figures"                      # output directory for .tex tables
pkl_path = "/mnt/storage2/pad/pkl/"   # cached per-experiment score pickles
# Table 1: video-level baseline results (AUC/EER on the dev split) per
# dataset x normalization, with rows Model x Region x Aggregation.
for normalized in ["", "_normalized"]:
    for data in ["replay_mobile", "replay_attack"]:
        fname = data + "_baselines" + normalized + ".tex"
        table = """
\\begin{tabular}{@{}ccccc@{}}
\\toprule
Model & Region & Aggregation & Video AUC (\\%) & Video EER (\\%) \\\\ \\midrule
"""
        for mi, model in enumerate(["iforest", "ocsvm", "ae"]):
            table += "\\multirow{4}{*}{" + model_names[model] + "} & "
            for ri, region in enumerate(["frames", "faces", "normalized_faces"]):
                # Replay-Mobile has no normalized-face feature runs.
                if data == "replay_mobile" and region == "normalized_faces":
                    continue
                if ri > 0:
                    table += " & "
                table += "\\multirow{2}{*}{" + region_names[region] + "} & "
                for ai, aggregate in enumerate(["mean", "max"]):
                    if ai > 0:
                        table += " & & "
                    table += agg_names[aggregate] + " & "
                    # Metrics come from the cached score pickle of each run.
                    scores_pkl_path = os.path.join(pkl_path, f"{data}_{model}_{aggregate}{normalized}_vgg16_{region}/scores.pkl")
                    y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = pickle.load(open(scores_pkl_path, "rb"))
                    dev_negatives, dev_positives = y_dev_pred[y_dev == 0], y_dev_pred[y_dev == 1]
                    threshold = eer_threshold(dev_negatives, dev_positives)
                    eer_score, far, frr = eer(dev_negatives, dev_positives, also_farfrr=True)
                    roc_auc = roc_auc_score(y_dev, y_dev_pred)
                    table += f" {str(np.round(roc_auc*100,2))} & {str(np.round(eer_score*100,2))} "
                    table += "\\\\ "
            if mi != 2:
                table += "\\midrule "
        table += """
\\bottomrule
\\end{tabular}
"""
        tpath = os.path.join(path, fname)
        with open(tpath, "w+") as t:
            t.write(table)
        print(data, normalized, "Done")
# Table 2: same baselines but at frame level (uses the *_frames arrays from
# the pickles); only the "mean" aggregation applies per frame.
for normalized in ["", "_normalized"]:
    for data in ["replay_mobile", "replay_attack"]:
        fname = data + "_baselines" + normalized + "_frames.tex"
        table = """
\\begin{tabular}{@{}cccc@{}}
\\toprule
Model & Region & Frame AUC (\\%) & Frame EER (\\%) \\\\ \\midrule
"""
        for mi, model in enumerate(["iforest", "ocsvm", "ae"]):
            table += "\\multirow{2}{*}{" + model_names[model] + "} & "
            for ri, region in enumerate(["frames", "faces", "normalized_faces"]):
                # Replay-Mobile has no normalized-face feature runs.
                if data == "replay_mobile" and region == "normalized_faces":
                    continue
                if ri > 0:
                    table += " & "
                table += "" + region_names[region] + " & "
                for ai, aggregate in enumerate(["mean"]):
                    if ai > 0:
                        table += " & & "
                    scores_pkl_path = os.path.join(pkl_path, f"{data}_{model}_{aggregate}{normalized}_vgg16_{region}/scores.pkl")
                    y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = pickle.load(open(scores_pkl_path, "rb"))
                    dev_negatives, dev_positives = y_dev_frames_pred[y_dev_frames == 0], y_dev_frames_pred[y_dev_frames == 1]
                    threshold = eer_threshold(dev_negatives, dev_positives)
                    eer_score, far, frr = eer(dev_negatives, dev_positives, also_farfrr=True)
                    roc_auc = roc_auc_score(y_dev_frames, y_dev_frames_pred)
                    table += f" {str(np.round(roc_auc*100,2))} & {str(np.round(eer_score*100,2))} "
                table += "\\\\ "
            if mi != 2:
                table += "\\midrule "
        table += """
\\bottomrule
\\end{tabular}
"""
        tpath = os.path.join(path, fname)
        with open(tpath, "w+") as t:
            t.write(table)
        print(data, normalized, "Done", " Frame Level")
# Image quality
# Table 3: video-level "image quality" results for Replay-Attack.
for normalized in ["", "_normalized"]:
    for data in ["replay_attack"]:
        fname = data + "_image_quality" + normalized + ".tex"
        table = """
\\begin{tabular}{@{}cccc@{}}
\\toprule
Model & Aggregation & Video AUC (\\%) & Video EER (\\%) \\\\ \\midrule
"""
        for mi, model in enumerate(["iforest", "ocsvm", "ae"]):
            table += "\\multirow{2}{*}{" + model_names[model] + "} & "
            for ai, aggregate in enumerate(["mean", "max"]):
                if ai > 0:
                    table += " & "
                table += agg_names[aggregate] + " & "
                # NOTE(review): `region` is not set in this loop — it is the
                # leftover value ("normalized_faces") from the previous table
                # loop, and the path still says vgg16 even though this section
                # is labeled "image quality". Confirm the intended pickle path.
                scores_pkl_path = os.path.join(pkl_path, f"{data}_{model}_{aggregate}{normalized}_vgg16_{region}/scores.pkl")
                y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = pickle.load(open(scores_pkl_path, "rb"))
                dev_negatives, dev_positives = y_dev_pred[y_dev == 0], y_dev_pred[y_dev == 1]
                threshold = eer_threshold(dev_negatives, dev_positives)
                eer_score, far, frr = eer(dev_negatives, dev_positives, also_farfrr=True)
                roc_auc = roc_auc_score(y_dev, y_dev_pred)
                table += f" {str(np.round(roc_auc*100,2))} & {str(np.round(eer_score*100,2))} "
                table += "\\\\ "
            if mi != 2:
                table += "\\midrule "
        table += """
\\bottomrule
\\end{tabular}
"""
        tpath = os.path.join(path, fname)
        with open(tpath, "w+") as t:
            t.write(table)
        print(data, normalized, " Image quality", "Done")
# Table 4: frame-level "image quality" results for Replay-Attack.
for normalized in ["", "_normalized"]:
    for data in ["replay_attack"]:
        fname = data + "_image_quality" + normalized + "_frames.tex"
        table = """
\\begin{tabular}{@{}ccc@{}}
\\toprule
Model & Frame AUC (\\%) & Frame EER (\\%) \\\\ \\midrule
"""
        for mi, model in enumerate(["iforest", "ocsvm", "ae"]):
            table += model_names[model] + " & "
            for ai, aggregate in enumerate(["mean"]):
                if ai > 0:
                    table += " & "
                # NOTE(review): as in the previous section, `region` is stale
                # from the earlier table loop — verify the intended path.
                scores_pkl_path = os.path.join(pkl_path, f"{data}_{model}_{aggregate}{normalized}_vgg16_{region}/scores.pkl")
                y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = pickle.load(open(scores_pkl_path, "rb"))
                dev_negatives, dev_positives = y_dev_frames_pred[y_dev_frames == 0], y_dev_frames_pred[y_dev_frames == 1]
                threshold = eer_threshold(dev_negatives, dev_positives)
                eer_score, far, frr = eer(dev_negatives, dev_positives, also_farfrr=True)
                roc_auc = roc_auc_score(y_dev_frames, y_dev_frames_pred)
                table += f" {str(np.round(roc_auc*100,2))} & {str(np.round(eer_score*100,2))} "
                table += "\\\\ "
            if mi != 2:
                table += "\\midrule "
        table += """
\\bottomrule
\\end{tabular}
"""
        tpath = os.path.join(path, fname)
        with open(tpath, "w+") as t:
            t.write(table)
        print(data, normalized, "Done", " Frame Level")
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,509
|
selimfirat/ocfpad
|
refs/heads/master
|
/print_best_models.py
|
import json
import numpy as np
import os
import pickle
from bob.measure import eer
from bob.measure._library import eer_threshold
from sklearn.metrics import roc_auc_score
from do_evaluation import plot_det_comparison
def get_best_models():
    """Select, per dataset and model family, the configuration with the lowest
    dev EER from the cached score pickles; records dev/test metrics, ACER and
    the winning pickle path. The first pass scans the baseline grid
    (region x aggregation); the second pass scans the ConvLSTM run folders.
    """
    pkl_path = "/mnt/storage2/pad/pkl"
    # "dev_eer": 999 is a sentinel meaning "no run seen yet".
    models = {
        "replay_attack": {
            "ocsvm": {
                "dev_eer": 999
            },
            "iforest": {
                "dev_eer": 999
            },
            "convlstm": {
                "dev_eer": 999
            },
            "ae": {
                "dev_eer": 999
            }
        },
        "replay_mobile": {
            "ocsvm": {
                "dev_eer": 999
            },
            "iforest": {
                "dev_eer": 999
            },
            "convlstm": {
                "dev_eer": 999
            },
            "ae": {
                "dev_eer": 999
            }
        }
    }
    for data in ["replay_mobile", "replay_attack"]:
        for normalized in ["_normalized"]:
            for mi, model in enumerate(["iforest", "ocsvm", "ae"]):
                for ri, region in enumerate(["frames", "faces", "normalized_faces"]):
                    # Replay-Mobile has no normalized-face feature runs.
                    if data == "replay_mobile" and region == "normalized_faces":
                        continue
                    for ai, aggregate in enumerate(["mean", "max"]):
                        scores_pkl_path = os.path.join(pkl_path,
                                                       f"{data}_{model}_{aggregate}{normalized}_vgg16_{region}/scores.pkl")
                        y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = pickle.load(
                            open(scores_pkl_path, "rb"))
                        dev_negatives, dev_positives = y_dev_pred[y_dev == 0], y_dev_pred[y_dev == 1]
                        threshold = eer_threshold(dev_negatives, dev_positives)
                        dev_eer, far, frr = eer(dev_negatives, dev_positives, also_farfrr=True)
                        dev_auc = roc_auc_score(y_dev, y_dev_pred)
                        # NOTE(review): the tie-break compares dev_auc against
                        # the stored dev *EER* ("dev_eer") — looks like a typo
                        # for "dev_auc"; confirm the intended tie-break.
                        if dev_eer < models[data][model]["dev_eer"] or (dev_eer == models[data][model]["dev_eer"] and dev_auc > models[data][model]["dev_eer"]):
                            models[data][model]["dev_eer"] = dev_eer
                            models[data][model]["dev_auc"] = dev_auc
                            # HTER on the test split at the dev EER threshold.
                            test_far = sum(np.ones_like(y_test)[np.argwhere(
                                np.logical_and(y_test == 0, y_test_pred >= threshold))]) / sum(
                                1 - y_test)  # far
                            test_frr = sum(np.ones_like(y_test)[np.argwhere(
                                np.logical_and(y_test == 1, y_test_pred < threshold))]) / sum(
                                y_test)  # frr
                            hter = (test_far + test_frr) / 2
                            hter = hter[0]
                            test_auc = roc_auc_score(y_test, y_test_pred)
                            # (duplicate assignments kept verbatim)
                            models[data][model]["dev_eer"] = dev_eer
                            models[data][model]["dev_auc"] = dev_auc
                            models[data][model]["test_hter"] = hter
                            models[data][model]["test_auc"] = test_auc
                            models[data][model]["scores_path"] = scores_pkl_path
                            # Classify test videos by attack medium for ACER.
                            vid_bonafide = np.zeros(len(test_videos), dtype=bool)
                            vid_mobile = np.zeros(len(test_videos), dtype=bool)
                            vid_highdef = np.zeros(len(test_videos), dtype=bool)
                            vid_print = np.zeros(len(test_videos), dtype=bool)
                            for i, vid in enumerate(test_videos):
                                if not "attack" in vid:
                                    vid_bonafide[i] = True
                                elif "photo" in vid:
                                    vid_mobile[i] = True
                                elif "video" in vid:
                                    vid_highdef[i] = True
                                elif "print" in vid:
                                    vid_print[i] = True
                            test_far_mobile = sum(y_test_pred[vid_mobile] >= threshold) / sum(vid_mobile)
                            test_far_highdef = sum(y_test_pred[vid_highdef] >= threshold) / sum(vid_highdef)
                            test_far_print = sum(y_test_pred[vid_print] >= threshold) / sum(vid_print)
                            bpcer = sum(y_test_pred[vid_bonafide] < threshold) / sum(vid_bonafide)
                            apcer = max(test_far_mobile, test_far_highdef, test_far_print)
                            models[data][model]["acer"] = (bpcer + apcer) / 2
                            models[data][model]["dev_eer"] = dev_eer
                            models[data][model]["dev_auc"] = dev_auc
                            models[data][model]["test_hter"] = hter
                            models[data][model]["test_auc"] = test_auc
                            models[data][model]["scores_path"] = scores_pkl_path
        # Second pass: ConvLSTM runs live in their own folders and store a
        # 6-tuple (no per-frame arrays) in scores.pkl.
        for folder in os.listdir(pkl_path):
            if not os.path.isdir(os.path.join(pkl_path, folder)) or not ("convlstm" in folder) or not (data in folder):
                continue
            model = "convlstm"
            scores_pkl_path = os.path.join(pkl_path, folder, "scores.pkl")
            if not os.path.exists(scores_pkl_path):
                continue
            y_dev, y_dev_pred, dev_videos, y_test, y_test_pred, test_videos = pickle.load(
                open(scores_pkl_path, "rb"))
            dev_negatives, dev_positives = y_dev_pred[y_dev == 0], y_dev_pred[y_dev == 1]
            threshold = eer_threshold(dev_negatives, dev_positives)
            dev_eer, far, frr = eer(dev_negatives, dev_positives, also_farfrr=True)
            dev_auc = roc_auc_score(y_dev, y_dev_pred)
            # NOTE(review): same suspected "dev_auc"/"dev_eer" tie-break typo
            # as in the first pass.
            if dev_eer < models[data][model]["dev_eer"] or (
                    dev_eer == models[data][model]["dev_eer"] and dev_auc > models[data][model]["dev_eer"]):
                models[data][model]["dev_eer"] = dev_eer
                models[data][model]["dev_auc"] = dev_auc
                test_far = sum(np.ones_like(y_test)[np.argwhere(np.logical_and(y_test == 0, y_test_pred >= threshold))]) / sum(
                    1 - y_test)  # far
                test_frr = sum(np.ones_like(y_test)[np.argwhere(np.logical_and(y_test == 1, y_test_pred < threshold))]) / sum(
                    y_test)  # frr
                hter = (test_far+test_frr)/2
                hter = hter[0]
                test_auc = roc_auc_score(y_test, y_test_pred)
                models[data][model]["dev_eer"] = dev_eer
                models[data][model]["dev_auc"] = dev_auc
                models[data][model]["test_hter"] = hter
                models[data][model]["test_auc"] = test_auc
                models[data][model]["scores_path"] = scores_pkl_path
                # Classify test videos by attack medium for ACER.
                vid_bonafide = np.zeros(len(test_videos), dtype=bool)
                vid_mobile = np.zeros(len(test_videos), dtype=bool)
                vid_highdef = np.zeros(len(test_videos), dtype=bool)
                vid_print = np.zeros(len(test_videos), dtype=bool)
                for i, vid in enumerate(test_videos):
                    if not "attack" in vid:
                        vid_bonafide[i] = True
                    elif "photo" in vid:
                        vid_mobile[i] = True
                    elif "video" in vid:
                        vid_highdef[i] = True
                    elif "print" in vid:
                        vid_print[i] = True
                test_far_mobile = sum(y_test_pred[vid_mobile] >= threshold) / sum(vid_mobile)
                test_far_highdef = sum(y_test_pred[vid_highdef] >= threshold) / sum(vid_highdef)
                test_far_print = sum(y_test_pred[vid_print] >= threshold) / sum(vid_print)
                bpcer = sum(y_test_pred[vid_bonafide] < threshold) / sum(vid_bonafide)
                apcer = max(test_far_mobile, test_far_highdef, test_far_print)
                models[data][model]["acer"] = (bpcer + apcer) / 2
    return models
# Pick the best configuration per dataset/model, export the ConvLSTM DET
# comparison figure for each dataset, then dump the selection as JSON.
best_models = get_best_models()
# Export Detection-Error Trade-off curve
for data, models in best_models.items():
    scores_pkl_path = models["convlstm"]["scores_path"]
    with open(scores_pkl_path, "rb") as m:
        r = pickle.load(m)
    y_dev, y_dev_pred, dev_videos, y_test, y_test_pred, test_videos = r
    # NOTE(review): these negatives/positives splits are computed but unused —
    # plot_det_comparison takes the raw labels/scores directly.
    dev_negatives, dev_positives = y_dev_pred[y_dev==0], y_dev_pred[y_dev==1]
    test_negatives, test_positives = y_test_pred[y_test==0], y_test_pred[y_test==1]
    plot_det_comparison(y_test, y_test_pred, test_videos, f"figures/{data}_det.pdf")
print(json.dumps(best_models, indent=4, sort_keys=True))
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,510
|
selimfirat/ocfpad
|
refs/heads/master
|
/main.py
|
import argparse
import pickle
import numpy as np
import os
from normalized_model import NormalizedModel
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
np.random.seed(1)
def get_training_frames(f, dims=4096):
    """Stack every genuine ("real") training frame from HDF5 handle ``f``
    into a single (num_frames, dims) array."""
    real = f["train"]["real"]
    total = sum(real[name].shape[0] for name in real)
    stacked = np.zeros((total, dims))
    offset = 0
    for name in real:
        frames = real[name]
        count = frames.shape[0]
        stacked[offset:offset + count, :] = frames[:, :]
        offset += count
    return stacked
def aggregate(x):
    """Collapse per-frame scores into one video score, per the --aggregate
    CLI flag; implicitly returns None for any other flag value."""
    strategy = args["aggregate"]
    if strategy == "mean":
        return x.mean()
    if strategy == "max":
        return x.max()
def get_eval_videos(f, split, data_name):
    """Collect every attack and real video of ``split`` from HDF5 handle ``f``.

    Returns {video_id: {"label": 0|1, "features": array}} with label 0 for
    imposter (attack) videos and 1 for genuine ones.
    """
    def _collect(store, group, label):
        for vid_idx in group:
            store[vid_idx] = {"label": label, "features": group[vid_idx][:, :]}

    data = {}
    if data_name == "replay_attack":
        # Replay-Attack keeps attacks under fixed/hand sub-groups.
        for mount in ["fixed", "hand"]:
            _collect(data, f[split]["attack"][mount], 0)
    else:
        _collect(data, f[split]["attack"], 0)
    _collect(data, f[split]["real"], 1)
    return data
def eval(f, split, data):
    """Score every video of ``split`` with the module-level ``model``.

    Returns (video labels, video scores, video names, frame labels,
    frame scores, frame names). NOTE: this shadows the builtin ``eval`` and
    appends one entry per *video* to the frame-names list, mirroring the
    original behavior.
    """
    videos = get_eval_videos(f, split, data)
    n_videos = len(videos.keys())
    y_dev = np.zeros(n_videos)
    y_dev_pred = np.zeros(n_videos)
    dev_videos = []
    n_frames = sum(v["features"].shape[0] for v in videos.values())
    y_dev_frames = np.zeros(n_frames)
    y_dev_frames_pred = np.zeros(n_frames)
    dev_videos_frames = []
    offset = 0
    for i, (name, vid) in tqdm(enumerate(videos.items())):
        # Column 0 of predict_proba holds the per-frame score used throughout.
        frame_scores = model.predict_proba(vid["features"])[:, 0]
        count = frame_scores.shape[0]
        y_dev_frames_pred[offset:offset + count] = frame_scores
        y_dev_frames[offset:offset + count] = vid["label"]
        offset += count
        y_dev_pred[i] = aggregate(frame_scores)
        y_dev[i] = vid["label"]
        dev_videos.append(name)
        dev_videos_frames.append(name)
    return y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames
def calculate_metrics(y, y_pred, threshold=None):
    """Compute EER (threshold derived from the ROC curve) or, when a
    threshold is supplied, the HTER at that threshold; plus the ROC AUC.

    Returns (eer_or_hter, roc_auc, threshold).
    """
    if threshold is None:  # FIX: `is None`, not `== None` (numpy-safe identity test)
        fpr, tpr, threshold = roc_curve(y, y_pred, pos_label=1)
        fnr = 1 - tpr
        threshold = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
        eer = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
    else:
        # HTER at the supplied threshold: mean of FAR and FRR.
        eer = sum(np.ones_like(y)[np.argwhere(np.logical_and(y == 0, y_pred >= threshold))])/sum(1 - y) # far
        eer += sum(np.ones_like(y)[np.argwhere(np.logical_and(y == 1, y_pred < threshold))])/sum(y) # frr
        eer /= 2
        eer = eer[0]
    roc_auc = roc_auc_score(y, y_pred)
    return eer, roc_auc, threshold
def calculate_all_metrics(y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames):
    """Print and return per-video and per-frame EER/HTER/AUC plus ACER."""
    # Per-video protocol: EER threshold is chosen on dev, reused for test HTER.
    dev_eer, dev_roc_auc, threshold = calculate_metrics(y_dev, y_dev_pred)
    test_hter, test_roc_auc, _ = calculate_metrics(y_test, y_test_pred, threshold)
    print("Per-Video Results")
    print(f"Development EER: {np.round(dev_eer, 4)} ROC (AUC): {np.round(dev_roc_auc,4)}")
    print(f"Test HTER: {np.round(test_hter, 4)} ROC (AUC): {np.round(test_roc_auc,4)}")
    # Same protocol repeated on frame-level scores.
    dev_frames_eer, dev_frames_roc_auc, frames_threshold = calculate_metrics(y_dev_frames, y_dev_frames_pred)
    test_frames_hter, test_frames_roc_auc, _ = calculate_metrics(y_test_frames, y_test_frames_pred, frames_threshold)
    print("Per-Frame Results")
    print(f"Development EER: {np.round(dev_frames_eer, 4)} ROC (AUC): {np.round(dev_frames_roc_auc,4)}")
    print(f"Test HTER: {np.round(test_frames_hter, 4)} ROC (AUC): {np.round(test_frames_roc_auc,4)}")
    # Partition test videos by attack type using substrings of the video name.
    # NOTE(review): assumes names contain "attack"/"photo"/"video"/"print" --
    # verify against the dataset's naming scheme.
    vid_bonafide = np.zeros(len(test_videos), dtype=bool)
    vid_mobile = np.zeros(len(test_videos), dtype=bool)
    vid_highdef = np.zeros(len(test_videos), dtype=bool)
    vid_print = np.zeros(len(test_videos), dtype=bool)
    for i, vid in enumerate(test_videos):
        if not "attack" in vid:
            vid_bonafide[i] = True
        elif "photo" in vid:
            vid_mobile[i] = True
        elif "video" in vid:
            vid_highdef[i] = True
        elif "print" in vid:
            vid_print[i] = True
    # APCER = worst per-attack-type FAR; ACER = mean of APCER and BPCER.
    test_far_mobile = sum(y_test_pred[vid_mobile] >= threshold) / sum(vid_mobile)
    test_far_highdef = sum(y_test_pred[vid_highdef] >= threshold) / sum(vid_highdef)
    test_far_print = sum(y_test_pred[vid_print] >= threshold) / sum(vid_print)
    bpcer = sum(y_test_pred[vid_bonafide] < threshold) / sum(vid_bonafide)
    apcer = max(test_far_mobile, test_far_highdef, test_far_print)
    acer = (apcer + bpcer)/2
    print("ACER", acer)
    return dev_eer, dev_roc_auc, threshold, test_hter, test_roc_auc, dev_frames_eer, dev_frames_roc_auc, frames_threshold, test_frames_hter, test_frames_roc_auc
# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser("One Class Face Presentation Attack Detection Pipeline")
parser.add_argument("--model", default="iforest", choices=["ocsvm", "iforest", "ae", "stae"], type=str, help="Name of the method")
parser.add_argument("--aggregate", default="mean", choices=["mean", "max"], type=str, help="Aggregate block scores via mean/max or None")
parser.add_argument("--data", default="replay_attack", choices=["replay_attack", "replay_mobile"], type=str)
parser.add_argument("--data_path", default="/mnt/storage2/pad/", type=str)
parser.add_argument("--features", default=["vgg16_frames"], nargs="+", choices=["image_quality", "vgg16_faces", "vgg16_frames", "vggface_frames", "raw_faces", "vgg16_normalized_faces"])
parser.add_argument("--log", default=None, type=str)
parser.add_argument("--interdb", default=False, action="store_true")
parser.add_argument("--normalize", default=False, action="store_true")
args = vars(parser.parse_args())
print(args)
# Heavy imports happen after CLI parsing so bad usage fails fast.
import numpy as np
import h5py
import os
from pyod.models.ocsvm import OCSVM
from pyod.models.iforest import IForest
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, roc_curve
# Pre-extracted features live at <data_path>/<data>/<feat1_feat2...>.h5
path = os.path.join(args["data_path"], args["data"], f"{'_'.join(args['features'])}.h5")
f = h5py.File(path, "r")
experiment_name = f"{args['data']}_{args['model']}_{args['aggregate']}{'_normalized' if args['normalize'] else ''}_{'_'.join(args['features'])}"
save_path = os.path.join("/mnt/storage2/pad/pkl", experiment_name)
if not os.path.exists(save_path):
    os.makedirs(save_path)
# ---------------------------------------------------------------------------
# Train the one-class model once; afterwards always reload from disk.
# NOTE(review): pickle, get_training_frames and NormalizedModel are used but
# not imported/defined in this section -- presumably earlier in the file.
# ---------------------------------------------------------------------------
model_path = os.path.join(save_path, "model.pkl")
if not os.path.exists(model_path):
    models = {
        "ocsvm": OCSVM(),
        "iforest": IForest(behaviour="new"),
    }
    if args["model"] == "ae":
        from pyod.models.auto_encoder import AutoEncoder
        if args["features"][0] == "image_quality":
            hidden_neurons = [18, 18, 18, 18]  # image-quality features are 18-dim
        else:
            hidden_neurons = None
        models["ae"] = AutoEncoder(epochs=50, preprocessing=args["normalize"], hidden_neurons=hidden_neurons)
    if args["normalize"] and args["model"] != "ae":
        model = NormalizedModel(models[args["model"]])
    else:
        model = models[args["model"]]
    # 4096-dim VGG features vs 18-dim image-quality features.
    X_train = get_training_frames(f, 4096 if not "image_quality" in "_".join(args["features"]) else 18)
    model.fit(X_train)
    with open(model_path, "wb+") as m:
        pickle.dump(model, m)
with open(model_path, 'rb') as m:
    model = pickle.load(m)
# ---------------------------------------------------------------------------
# Evaluation: cross-database (interdb) or intra-database; scores are cached
# as pickles so reruns only recompute metrics.
# ---------------------------------------------------------------------------
if args["interdb"]:
    # Dev scores come from the intra-database run; test scores from the OTHER dataset.
    scores_path = os.path.join(save_path, "scores.pkl")
    if not os.path.exists(scores_path):
        raise Exception("Please do intra-database experiment of the model first.")
    with open(scores_path, "rb") as m:
        y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, _, _, _, _, _, _ = pickle.load(m)
    scores_path = os.path.join(save_path, "interdb_scores.pkl")
    if not os.path.exists(scores_path):
        other_data = "replay_attack" if args['data'] == "replay_mobile" else "replay_mobile"
        other_path = os.path.join(args["data_path"], other_data, f"{'_'.join(args['features'])}.h5")
        other_f = h5py.File(other_path, "r")
        y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = eval(other_f, "test", other_data)
        with open(scores_path, "wb+") as m:
            pickle.dump((y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames), m)
    with open(scores_path, "rb") as m:
        y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = pickle.load(m)
    # https://arxiv.org/pdf/1807.00848.pdf
    dev_eer, dev_roc_auc, threshold, test_hter, test_roc_auc, dev_frames_eer, dev_frames_roc_auc, frames_threshold, test_frames_hter, test_frames_roc_auc = calculate_all_metrics(y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames)
else:  # Intra database evaluation
    scores_path = os.path.join(save_path, "scores.pkl")
    if not os.path.exists(scores_path):
        y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames = eval(f, "devel", args['data'])
        y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = eval(f, "test", args['data'])
        with open(scores_path, "wb+") as m:
            pickle.dump((y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames), m)
    with open(scores_path, "rb") as m:
        y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames = pickle.load(m)
    # https://arxiv.org/pdf/1807.00848.pdf
    dev_eer, dev_roc_auc, threshold, test_hter, test_roc_auc, dev_frames_eer, dev_frames_roc_auc, frames_threshold, test_frames_hter, test_frames_roc_auc = calculate_all_metrics(y_dev, y_dev_pred, dev_videos, y_dev_frames, y_dev_frames_pred, dev_videos_frames, y_test, y_test_pred, test_videos, y_test_frames, y_test_frames_pred, test_videos_frames)
# ---------------------------------------------------------------------------
# Optional CSV logging of all metrics (header written once).
# ---------------------------------------------------------------------------
if args["log"] is not None:
    res = [args['data'], str(args["interdb"]), args['model'], args['aggregate'], '_'.join(args['features']), str(args["normalize"]), str(np.round(dev_eer, 4)), str(np.round(dev_roc_auc, 4)), str(np.round(test_hter, 4)), str(np.round(test_roc_auc, 4)), str(np.round(dev_frames_eer, 4)), str(np.round(dev_frames_roc_auc, 4)), str(np.round(test_frames_hter, 4)), str(np.round(test_frames_roc_auc, 4))]
    if not os.path.exists(args["log"]):
        with open(args["log"], 'w+') as fd:
            fd.write(",".join(["data", "interdb", "model", "aggregate", "features", "normalize", "dev_eer", "dev_roc_auc", "test_hter", "test_roc_auc", "dev_frames_eer", "dev_frames_roc_auc", "test_frames_hter", "test_frames_roc_auc"]) + "\n")
    with open(args["log"], 'a+') as fd:
        fd.write(",".join(res) + "\n")
|
{"/convlstm_autoencoder.py": ["/convlstm_cell.py"], "/do_evaluation.py": ["/plot_helpers.py"], "/convlstm_main.py": ["/convlstm_autoencoder.py"], "/print_best_models.py": ["/do_evaluation.py"], "/main.py": ["/normalized_model.py"]}
|
35,511
|
ehapsamy0/test1_dashbordToLearnWebScraping
|
refs/heads/master
|
/notepad/urls.py
|
from django.urls import path
from .views import (
    create_view,
    list_view,
    delete_view,
    update_view)

# Namespace used when reversing, e.g. {% url 'notes:list' %}.
app_name = 'notes'
urlpatterns = [
    # NOTE(review): the route name 'creat_view' is misspelled; kept as-is
    # because templates may reverse it by this exact name.
    path('create/',create_view,name='creat_view'),
    path('list/',list_view,name='list'),
    path('<int:id>/delete/',delete_view,name='delete'),
    #url(r'^(?P<id>\d+)/delete/',delete_view,name='delete'),
    path('<int:id>/update/',update_view,name="update"),
]
|
{"/notepad/urls.py": ["/notepad/views.py"], "/news/admin.py": ["/news/models.py"]}
|
35,512
|
ehapsamy0/test1_dashbordToLearnWebScraping
|
refs/heads/master
|
/notepad/views.py
|
from django.shortcuts import render,redirect,get_object_or_404
from .models import Note
from .forms import NoteModelForm
# Create your views here.
#CRUD
#CREATE UPDATE DELETE RETRIEVE
def create_view(request):
    """Show the note form; on a valid submission attach the current user,
    save the note and redirect to the list page."""
    note_form = NoteModelForm(request.POST or None, request.FILES or None)
    if not note_form.is_valid():
        # GET, or invalid POST: re-render the form with errors.
        return render(request, 'create.html', {'form': note_form})
    note_form.instance.user = request.user
    note_form.save()
    return redirect('/notes/list/')
def list_view(request):
    """Render every note (no per-user filtering)."""
    all_notes = Note.objects.all()
    return render(request, 'list.html', {'object_list': all_notes})
def delete_view(request, id):
    """Delete the note with primary key `id` if it belongs to the requester.

    Fix: the original returned None (not a valid Django response, so a 500)
    whenever the note was missing or owned by another user; every path now
    redirects back to the list. Ownership is still enforced, and the note is
    fetched once instead of indexing the queryset twice.
    """
    note = Note.objects.filter(pk=id).first()
    if note is not None and request.user == note.user:
        note.delete()
    return redirect('/notes/list')
def update_view(request, id):
    """Edit an existing note; 404s when the id is unknown."""
    note = get_object_or_404(Note, id=id)
    form = NoteModelForm(request.POST or None, request.FILES or None, instance=note)
    if form.is_valid():
        form.instance.user = request.user
        form.save()
        return redirect('/notes/list/')
    # Reuses the creation template for editing.
    return render(request, 'create.html', {'form': form})
|
{"/notepad/urls.py": ["/notepad/views.py"], "/news/admin.py": ["/news/models.py"]}
|
35,513
|
ehapsamy0/test1_dashbordToLearnWebScraping
|
refs/heads/master
|
/news/admin.py
|
from django.contrib import admin
from .models import Headline,UserProfile

# Register your models here.
# Expose both models in the Django admin with default ModelAdmin options.
admin.site.register(Headline)
admin.site.register(UserProfile)
|
{"/notepad/urls.py": ["/notepad/views.py"], "/news/admin.py": ["/news/models.py"]}
|
35,514
|
ehapsamy0/test1_dashbordToLearnWebScraping
|
refs/heads/master
|
/news/models.py
|
from django.db import models
from django.conf import settings
# Create your models here.
class Headline(models.Model):
    """A scraped news headline with its image and source URL."""
    title = models.CharField(max_length=150)
    image = models.ImageField()
    url = models.TextField()

    def __str__(self):
        # Fix: the original read `serlf.title`, raising NameError on every
        # str()/admin rendering of a Headline.
        return self.title
class UserProfile(models.Model):
    """Tracks, per user, when headlines were last scraped."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # Null until the first scrape has run.
    last_scrape = models.DateTimeField(null=True, blank=True)

    def __str__(self):
        return f"{self.user}-{self.last_scrape}"
|
{"/notepad/urls.py": ["/notepad/views.py"], "/news/admin.py": ["/news/models.py"]}
|
35,517
|
Sean858/ForumApp
|
refs/heads/master
|
/forum/model.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/14 下午3:32
# @Author : Gaoxiang Chen
# @Site :
# @File : model.py
# @Software: PyCharm
# ---------------------
from datetime import datetime
from flask._compat import text_type
from flask_login import UserMixin
from forum import login_manager, db
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: rebuild the user object from the session's user id.
    return User.getUserById(user_id)
###USER
class User(db.Model, UserMixin):
    """Forum account; owns posts, comments and likes."""
    __tablename__ = 'users'
    uid = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20))
    _password = db.Column(db.String(20))  # NOTE(review): stored in plain text
    _email = db.Column(db.String(20), unique=True)
    posts = db.relationship("Post", backref="user")
    comments = db.relationship("Comment", backref="user")
    likes = db.relationship("Like", backref="user")

    def __init__(self, username, _password, _email):
        self.username = username
        self._password = _password
        self._email = _email

    @classmethod
    def all_users(cls):
        """Return every user row."""
        return cls.query.all()

    @classmethod
    def getUserById(cls, uid):
        """Look a user up by primary key (None when absent)."""
        return cls.query.get(uid)

    def getEmail(self):
        # Fix: this was declared @classmethod while reading self._email, so
        # User.getEmail() returned the Column descriptor instead of a user's
        # address. As an instance method, user.getEmail() works as intended.
        return self._email

    def get_id(self):
        """Flask-Login hook: the session identifier for this user."""
        try:
            return text_type(self.uid)
        except AttributeError:
            raise NotImplementedError('No `id` attribute - override `get_id`')
###POST
class Post(db.Model):
    """A forum post, attached to one user and one topic."""
    __tablename__ = 'posts'
    pid = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    content = db.Column(db.Text, nullable=False)
    # Fix: pass the callable, not datetime.utcnow() -- the original evaluated
    # it once at import time, stamping every row with the server start time.
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    uid = db.Column(db.Integer, db.ForeignKey('users.uid'), nullable=False)
    tid = db.Column(db.Integer, db.ForeignKey('topics.tid'), nullable=False)
    comments = db.relationship("Comment", backref="posts")
    likes = db.relationship("Like", backref="posts")

    def __init__(self, title, content, post_time, uid, tid):
        self.title = title
        self.content = content
        self.post_time = post_time
        self.uid = uid
        self.tid = tid
###Topic
class Topic(db.Model):
    """A (possibly nested) discussion topic; parent_id links to the parent."""
    __tablename__ = 'topics'
    tid = db.Column(db.Integer, primary_key=True)
    topic = db.Column(db.String(20))
    _description = db.Column(db.String(20))
    parent_id = db.Column(db.String(20))
    posts = db.relationship("Post", backref="topics")
    # Breadcrumb HTML cache, filled lazily by the /topic view.
    path = None

    def __init__(self, topic, _description, parent_id):
        self.topic = topic
        self._description = _description
        self.parent_id = parent_id

    @classmethod
    def all_topics(cls):
        """Return every topic row."""
        return cls.query.all()

    @classmethod
    def getTopicById(cls, tid):
        """Look a topic up by primary key (None when absent)."""
        return cls.query.get(tid)

    @classmethod
    def getTopicList(cls):
        # Yields one single-element set per topic (shape kept as-is).
        return [{t.topic} for t in Topic.all_topics()]
###Comments
class Comment(db.Model):
    """A comment on a post; father_id allows threaded replies."""
    __tablename__ = 'comments'
    cid = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.Text, nullable=False)
    # Fix: pass the callable, not datetime.utcnow() -- the original evaluated
    # it once at import time, stamping every row with the server start time.
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    uid = db.Column(db.Integer, db.ForeignKey('users.uid'), nullable=False)
    pid = db.Column(db.Integer, db.ForeignKey('posts.pid'), nullable=False)
    father_id = db.Column(db.Integer, db.ForeignKey('comments.cid'), nullable=True)

    def __init__(self, content, post_time, pid):
        self.content = content
        self.post_time = post_time
        self.pid = pid

    def get_id(self):
        """Return the comment id as text (mirrors User.get_id)."""
        try:
            return text_type(self.cid)
        except AttributeError:
            raise NotImplementedError('No `id` attribute - override `get_id`')
###Like
class Like(db.Model):
    """Records that one user liked one post."""
    __tablename__ = 'likes'
    lid = db.Column(db.Integer, primary_key=True)
    uid = db.Column(db.Integer, db.ForeignKey('users.uid'), nullable=False)
    pid = db.Column(db.Integer, db.ForeignKey('posts.pid'), nullable=False)
    # Fix: pass the callable, not datetime.utcnow() -- the original evaluated
    # it once at import time, stamping every row with the server start time.
    post_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)

    def __init__(self, uid, pid, post_time):
        self.uid = uid
        self.pid = pid
        self.post_time = post_time
|
{"/forum/model.py": ["/forum/__init__.py"], "/forum/api.py": ["/forum/__init__.py"]}
|
35,518
|
Sean858/ForumApp
|
refs/heads/master
|
/forum/api.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/14 下午4:10
# @Author : Gaoxiang Chen
# @Site :
# @File : api.py
# @Software: PyCharm
# ---------------------
from datetime import datetime
from flask import render_template, request, redirect, url_for, flash
from flask_login import login_required, current_user, login_user, logout_user
from form import LoginForm, RegistrationForm, UpdateAccountForm, PostForm, CommentForm, ViewTopicForm
from forum import app, db
from model import Topic, User, Post, Comment, Like
# Function
@app.route('/')
def index():
    """Render the landing page."""
    page = render_template('index.html', title='index')
    return page
# @app.route('/home')
# def home():
# form = ViewTopicForm()
# posts = Post.query.order_by(Post.post_time.desc()).all()
# if form.validate_on_submit():
# posts = Post.query.filter_by(tid=form.topic.data).first().order_by(Post.post_time.desc())
# return render_template('home.html', title='index', posts=posts, form=form)
@app.route('/home')
def home():
    """Home page: root topics (those without a parent) plus all users."""
    root_topics = Topic.query.filter(Topic.parent_id == None).order_by(Topic.tid)
    all_users = User.query.all()
    return render_template("home.html", topics=root_topics, users=all_users)
@app.route('/topic')
def topic():
    """Show one topic: its newest 50 posts, its children and a breadcrumb."""
    tid = int(request.args.get("topic"))
    current = Topic.query.filter(Topic.tid == tid).first()
    if not current:
        return error("That topic does not exist!")
    recent_posts = Post.query.filter(Post.tid == tid).order_by(Post.pid.desc()).limit(50)
    if not current.path:
        # Breadcrumb is computed once and cached on the instance.
        current.path = generateLinkPath(current.tid)
    children = Topic.query.filter(Topic.parent_id == tid).all()
    return render_template("topic.html", topic=current, posts=recent_posts, topics=children, path=current.path)
def error(errormessage):
    """Wrap an error message in red bold HTML for inline display."""
    return '<b style="color: red;">' + errormessage + '</b>'
# Account
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, _password=form.password.data, _email=form.email.data)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You are now able to log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(_email=form.email.data).first()
if user._password == form.password.data:
login_user(user, remember=form.remember.data)
return redirect("/home")
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user._email = form.email.data
current_user._password = form.password.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('home'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user._email
return render_template('account.html', title='Account', form=form)
# Post
@app.route('/new_post', methods=['POST', 'GET'])
@login_required
def new_post():
    """Create a post in the chosen topic.

    Fixes: @login_required was listed ABOVE @app.route, so Flask registered
    the unwrapped view and the login check never ran -- the route decorator
    must be outermost (listed first). Also dropped an unused `posts` query
    that ran after the flash.
    """
    form = PostForm()
    topic = Topic.query.filter(Topic.tid == form.topic.data).first()
    if form.validate_on_submit():
        post = Post(title=form.title.data, content=form.content.data, post_time=datetime.utcnow(), uid=current_user.uid,
                    tid=form.topic.data)
        current_user.posts.append(post)
        topic.posts.append(post)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created!', 'success')
        return redirect("/viewpost?post=" + str(post.pid))
    return render_template('create_post.html', title='New Post',
                           form=form)
@app.route('/viewpost')
def viewpost():
    """Display a single post with its comments, newest first."""
    pid = int(request.args.get("post"))
    target = Post.query.filter(Post.pid == pid).first()
    if not target:
        return error("That post does not exist!")
    # No pagination needed at current scale.
    post_comments = Comment.query.filter(Comment.pid == pid).order_by(Comment.cid.desc())
    return render_template("viewpost.html", post=target, comments=post_comments)
@app.route('/new_comment', methods=['POST', 'GET'])
@login_required
def comment():
    """Attach a comment to a post and return to the post view.

    Fix: decorator order -- @app.route must be outermost (listed first),
    otherwise the registered view bypasses @login_required entirely.
    """
    form = CommentForm()
    pid = int(request.args.get("post"))
    post = Post.query.filter(Post.pid == pid).first()
    if not post:
        return error("That post does not exist!")
    comment = Comment(content=form.content.data, post_time=datetime.utcnow(), pid=pid)
    current_user.comments.append(comment)
    post.comments.append(comment)
    db.session.commit()
    return redirect("/viewpost?post=" + str(pid))
@app.route('/like', methods=['POST', 'GET'])
@login_required
def like():
    """Like a post (at most once per user), then return to the post view.

    Fixes: (1) decorator order -- @app.route must be outermost so
    @login_required actually guards the view; (2) the duplicate check only
    inspected the FIRST like row of the post, so a user could like again
    whenever someone else's like happened to come first -- the query now
    filters on both pid and uid.
    """
    pid = int(request.args.get("post"))
    print(pid)
    post = Post.query.filter(Post.pid == pid).first()
    if not post:
        return error("That post does not exist!")
    existing = Like.query.filter(Like.pid == pid, Like.uid == current_user.uid).first()
    if existing:
        # NOTE(review): the message is built but never returned, matching the
        # original fall-through behaviour (still redirects below).
        error("You already like it!")
    else:
        like = Like(uid=current_user.uid, pid=pid, post_time=datetime.utcnow())
        current_user.likes.append(like)
        post.likes.append(like)
        db.session.commit()
    return redirect("/viewpost?post=" + str(pid))
def generateLinkPath(tid):
    """Build the " / "-separated breadcrumb of <a> links from the forum
    root down to topic `tid`."""
    crumbs = []
    node = Topic.query.filter(Topic.tid == tid).first()
    crumbs.append("<a href=\"/topic?topic=" + str(node.tid) + "\">" + node.topic + "</a>")
    # Walk up the parent chain, collecting a link per ancestor.
    parent = Topic.query.filter(Topic.tid == node.parent_id).first()
    while parent is not None:
        crumbs.append("<a href=\"/topic?topic=" + str(parent.tid) + "\">" + parent.topic + "</a>")
        parent = Topic.query.filter(Topic.tid == parent.parent_id).first()
    crumbs.append("<a href=\"/\">Forum Index</a>")
    # Collected leaf-to-root, so emit in reverse; result starts with " / ".
    result = ""
    for piece in reversed(crumbs):
        result += " / " + piece
    return result
#
# @app.route('/topic', methods=['POST', 'GET'])
# def topic():
# topics = Topic.queryAll()
# post = Post.query.filter().first()
# if not post:
# return error("That post does not exist!")
# likes = Like.query.filter(Like.uid == current_user.uid).first()
# if (likes and likes.pid == pid):
# error("You already like it!")
# else:
# like = Like(uid = current_user.uid, pid = pid, post_time = datetime.utcnow())
# current_user.likes.append(like)
# post.likes.append(like)
# db.session.commit()
# return redirect("/viewpost?post=" + str(pid))
|
{"/forum/model.py": ["/forum/__init__.py"], "/forum/api.py": ["/forum/__init__.py"]}
|
35,519
|
Sean858/ForumApp
|
refs/heads/master
|
/forum/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/11/14 8:44 PM
# @Author  : Gaoxiang Chen
# @Site    : 
# @File    : run.py.py
# @Software: PyCharm
# ---------------------
# Flask application setup: app, db, bcrypt and login manager singletons.
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
# api = Api(app)
# NOTE(review): database credentials and SECRET_KEY are hard-coded and
# committed; they should come from environment variables or a config file.
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://root:chen62575858@localhost/test1"
app.config['SECRET_KEY'] = '123456'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.init_app(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'

# Imported last so the route module can see `app`/`db` (circular-import
# pattern common to small Flask apps).
from forum import api
|
{"/forum/model.py": ["/forum/__init__.py"], "/forum/api.py": ["/forum/__init__.py"]}
|
35,520
|
SanjayJohn21358/AudioClassifier
|
refs/heads/master
|
/graph_examples.py
|
import glob
import os
import librosa
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
%matplotlib inline
def load_sound_files(file_paths):
    """Load each audio path with librosa; return the raw sample arrays
    (sample rates are discarded)."""
    return [librosa.load(path)[0] for path in file_paths]
def plot_waves(sound_names, raw_sounds):
    """One waveplot per sound, stacked vertically in a single tall figure."""
    fig = plt.figure(figsize=(25, 60), dpi=900)
    for idx, (name, samples) in enumerate(zip(sound_names, raw_sounds), start=1):
        plt.subplot(10, 1, idx)
        librosa.display.waveplot(np.array(samples), sr=22050)
        plt.title(name.title())
    plt.suptitle("Figure 1: Waveplot", x=0.5, y=0.915, fontsize=18)
    plt.show()
def plot_specgram(sound_names, raw_sounds):
    """One spectrogram per sound, stacked vertically in a single figure."""
    fig = plt.figure(figsize=(25, 60), dpi=900)
    for idx, (name, samples) in enumerate(zip(sound_names, raw_sounds), start=1):
        plt.subplot(10, 1, idx)
        specgram(np.array(samples), Fs=22050)
        plt.title(name.title())
    plt.suptitle("Figure 2: Spectrogram", x=0.5, y=0.915, fontsize=18)
    plt.show()
def plot_log_power_specgram(sound_names, raw_sounds):
    """One log-power spectrogram per sound, stacked in a single figure.

    NOTE(review): librosa.logamplitude was removed in librosa 0.6 in
    favour of librosa.amplitude_to_db -- confirm the pinned version.
    """
    fig = plt.figure(figsize=(25, 60), dpi=900)
    for idx, (name, samples) in enumerate(zip(sound_names, raw_sounds), start=1):
        plt.subplot(10, 1, idx)
        D = librosa.logamplitude(np.abs(librosa.stft(samples))**2, ref_power=np.max)
        librosa.display.specshow(D, x_axis='time', y_axis='log')
        plt.title(name.title())
    plt.suptitle("Figure 3: Log power spectrogram", x=0.5, y=0.915, fontsize=18)
    plt.show()
"""
sound_file_paths = ["57320-0-0-7.wav","24074-1-0-3.wav","15564-2-0-1.wav","31323-3-0-1.wav",
"46669-4-0-35.wav","89948-5-0-0.wav","40722-8-0-4.wav",
"103074-7-3-2.wav","106905-8-0-0.wav","108041-9-0-4.wav"]
sound_names = ["air conditioner","car horn","children playing",
"dog bark","drilling","engine idling", "gun shot",
"jackhammer","siren","street music"]
raw_sounds = load_sound_files(sound_file_paths)
plot_waves(sound_names,raw_sounds)
plot_specgram(sound_names,raw_sounds)
plot_log_power_specgram(sound_names,raw_sounds)
"""
|
{"/main.py": ["/parser.py", "/model.py"]}
|
35,521
|
SanjayJohn21358/AudioClassifier
|
refs/heads/master
|
/parser.py
|
import glob
import os
import librosa
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
def extract_feature(file_name):
    """
    Extract mel-frequency cepstral coefficients, chromagraph,
    spectral contrast, tonal centroid features from file, each averaged
    over time into a 1-D vector.

    :file_name: input, file name, str
    :returns: (mfccs, chroma, mel, contrast, tonnetz) float arrays
    """
    signal, sample_rate = librosa.load(file_name)
    magnitude = np.abs(librosa.stft(signal))
    mfccs = np.mean(librosa.feature.mfcc(y=signal, sr=sample_rate, n_mfcc=40).T, axis=0)
    chroma = np.mean(librosa.feature.chroma_stft(S=magnitude, sr=sample_rate).T, axis=0)
    mel = np.mean(librosa.feature.melspectrogram(signal, sr=sample_rate).T, axis=0)
    contrast = np.mean(librosa.feature.spectral_contrast(S=magnitude, sr=sample_rate).T, axis=0)
    # Tonnetz is computed on the harmonic component only.
    tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(signal), sr=sample_rate).T, axis=0)
    return mfccs, chroma, mel, contrast, tonnetz
def parse_audio_files(parent_dir, sub_dirs, file_ext="*.wav"):
    """
    Parse all audio files and extract features with corresponding labels.

    Fix: `np.int` was a deprecated alias of the builtin int (removed in
    NumPy 1.24); use `int` directly.

    :parent_dir: input, parent directory, str
    :sub_dirs: input, sub directories, iterable of str
    :file_ext: input, glob pattern for audio files, str
    :returns: (features, labels) np arrays; features has shape (n, 193)
    """
    features, labels = np.empty((0, 193)), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            try:
                mfccs, chroma, mel, contrast, tonnetz = extract_feature(fn)
            except Exception:
                # Skip unreadable/corrupt files but keep going.
                print("Error encountered while parsing file: ", fn)
                continue
            ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
            features = np.vstack([features, ext_features])
            # Labels are present in the filename (e.g. "<id>-<label>-...").
            # NOTE(review): splitting on '/' assumes POSIX paths and a fixed
            # directory depth -- TODO confirm, or use os.path.basename.
            labels = np.append(labels, fn.split('/')[3].split('-')[1])
    return np.array(features), np.array(labels, dtype=int)
def one_hot_encode(labels):
    """
    Return a (n_samples, n_classes) one-hot matrix for the integer labels,
    where n_classes is the number of distinct label values.
    """
    n_samples = len(labels)
    n_classes = len(np.unique(labels))
    encoded = np.zeros((n_samples, n_classes))
    encoded[np.arange(n_samples), labels] = 1
    return encoded
|
{"/main.py": ["/parser.py", "/model.py"]}
|
35,522
|
SanjayJohn21358/AudioClassifier
|
refs/heads/master
|
/model.py
|
import glob
import os
import librosa
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import sklearn.metrics
from matplotlib.pyplot import specgram
def create(tr_features, tr_labels, ts_features, ts_labels):
    """
    Define and train a two-hidden-layer softmax classifier in TensorFlow 1.x.

    Fix: the accuracy print referenced the undefined name `session`
    (NameError immediately after training); the session variable is `sess`.

    :tr_features: input, features of training set, np array
    :tr_labels: input, labels of training set (one-hot encoded), np array
    :ts_features: input, features of test set, np array
    :ts_labels: input, labels of test set (one-hot encoded), np array
    :returns: micro-averaged (precision, recall, f-score, support)
    """
    # Hyper-parameters
    training_epochs = 50
    n_dim = tr_features.shape[1]
    n_classes = 10
    n_hidden_units_one = 280
    n_hidden_units_two = 300
    sd = 1 / np.sqrt(n_dim)  # weight-init scale
    learning_rate = 0.01
    # Placeholders for inputs and outputs
    X = tf.placeholder(tf.float32, [None, n_dim])
    Y = tf.placeholder(tf.float32, [None, n_classes])
    # Layer 1 (tanh)
    W_1 = tf.Variable(tf.random_normal([n_dim, n_hidden_units_one], mean=0, stddev=sd))
    b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean=0, stddev=sd))
    h_1 = tf.nn.tanh(tf.matmul(X, W_1) + b_1)
    # Layer 2 (sigmoid)
    W_2 = tf.Variable(tf.random_normal([n_hidden_units_one, n_hidden_units_two],
                                       mean=0, stddev=sd))
    b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean=0, stddev=sd))
    h_2 = tf.nn.sigmoid(tf.matmul(h_1, W_2) + b_2)
    # Output layer (softmax); y_ is the predicted class distribution
    W = tf.Variable(tf.random_normal([n_hidden_units_two, n_classes], mean=0, stddev=sd))
    b = tf.Variable(tf.random_normal([n_classes], mean=0, stddev=sd))
    y_ = tf.nn.softmax(tf.matmul(h_2, W) + b)
    init = tf.initialize_all_variables()
    # Cross-entropy cost, optimized by plain gradient descent
    cost_function = -1 * tf.reduce_sum(Y * tf.log(y_))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
    correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    cost_history = np.empty(shape=[1], dtype=float)
    y_true, y_pred = None, None
    # Train full-batch for the given number of epochs, then evaluate
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(training_epochs):
            _, cost = sess.run([optimizer, cost_function], feed_dict={X: tr_features, Y: tr_labels})
            cost_history = np.append(cost_history, cost)
        y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: ts_features})
        y_true = sess.run(tf.argmax(ts_labels, 1))
        # Fix: was `session.run(...)` -- `session` is undefined here
        print("Test accuracy: ", round(sess.run(accuracy,
              feed_dict={X: ts_features, Y: ts_labels}), 3))
    # Plot the training-cost curve
    fig = plt.figure(figsize=(10, 8))
    plt.plot(cost_history)
    plt.axis([0, training_epochs, 0, np.max(cost_history)])
    plt.show()
    p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_true, y_pred, average="micro")
    print("F-Score:", round(f, 3))
    return p, r, f, s
|
{"/main.py": ["/parser.py", "/model.py"]}
|
35,523
|
SanjayJohn21358/AudioClassifier
|
refs/heads/master
|
/main.py
|
import glob
import os
import librosa
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
# NOTE(review): the local module `parser` shadows the stdlib module of the
# same name.
import parser
import model

#set dataset directory
parent_dir = 'UrbanSound8k/audio'
#set training and testing directories
#tr_sub_dirs = ["fold1","fold2","fold3","fold4","fold5","fold6","fold7","fold8","fold9"]
tr_sub_dirs = ['fold2','fold3','fold5']
ts_sub_dirs = ["fold10"]
#get features and labels (slow: parses every matching .wav in the folds)
tr_features, tr_labels = parser.parse_audio_files(parent_dir,tr_sub_dirs)
ts_features, ts_labels = parser.parse_audio_files(parent_dir,ts_sub_dirs)
#encode labels as one-hot vectors for the network
tr_labels = parser.one_hot_encode(tr_labels)
ts_labels = parser.one_hot_encode(ts_labels)
#run model and collect micro-averaged precision/recall/f-score/support
p,r,f,s = model.create(tr_features,tr_labels,ts_features,ts_labels)
|
{"/main.py": ["/parser.py", "/model.py"]}
|
35,524
|
spudtrooper/arduino-office-communicator
|
refs/heads/master
|
/repl.py
|
#!/usr/bin/env python
#
# Sends what you type to the serial port. For testing.
#
import sys
import re
import os
from common import *
def main(argv):
    """Prompt for digits and forward each one to the serial port; 'q' quits.

    NOTE(review): Python 2 code (`raw_input`), kept as-is.
    """
    serialInit()
    while True:
        entry = raw_input('[0-9|q]> ')
        if re.match(r'q', entry):
            break
        try:
            serialSend(int(entry))
        except:
            # Non-numeric input (other than 'q') is silently ignored.
            pass
if __name__ == '__main__':
main(sys.argv)
|
{"/repl.py": ["/common.py"]}
|
35,525
|
spudtrooper/arduino-office-communicator
|
refs/heads/master
|
/server.py
|
#!/usr/bin/env python
#
# Communicates with the arduino and responds to changes in Office
# Communicator. You can pass in an optional port, default is 8123.
#
# Examples:
#
# server.py # port 8123
# server.py 8181 # port 8181
#
import BaseHTTPServer
import urlparse
import urllib
import string
import cgi
import time
import sys
import re
import os
from urlparse import urlparse
from common import *
# Routing regular expressions
STATUS_RE = re.compile('^\/StatusUpdate.*status=(\d+).*')
VALIDFILE_RE = re.compile('^\/index.html|^\/|\/.*\..js|\/.*\..png$')
def parse_url_args(url):
    """Return the query-string parameters of `url` as a {name: value} dict.

    Extra '=' characters beyond the first in a pair are discarded, matching
    the original indexing behaviour.
    """
    query = urlparse(url)[4]
    pairs = [piece.split('=') for piece in query.split('&')]
    return {pair[0]: pair[1] for pair in pairs}
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves the static UI files and forwards Communicator status updates
    to the serial port."""
    def contentType(self,path):
        """
        Returns the Content-type for the full url path.
        Returns None (implicitly) for unrecognised extensions, which makes
        do_GET fall through to the 404 branch.
        """
        if path.endswith('.html'):
            return 'text/html'
        if path.endswith('.js'):
            return 'text/javascript'
        if path.endswith('.png'):
            return 'image/png'
    def do_GET(self):
        # Route precedence: status updates first, then static files, else 404.
        path = self.path
        just_path = re.sub(r'\?.*','',path)
        if just_path == '/':
            # Bare "/" serves index.html.
            path = re.sub(r'^\/','/index.html',just_path)
            just_path = path
        # Try to respond to a status update
        if re.match(STATUS_RE,path):
            args = parse_url_args(path)
            status = args.get('status')
            num = args.get('num')
            if status != None and num != None:
                self.send_response(200)
                # Target number is sent before the status code.
                serialSend(int(num))
                serialSend(int(status))
                return
            elif status != None:
                self.send_response(200)
                serialSend(int(status))
                return
        # Otherwise, just do the file
        if re.match(VALIDFILE_RE,path):
            ctype = self.contentType(path)
            if ctype is not None:
                fname = os.curdir + os.sep + path
                if os.path.exists(fname):
                    f = open(fname)
                    self.send_response(200)
                    self.send_header('Content-type',ctype)
                    self.end_headers()
                    self.wfile.write(f.read())
                    f.close()
                    return
        # Not found
        self.send_response(404)
def main(argv):
    """Start the HTTP server (port from argv[1], default 8123) until Ctrl-C."""
    # Allow the user to pass in a port
    httpPort = 8123
    if len(argv) > 1:
        httpPort = int(argv[1])
    # Open the serial link before accepting HTTP traffic.
    serialInit()
    try:
        server = BaseHTTPServer.HTTPServer(('', httpPort), MyHandler)
        print 'Started HTTP server on port %d' % httpPort
        server.serve_forever()
    except KeyboardInterrupt:
        print '^C received, shutting down server'
        server.socket.close()
if __name__ == '__main__':
main(sys.argv)
|
{"/repl.py": ["/common.py"]}
|
35,526
|
spudtrooper/arduino-office-communicator
|
refs/heads/master
|
/common.py
|
# Requirements:
#
# http://pypi.python.org/pypi/pyserial
import serial
import glob
import os
# Global serial port
# Module-global handle: set by serialInit(), written to by serialSend().
serialPort = None
def serialInit(serialPortStr=None):
    """
    Sets up serial connection and returns the device name.
    When no device name is given, findSerialPort() is used to guess one.
    Opens the port at 9600 baud and stores the handle in the module-global
    'serialPort' for serialSend().
    """
    global serialPort
    if serialPortStr is None:
        serialPortStr = findSerialPort()
    serialPort = serial.Serial(serialPortStr, 9600)
    print 'Using serial port: %s - %s' % (serialPortStr,(serialPort is not None))
    return serialPortStr
def serialSend(n):
"""
Sends a byte to the serial port 'serialPort'
"""
global serialPort
c = chr(int(n) + 48)
print 'Sending serial %d - %r' % (n,c)
try:
serialPort.write(c)
except:
print "Could not send %d" % n
def findSerialPort():
    """
    Guess the serial device the Arduino is attached to.

    Bug fix: os.name is never 'windows' -- its documented values are
    'posix', 'nt' and 'java' -- so the Windows branch could never fire.
    Returns the first matching /dev entry on POSIX, 'COM5' on Windows,
    and None when nothing is found.
    """
    if os.name == 'nt':
        return 'COM5'
    if os.name == 'posix':
        for port in glob.glob('/dev/tty*usb*') + glob.glob('/dev/*usb*') + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*'):
            return port
|
{"/repl.py": ["/common.py"]}
|
35,528
|
lechdo/flow_application_example
|
refs/heads/master
|
/main.py
|
# encoding:utf-8
from datetime import datetime, timedelta
from dummy import Flow, Dummy, steps, get_out_queue
flow = Flow()
# temps de travail
Working_time = 30
class MakeMyCodeDreamsTrue:
    """Drives Dummy's four-step cycle through a generator-based context manager."""
    def __enter__(self):
        """
        Entering the context performs initialization and returns the manager:
        here the manager is a generator that yields True once per step until
        the stop conditions are met, then calls __exit__ itself.
        """
        # Pre-load one full cycle of step markers into the flow queue.
        [flow.process_event.put(ele) for ele in steps]
        # initialize the status variable (currently active step marker)
        self.step = None
        def inner():
            nonlocal self
            the_end = datetime.now() + timedelta(seconds=Working_time)
            while True:
                # End signal: working time exceeded AND alert not yet posted.
                if datetime.now() > the_end and get_out_queue.empty():
                    get_out_queue.put(Ellipsis)
                # Program end: alert posted AND last step of a cycle completed.
                if not get_out_queue.empty() and self.step is steps.four:
                    break
                # Cycle reset: refill the queue when a cycle finished.
                # NOTE(review): this parses as '(self.step is steps.four) or
                # None', so the 'or None' part is dead code -- 'or self.step
                # is None' was probably intended; confirm before changing.
                if self.step is steps.four or None:
                    [flow.process_event.put(ele) for ele in steps]
                # Advance to the next step marker.
                self.step = flow.process_event.get()
                yield True
            self.__exit__(StopIteration, 0, None)
        manager = inner()
        return manager
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Action to take when leaving the context; intentionally a no-op here.
        """
    def main(self):
        """
        Execution entry point.

        The class uses its own context: the context manager generator yields
        True on every tick until told to stop, and a switcher dict (step
        marker -> Dummy action) dispatches the business action for the
        current step. All sequencing rules (step advance, cycle reset, stop
        alert, end-time computation) live in the context manager, so the
        business actions stay completely decoupled from the flow logic --
        the step list, their order and the stop condition can change without
        touching this method.
        """
        dummy = Dummy()
        # Switch-case: map each step marker to the action it triggers.
        switcher = {
            steps.one: dummy.work_hard,
            steps.two: dummy.play_hard,
            steps.three: dummy.danse_on_the_floor,
            steps.four: dummy.sleep
        }
        # The context runs as a generator producing True while it lives;
        # when it stops we leave the loop, then the context.
        with self as manager:
            for _ in manager:
                switcher[self.step]()
        print("time out buddy!")
# Run the demo flow for Working_time seconds when executed directly.
if __name__ == '__main__':
    MakeMyCodeDreamsTrue().main()
|
{"/main.py": ["/dummy.py"], "/dummy.py": ["/utils/meta.py"]}
|
35,529
|
lechdo/flow_application_example
|
refs/heads/master
|
/utils/meta.py
|
# encoding:utf-8
class Singleton(type):
    """
    Metaclass turning every class that uses it into a singleton: the first
    instantiation is cached and every later call returns the cached object.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        cached = cls._instances.get(cls)
        if cached is None:
            # First instantiation: build it once and remember it.
            cached = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = cached
        return cached
|
{"/main.py": ["/dummy.py"], "/dummy.py": ["/utils/meta.py"]}
|
35,530
|
lechdo/flow_application_example
|
refs/heads/master
|
/dummy.py
|
# encoding:utf-8
from utils.meta import Singleton
from queue import Queue
from collections import namedtuple
from time import sleep
Step = namedtuple("Step", "one two three four")
# To give each step marker its own identity we use bare object() instances:
# each has its own memory identity, checkable with 'is'. 'object' is the
# minimal instance in Python, so it is also the cheapest possible marker.
steps = Step(object(), object(), object(), object())
get_out_queue = Queue(maxsize=1)
# Python has two handy built-in singletons, None and Ellipsis. None already
# has a heavily-used meaning, so the less common Ellipsis serves as the
# kill signal posted on get_out_queue.
class Flow(metaclass=Singleton):
    """
    Singleton holding the event queue that drives the step cycle.
    """
    def __init__(self):
        # queue.Queue is thread-safe and guarantees each queued step marker
        # is consumed exactly once, which the cycle logic relies on.
        self.process_event = Queue()
class Dummy:
    """
    Stand-in worker exposing the four actions performed during one cycle.
    Every action prints its banner and then blocks for two seconds.
    """
    def danse_on_the_floor(self):
        """Action for step three."""
        print("3 I love Lady Gaga, I promess")
        sleep(2)

    def work_hard(self):
        """Action for step one."""
        print("1 My boss is a Dumb")
        sleep(2)

    def play_hard(self):
        """Action for step two."""
        print("2 Fallout is my religion")
        sleep(2)

    def sleep(self):
        """Action for step four (shadows time.sleep only as a method name)."""
        print("4 Even Chuck Norris sleep sometimes")
        sleep(2)
|
{"/main.py": ["/dummy.py"], "/dummy.py": ["/utils/meta.py"]}
|
35,542
|
jintwo/cfgen
|
refs/heads/master
|
/cfgen/utils.py
|
# -*- coding: utf-8 -*-
from os import getenv
import re
def walk(dict_, fn=lambda value: value):
    """Recursively apply *fn* to every leaf value of a (possibly nested) dict.

    Nested dicts are walked recursively, list values are mapped element-wise,
    and scalar values are passed through *fn*. A new dict is returned; the
    input is never mutated.
    """
    result = {}
    # .items() instead of the Python-2-only .iteritems(), and a list
    # comprehension instead of map() so the result is a real list on
    # Python 3 too (map() there returns a lazy iterator).
    for key, value in dict_.items():
        if isinstance(value, dict):
            result[key] = walk(value, fn)
        elif isinstance(value, list):
            result[key] = [fn(item) for item in value]
        else:
            result[key] = fn(value)
    return result
def env(value):
    """Expand every $(NAME) reference in *value* from the OS environment.

    Unknown variables expand to the empty string. Non-string values pass
    through untouched.
    """
    if not isinstance(value, basestring):
        return value
    expanded = value
    for token in re.findall(r'\$\(.*?\)', expanded):
        # token[2:-1] strips the '$(' prefix and ')' suffix.
        expanded = expanded.replace(token, getenv(token[2:-1], ''))
    return expanded
def subst(value, environ):
    """Expand every ${NAME} reference in *value* from the *environ* mapping.

    Unknown variables expand to the empty string. Non-string values pass
    through untouched.
    """
    if not isinstance(value, basestring):
        return value
    result = value
    matches = re.findall(r'\$\{.*?\}', result)
    if matches:
        for m in matches:
            var_name = m[2:-1]
            # Default to '' so a missing key degrades gracefully like env();
            # previously environ.get() could return None and str.replace()
            # would raise TypeError.
            result = result.replace(m, environ.get(var_name, ''))
    return result
# def include(value):
# if not isinstance(value, basestring):
# return value
# result = value
# if value.startswith('$include(') and value.endswith(')'):
# filename = result.replace('$include(', '')[:-1]
# result = json.loads(open(filename, 'r').read())
# return result
|
{"/cfgen/cli.py": ["/cfgen/utils.py", "/cfgen/renderer.py", "/cfgen/parser.py"]}
|
35,543
|
jintwo/cfgen
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import cfgen
setup(
    name='CFGen',
    author='Eugeny Volobuev',
    author_email='qulert@gmail.com',
    # Single source of truth for the version: the cfgen package itself.
    version=cfgen.__version__,
    url='http://github.com/jintwo/cfgen',
    install_requires=[
        'Jinja2>=2.6',
        'pyrsistent>=0.6.3'
    ],
    # YAML parsing is optional: install as CFGen[yaml_parser] to get PyYAML.
    extras_require={
        'yaml_parser': ['PyYAML>=3.11']
    },
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'cfgen = cfgen.cli:main'
        ]
    }
)
|
{"/cfgen/cli.py": ["/cfgen/utils.py", "/cfgen/renderer.py", "/cfgen/parser.py"]}
|
35,544
|
jintwo/cfgen
|
refs/heads/master
|
/cfgen/parser.py
|
# -*- coding: utf-8 -*-
import json
import warnings
def _get_parser_map():
    """Build the config-type -> parser-class registry.

    The YAML parser is only registered when PyYAML is importable; otherwise
    a warning is emitted and only JSON remains available.
    """
    parsers = {'json': JSONConfigParser}
    try:
        import yaml  # availability probe only
    except ImportError:
        warnings.warn('PyYAML not found.')
    else:
        parsers['yaml'] = YAMLConfigParser
    return parsers
def get_parser(config_type):
    """Return the parser class registered for *config_type*, JSON by default."""
    # Classes are always truthy, so 'or' only fires on a missing entry.
    return __parser_map.get(config_type) or JSONConfigParser
class BaseConfigParser(object):
    """Abstract base: concrete parsers turn a config string into a data structure."""

    def parse(self, buf):
        """Parse the string *buf*; subclasses must implement this."""
        raise NotImplementedError()

    def parse_file(self, filename):
        """Read *filename* in full and parse its contents."""
        with open(filename, 'r') as config_file:
            return self.parse(config_file.read())
class JSONConfigParser(BaseConfigParser):
    """Config parser for JSON documents."""

    def parse(self, buf):
        """Decode *buf* as JSON and return the resulting object."""
        return json.loads(buf)
class YAMLConfigParser(BaseConfigParser):
    """Config parser for YAML documents (requires PyYAML)."""

    def parse(self, buf):
        """Decode *buf* as YAML and return the resulting object.

        Uses safe_load: plain yaml.load() on config text can construct
        arbitrary Python objects via !!python tags, and PyYAML >= 6 makes
        the Loader argument mandatory anyway.
        """
        import yaml
        return yaml.safe_load(buf)
# Built once at import time, after the parser classes above are defined;
# get_parser() consults this registry.
__parser_map = _get_parser_map()
|
{"/cfgen/cli.py": ["/cfgen/utils.py", "/cfgen/renderer.py", "/cfgen/parser.py"]}
|
35,545
|
jintwo/cfgen
|
refs/heads/master
|
/cfgen/renderer.py
|
# -*- coding: utf-8 -*-
from jinja2 import Environment, FileSystemLoader, Template
class BaseRenderer(object):
    """Abstract base for template renderers rooted at a templates directory."""

    def __init__(self, templates_path):
        # Directory that named templates are resolved against.
        self.templates_path = templates_path

    def render(self, buf, data):
        """Render the in-memory template string *buf* with *data*."""
        raise NotImplementedError()

    def render_file(self, filename, data):
        """Read *filename* in full and render its contents with *data*."""
        with open(filename, 'r') as template_file:
            return self.render(template_file.read(), data)

    def render_template(self, template_name, data):
        """Render a template looked up by name; subclasses must implement."""
        raise NotImplementedError()
class JinjaRenderer(BaseRenderer):
    """Renderer backed by a Jinja2 Environment rooted at templates_path."""

    def __init__(self, templates_path):
        super(JinjaRenderer, self).__init__(templates_path)
        # Named templates are looked up relative to templates_path.
        self.env = Environment(loader=FileSystemLoader(self.templates_path))

    def render(self, buf, data):
        """Render an ad-hoc template string with *data* as its variables."""
        return Template(buf).render(**data)

    def render_template(self, template_name, data):
        """Render a template file resolved through the environment loader."""
        return self.env.get_template(template_name).render(**data)
def _get_renderer_map():
    """Registry of renderer-type name -> renderer class."""
    return {'jinja': JinjaRenderer}
# Built once at import time; get_renderer() consults this registry.
__renderer_map = _get_renderer_map()
def get_renderer(renderer_type):
    """Return the renderer class for *renderer_type*, Jinja by default."""
    # Classes are always truthy, so 'or' only fires on a missing entry.
    return __renderer_map.get(renderer_type) or JinjaRenderer
|
{"/cfgen/cli.py": ["/cfgen/utils.py", "/cfgen/renderer.py", "/cfgen/parser.py"]}
|
35,546
|
jintwo/cfgen
|
refs/heads/master
|
/cfgen/cli.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import codecs
from glob import glob
import warnings
from os import path
from pyrsistent import pmap
from .utils import walk, env, subst
from .renderer import get_renderer
from .parser import get_parser
def _create_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('settings', help='Settings file.')
parser.add_argument('profile', help='Profile name.')
parser.add_argument(
'-t', '--templates', help='Templates dir.', default='templates')
parser.add_argument(
'-o', '--output', help='Output dir.', default='output')
parser.add_argument(
'-p',
'--parser',
help='Config parser type.',
default='json',
choices=['json', 'yaml'])
parser.add_argument(
'-r',
'--renderer',
help='Template renderer type.',
default='jinja2',
choices=['jinja2'])
return parser
def _parse_config(parser_type, config_filename):
    """Parse *config_filename* using the parser registered for *parser_type*."""
    parser = get_parser(parser_type)()
    return parser.parse_file(config_filename)
def _prepare_renderer(renderer_type, templates_path):
    """Instantiate the renderer registered for *renderer_type* over *templates_path*."""
    return get_renderer(renderer_type)(templates_path)
def _eval_val(val, params):
    """Substitute ${...} profile params first, then expand $(...) env vars."""
    substituted = subst(val, params)
    return env(substituted)
def _eval_params(params):
    """Evaluate every leaf of *params* against the params mapping itself."""
    def evaluate(val):
        return _eval_val(val, params)
    return walk(params, evaluate)
def _render(renderer, template_name, params):
    """Render *template_name* with the fully evaluated *params*."""
    evaluated = _eval_params(params)
    return renderer.render_template(template_name, evaluated)
def _get_params(profiles_dict, profile_name):
    """Merge the shared '_' profile with *profile_name*'s profile (latter wins)."""
    shared = profiles_dict.get('_', {})
    selected = profiles_dict.get(profile_name, {})
    return pmap().update(shared).update(selected)
def main():
    """cfgen CLI entry point: parse config, evaluate profile params, render templates."""
    args = _create_arg_parser().parse_args()
    config = _parse_config(args.parser, args.settings)
    renderer = _prepare_renderer(args.renderer, args.templates)
    profiles = pmap(config.get('profiles', {}))
    templates = config.get('templates')
    if templates is None:
        raise Exception('Invalid templates')
    # parse templates: a glob string expands to matching template basenames.
    # NOTE(review): basestring/map indicate Python 2, where map() returns a
    # list that the isinstance(list) branch then normalizes; on Python 3 a
    # lazy map object would fall through -- confirm the target interpreter.
    if isinstance(templates, basestring):
        templates = map(path.basename,
                        glob(path.join(args.templates, templates)))
    if isinstance(templates, list):
        templates = pmap({t: {'output': t} for t in templates})
    if isinstance(templates, dict):
        templates = pmap(templates)
    for template_name, data in templates.items():
        template_profiles = data.get('profiles', {})
        # A missing profile is only a warning: rendering continues using the
        # shared '_' defaults.
        if (
            args.profile not in profiles and
            args.profile not in template_profiles
        ):
            warnings.warn(
                'Profile <{}> not found for <{}>'.format(
                    args.profile, template_name))
        output_filename = data.get('output', template_name)
        if not output_filename:
            raise Exception('Invalid output file name.')
        # Template-level params override global ones; 'profile' always wins.
        params = pmap().update(_get_params(profiles, args.profile))\
            .update(_get_params(template_profiles, args.profile))\
            .update({'profile': args.profile})
        output_data = _render(renderer, template_name, params)
        # The output filename itself may contain ${...}/$(...) references.
        output_filename = _eval_val(output_filename, params)
        output_path = path.join(args.output, output_filename)
        with codecs.open(output_path, 'w', 'utf8') as output_file:
            output_file.write(output_data)
if __name__ == '__main__':
    main()
|
{"/cfgen/cli.py": ["/cfgen/utils.py", "/cfgen/renderer.py", "/cfgen/parser.py"]}
|
36,054
|
DanWertheimer/pyJDBCConnector
|
refs/heads/master
|
/pyjdbcconnector/__init__.py
|
# Version
# Single source of truth for the package version; setup.py imports this.
__version__ = '0.2.1'
|
{"/setup.py": ["/pyjdbcconnector/__init__.py"], "/tests/test_config.py": ["/pyjdbcconnector/connectors.py"]}
|
36,055
|
DanWertheimer/pyJDBCConnector
|
refs/heads/master
|
/setup.py
|
import setuptools
from pyjdbcconnector import __version__
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
# Use the README as the PyPI long description.
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setuptools.setup(
    name='pyjdbcconnector',
    # Version imported from pyjdbcconnector/__init__.py (single source of truth).
    version=__version__,
    description='A high level JDBC API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/DanWertheimer/pyJDBCConnector',
    # NOTE(review): download_url hard-codes v0.2.1 -- keep in sync with __version__.
    download_url='https://github.com/DanWertheimer/pyJDBCConnector/archive/v0.2.1.tar.gz',
    author='Daniel Wertheimer',
    author_email='danwertheimer@gmail.com',
    packages=setuptools.find_packages(),
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3.6',
    ],
    zip_safe=False,
    python_requires='>=3.6',
    install_requires=[
        'JPype1 == 0.6.3',
        'JayDeBeApi >= 1.1.1',
        'PyHive == 0.6.2'
    ],
    project_urls={
        'Documentation': 'https://pyjdbcconnector.readthedocs.io/en/latest/',
        'Source': 'https://github.com/DanWertheimer/pyJDBCConnector',
        'Say Thanks!': 'https://saythanks.io/to/danwertheimer%40gmail.com'
    },
)
|
{"/setup.py": ["/pyjdbcconnector/__init__.py"], "/tests/test_config.py": ["/pyjdbcconnector/connectors.py"]}
|
36,056
|
DanWertheimer/pyJDBCConnector
|
refs/heads/master
|
/tests/test_config.py
|
import configparser
import pytest
from pyjdbcconnector.connectors import DenodoConnector, HiveConnector
def test_denodo():
    """from_config() must map every [connection]/[jdbc]/[trust_store] key
    from the INI fixture onto the connector's attributes."""
    dc = DenodoConnector()
    dc.from_config("./tests/utils/denodo_config.ini")
    assert dc.connection_url == 'test_url'
    assert dc.username == 'test_username'
    assert dc.password == 'test_password'
    assert dc.trust_store_location == '/trust/store/location'
    assert dc.trust_store_password == 'trust_store_password'
    assert dc.jdbc_location == '/path/to/denodo-vdp-jdbcdriver.jar'
    assert dc.java_classname == 'com.denodo.vdp.jdbc.Driver'
def test_hive():
    """from_config() must map the Hive [connection] keys (port is parsed to
    int; auth_method/kerberos_service_name may come from fallbacks)."""
    dc = HiveConnector()
    dc.from_config("./tests/utils/hive_config.ini")
    assert dc.host == 'host.name'
    assert dc.port == 10000
    assert dc.database == 'db'
    assert dc.username == 'test_username'
    assert dc.auth_method == 'KERBEROS'
    assert dc.kerberos_service_name == 'hive'
|
{"/setup.py": ["/pyjdbcconnector/__init__.py"], "/tests/test_config.py": ["/pyjdbcconnector/connectors.py"]}
|
36,057
|
DanWertheimer/pyJDBCConnector
|
refs/heads/master
|
/pyjdbcconnector/connectors.py
|
import configparser
from abc import ABC, abstractmethod
from typing import Any, Optional, Union
import jaydebeapi
import jpype
from pyhive import hive
# Either connection type this package can hand back to callers.
Connection = Union[jaydebeapi.Connection, hive.Connection]
# Forward references: the concrete connector classes are defined below.
Connector = Union['DenodoConnector', 'HiveConnector']
class BaseConnector(ABC):
    """Common interface every concrete database connector must implement."""

    @abstractmethod
    def from_config(self, config_path) -> 'BaseConnector':
        """Populate the connector's attributes from the config file at *config_path*.

        :raises NotImplementedError: subclasses must override this
        """
        raise NotImplementedError

    @abstractmethod
    def connect(self) -> Connection:
        """Open and return a live connection usable for querying the database.

        :raises NotImplementedError: subclasses must override this
        :return: a connection object for the target database
        :rtype: Connection
        """
        raise NotImplementedError

    @abstractmethod
    def disconnect(self):
        """Close/release the currently active connection.

        :raises NotImplementedError: subclasses must override this
        """
        raise NotImplementedError
class DenodoConnector(BaseConnector):
    """Connector for Denodo over JDBC (jaydebeapi), with optional SSL trust store."""
    def from_config(self, config_path) -> Connector:
        """Populate connection/jdbc/trust-store attributes from an INI file.

        [connection] is mandatory (AttributeError when missing); [jdbc] and
        [trust_store] are optional.
        """
        config = _load_config(config_path)
        _check_config_exists(config)
        if config.has_section('connection'):
            self.connection_url = config.get(
                'connection', 'connection_url')
            self.username = config.get('connection', 'username')
            self.password = config.get('connection', 'password')
        else:
            raise AttributeError("'connection' not found")
        if config.has_section('jdbc'):
            self.jdbc_location = config.get('jdbc', 'jdbc_location')
            self.java_classname = config.get(
                'jdbc', 'java_classname', fallback='com.denodo.vdp.jdbc.Driver')
        else:
            # Best-effort: jdbc settings may instead be set via configure_jdbc().
            print("could not find 'jdbc' config")
        if config.has_section('trust_store'):
            self.trust_store_location = config.get(
                'trust_store', 'trust_store_location')
            self.trust_store_password = config.get(
                'trust_store', 'trust_store_password')
            self.trust_store_required = True
        else:
            self.trust_store_required = False
        return self
    def configure_jdbc(
        self, jdbc_location: str, java_classname: str = "com.denodo.vdp.jdbc.Driver"
    ) -> Connector:
        """sets the jdbc connection information
        :param jdbc_location: location of the jdbc .jar file on your system
        :type jdbc_location: str
        :param java_classname: java class name for the jdbc, defaults to 'com.denodo.vdp.jdbc.Driver' for Denodo
        :type java_classname: str, optional
        :return: a DenodoConnector object
        :rtype: DenodoConnector
        """
        self.jdbc_location = jdbc_location
        self.java_classname = java_classname
        return self
    def set_trust_store(
        self, trust_store_location: str, trust_store_password: str
    ) -> Connector:
        """sets the trust store location for SSL connection
        :param trust_store_location: location of the .jks file on system
        :type trust_store_location: str
        :param trust_store_password: password for the .jks file
        :type trust_store_password: str
        :return: a DenodoConnector object
        :rtype: DenodoConnector
        """
        self.trust_store_location = trust_store_location
        self.trust_store_password = trust_store_password
        return self
    def require_trust_store(self) -> Connector:
        # Fluent toggle: forces connect() to start the JVM with the trust store.
        self.trust_store_required = True
        return self
    def connect(self) -> Connection:
        """connect through a jdbc string

        Requires connection_url/username/password and jdbc settings to have
        been set (via from_config or the fluent setters) beforehand.
        NOTE(review): if neither from_config() nor require_trust_store() was
        called, self.trust_store_required is unset and this raises
        AttributeError -- confirm intended call order.

        :return: a jaydebeapi connection object which can be read through pandas
        :rtype: jaydebeapi.Connection
        """
        if self.trust_store_required:
            _startJVM(self.trust_store_location,
                      self.trust_store_password,
                      self.jdbc_location)
        # Create connection
        connection = jaydebeapi.connect(
            jclassname=self.java_classname,
            url=self.connection_url,
            driver_args=[self.username, self.password],
            jars=self.jdbc_location,
        )
        self.connection = connection
        return connection
    def disconnect(self):
        """Shut down the JVM and clear the stored connection handle."""
        _stopJVM()
        self.connection = ''
        return self
class HiveConnector(BaseConnector):
    """Connector for Hive via pyhive (Kerberos auth by default)."""

    def from_config(self, config_path) -> Connector:
        """Populate connection attributes from an INI file.

        [connection] must provide host/database/username; port (10000),
        auth_method ('KERBEROS') and kerberos_service_name ('hive') have
        fallbacks. Raises AttributeError when [connection] is missing.
        """
        config = _load_config(config_path)
        _check_config_exists(config)
        # Consistent with DenodoConnector.from_config: probe the section
        # directly instead of the inverted "if not ... in ... else" chain.
        if not config.has_section('connection'):
            raise AttributeError("connection not found")
        self.host = config.get('connection', 'host')
        # getint() parses the value and applies the fallback in one step.
        self.port = config.getint('connection', 'port', fallback=10000)
        self.database = config.get('connection', 'database')
        self.username = config.get('connection', 'username')
        self.auth_method = config.get(
            'connection', 'auth_method', fallback='KERBEROS')
        self.kerberos_service_name = config.get(
            'connection', 'kerberos_service_name', fallback='hive')
        return self

    def connect(self) -> Connection:
        """Open a pyhive connection from the stored attributes and remember it."""
        connection = hive.connect(host=self.host,
                                  port=self.port,
                                  database=self.database,
                                  username=self.username,
                                  auth=self.auth_method,
                                  kerberos_service_name=self.kerberos_service_name)
        self.connection = connection
        return connection

    def disconnect(self) -> Connector:
        """Close the active connection if there is one; safe to call anytime.

        Robustness fix: getattr() avoids an AttributeError when disconnect()
        is called before connect() ever ran.
        """
        if getattr(self, 'connection', None):
            print("ending active session")
            self.connection.close()
            self.connection = ''
        else:
            print("there is no active session")
        return self
def _startJVM(trust_store_location, trust_store_password, jdbc_location):
    """Start the JVM with SSL trust-store and classpath flags; no-op when already running."""
    # Initialize the JVM
    jvmPath = jpype.getDefaultJVMPath()
    if jpype.isJVMStarted():
        # print() returns None, so this is simply 'log and bail out'.
        return print("JVM is already running")
    else:
        print("starting JVM")
        jpype.startJVM(
            jvmPath,
            f"-Djavax.net.ssl.trustStore={trust_store_location}",
            f"-Djavax.net.ssl.trustStorePassword={trust_store_password}",
            f"-Djava.class.path={jdbc_location}",
        )
def _stopJVM():
    # Shuts down the JVM started by _startJVM().
    # NOTE(review): jpype generally cannot restart a JVM in the same
    # process after shutdown -- confirm reconnect-after-disconnect works.
    jpype.shutdownJVM()
def _load_config(config_path):
config = configparser.ConfigParser()
config.read(config_path)
return config
def _check_config_exists(config):
if len(config.sections()) == 0:
raise FileNotFoundError("Failed to open/find configuration file")
else:
print("Loaded config successfully")
|
{"/setup.py": ["/pyjdbcconnector/__init__.py"], "/tests/test_config.py": ["/pyjdbcconnector/connectors.py"]}
|
36,062
|
rembold-cs151-master/HW08
|
refs/heads/master
|
/test_hw8.py
|
# IMPORTANT!
# You don't need to do anything with this file
# It is only to provide some automated testing
# to give you feedback on if your code is working
# correctly! Please do not change!
import pytest
import os
import Prob2
import Prob3
def numcheck(num, ans, tol=0.02):
    """Return True when *num* lies strictly within a fractional *tol* of *ans*."""
    lower = ans * (1 - tol)
    upper = ans * (1 + tol)
    return lower < num < upper
class Test_Prob1:
    """Problem 1 is a written answer; only the PDF's presence is checked."""
    def test_pdf_present(self):
        assert os.path.isfile('HW8.pdf') == True
class Test_Prob2:
    """Autograder for the Prob2.Queue class (add/remove, self.q attribute)."""
    def report(self,args):
        # Helper for building failure messages (currently unused by the tests).
        return f"\nProgram fails to get the correct value with parameters of {args}."
    def test_can_create_instance(self):
        x = Prob2.Queue()
        assert isinstance(x, Prob2.Queue)
    def test_q_has_length(self):
        x = Prob2.Queue()
        assert len(x.q) == 0, 'Queue not initially empty or no data attribute self.q exists?'
    def test_q_add_3(self):
        x = Prob2.Queue()
        x.add(1)
        x.add('hi')
        x.add((2,5))
        assert len(x.q) == 3, 'Queue is improper length after adding 3 items?'
    def test_remove(self):
        # FIFO behavior: the first value added must come back first.
        x = Prob2.Queue()
        x.add(1)
        x.add(2)
        x.add(3)
        value = x.remove()
        assert len(x.q) == 2, 'Value was not actually removed from the queue?'
        assert value == 1, f'Incorrect value returned from queue. Expected "1" and got "{value}".'
    def test_q_empty(self):
        # Removing from an empty queue must return the exact sentinel string.
        x = Prob2.Queue()
        x.add('bazinga')
        v = x.remove()
        v = x.remove()
        assert v == 'The queue is empty!', 'Method not handling an empty queue properly! Are you returning the exact string correctly?'
class Test_Prob3:
    """Autograder for Prob3.Fraction: printing, reduce (non-mutating),
    float conversion, inverse, and */÷ against Fractions and integers.
    Note that arithmetic results are NOT expected to be auto-reduced."""
    def test_can_create_instance(self):
        A = Prob3.Fraction(1,2)
        assert isinstance(A, Prob3.Fraction)
    def test_prints_nicely(self):
        vals = {
            (1,2):'1/2',
            (5,2):'5/2',
            (4,8):'4/8',
            (9,3):'9/3'
        }
        for key in vals:
            A = Prob3.Fraction(*key)
            assert str(A) == vals[key], f'The fraction of Fraction{key} did not print properly as {vals[key]}'
    def test_reduces_proper_value(self):
        vals = {
            (1,2):'1/2',
            (3,6):'1/2',
            (8,24):'1/3',
            (10,100):'1/10'
        }
        for key in vals:
            A = Prob3.Fraction(*key)
            assert str(A.reduce()) == vals[key], f'The fraction of Fraction{key} did not properly reduce to a printed value of {vals[key]}.'
            assert isinstance(A.reduce(), Prob3.Fraction), 'You should still be returning a Fraction type object, but you are not.'
    def test_reduces_proper_mutability(self):
        # reduce() must return a NEW Fraction, leaving the original unchanged.
        A = Prob3.Fraction(4,8)
        B = Prob3.Fraction(4,8)
        C = B.reduce()
        assert str(A) == str(B), 'In the process of reducing you changed the value of your Fraction. You want to return a NEW FRACTION without changing anything in place!'
    def test_float_conversion(self):
        vals = {
            (4,5):float(4/5),
            (2,3):float(2/3),
            (7,1):float(7/1),
        }
        for key in vals:
            A = Prob3.Fraction(*key)
            assert float(A) == vals[key], f'Conversion to a float is not equaling the desired value of {vals[key]} for Fraction{key}.'
    def test_inverse(self):
        vals = {
            (3,2):'2/3',
            (1,8):'8/1',
            (2,16):'16/2'
        }
        for key in vals:
            A = Prob3.Fraction(*key)
            assert str(A.inverse()) == vals[key], f'The inverse is not correct. Should be {vals[key]} but is getting a printed value of {str(A.inverse())}'
            assert isinstance(A.inverse(), Prob3.Fraction), 'You should be returning a Fraction object type.'
    def test_multiply_fractions(self):
        vals = {
            ((1,2),(1,2)):'1/4',
            ((3,4),(1,2)):'3/8',
            ((6,3),(1,8)):'6/24',
        }
        for key in vals:
            A = Prob3.Fraction(*key[0])
            B = Prob3.Fraction(*key[1])
            assert str(A*B) == vals[key], f'Multiplying Fraction{key[0]} by Fraction{key[1]} should equal {vals[key]} but is equaling {str(A*B)}'
            assert isinstance(A*B, Prob3.Fraction), 'Multiplying two fractions should return an object of type Fraction.'
    def test_multiply_fraction_by_integer(self):
        # Both A*int and int*A (__mul__ and __rmul__) must work.
        vals = {
            ((1,2),3):'3/2',
            ((3,4),2):'6/4',
            ((8,5),10):'80/5'
        }
        for key in vals:
            A = Prob3.Fraction(*key[0])
            B = key[1]
            assert str(A*B) == vals[key], f'Multiplying Fraction{key[0]} by {key[1]} should give {vals[key]} but instead gives {str(A*B)}.'
            assert str(B*A) == vals[key], f'Multiplying {key[1]} by Fraction{key[0]} should give {vals[key]} but instead gives {str(A*B)}.'
            assert isinstance(A*B, Prob3.Fraction), 'Multiplying a fraction by an integer should return an object of type Fraction.'
    def test_divide_by_fraction(self):
        vals = {
            ((1,2),(1,2)):'2/2',
            ((3,4),(1,2)):'6/4',
            ((6,3),(1,8)):'48/3',
        }
        for key in vals:
            A = Prob3.Fraction(*key[0])
            B = Prob3.Fraction(*key[1])
            assert str(A/B) == vals[key], f'Dividing Fraction{key[0]} by Fraction{key[1]} should equal {vals[key]} but is equaling {str(A/B)}'
            assert isinstance(A*B, Prob3.Fraction), 'Dividing a fraction by another fraction should return an object of type Fraction.'
    def test_divide_by_integer(self):
        vals = {
            ((1,2),3):'1/6',
            ((3,4),4):'3/16',
            ((6,3),2):'6/6',
        }
        for key in vals:
            A = Prob3.Fraction(*key[0])
            B = key[1]
            assert str(A/B) == vals[key], f'Dividing Fraction{key[0]} by {key[1]} should equal {vals[key]} but is equaling {str(A/B)}'
            assert isinstance(A*B, Prob3.Fraction), 'Dividing a fraction by an integer should return an object of type Fraction.'
|
{"/test_hw8.py": ["/Prob2.py", "/Prob3.py"]}
|
36,063
|
rembold-cs151-master/HW08
|
refs/heads/master
|
/Prob3.py
|
##################################################
# Name:
# Collaborators:
# Est Time Spent (hrs):
##################################################
# Class definition
class Fraction:
    """A rational number num/den.

    The skeleton's methods were defined without parameters (not even self),
    so even the template's own guard line `Fraction(1,2)` raised TypeError;
    implemented here per the template's docstrings and the autograder.
    Arithmetic never auto-reduces (4/8 stays 4/8); call reduce() for the
    simplest form. Multiplication and division accept a Fraction or an int.
    """
    def __init__(self, num, den):
        """Store numerator *num* and denominator *den* as given (no reduction)."""
        self.num = num
        self.den = den
    def __str__(self):
        '''
        Returns the string numerator/denominator (with no spaces)
        Usage:
            >>> print(Fraction(4,6))
            4/6
        '''
        return f'{self.num}/{self.den}'
    def reduce(self):
        '''
        Returns a NEW Fraction object of self in simplest form. Does
        NOT simplify self in-place.
        Usage:
            >>> print(Fraction(8,24).reduce())
            1/3
        '''
        from math import gcd
        divisor = gcd(self.num, self.den)
        return Fraction(self.num // divisor, self.den // divisor)
    def __float__(self):
        '''
        Returns self as a floating point number.
        Usage:
            >>> float(Fraction(1,2))
            0.5
        '''
        return self.num / self.den
    def inverse(self):
        '''
        Returns the inverse (reciprocal) of self as a new Fraction.
        Usage:
            >>> print(Fraction(2,3).inverse())
            3/2
        '''
        return Fraction(self.den, self.num)
    def __mul__(self, other):
        '''
        Returns self * other, where other is a Fraction or an integer.
        The result is NOT reduced.
        Usage:
            >>> print(Fraction(1,2) * Fraction(4,5))
            4/10
            >>> print(Fraction(1,2) * 3)
            3/2
        '''
        if isinstance(other, Fraction):
            return Fraction(self.num * other.num, self.den * other.den)
        return Fraction(self.num * other, self.den)
    def __rmul__(self, other):
        '''
        Supports integer * Fraction by delegating to __mul__
        (multiplication of rationals is commutative).
        Usage:
            >>> print(3 * Fraction(1,2))
            3/2
        '''
        return self * other
    def __truediv__(self, other):
        '''
        Returns self / other, where other is a Fraction (multiply by its
        inverse) or an integer (scale the denominator).
        Usage:
            >>> print(Fraction(1,2) / Fraction(4,5))
            5/8
            >>> print(Fraction(1,2) / 6)
            1/12
        '''
        if isinstance(other, Fraction):
            return self * other.inverse()
        return Fraction(self.num, self.den * other)
# Remember that if you don't want to test things
# in the console manually you can add testing lines
# below the following 'if' statement to have them run
# when the program is run directly but not to
# interfere with the autotesting when the program
# is imported
if __name__ == '__main__':
    # Creating a instance of Fraction
    A = Fraction(1,2)
|
{"/test_hw8.py": ["/Prob2.py", "/Prob3.py"]}
|
36,064
|
rembold-cs151-master/HW08
|
refs/heads/master
|
/Prob2.py
|
##################################################
# Name:
# Collaborators:
# Est Time Spent (hrs):
##################################################
# Class definition
class Queue:
    """A simple FIFO queue.

    The skeleton only contained `pass`; implemented per the autograder:
    the backing list must be the attribute `self.q` (inspected directly by
    the tests), and removing from an empty queue returns a sentinel string.
    """
    def __init__(self):
        # The autograder checks len(self.q), so the attribute name matters.
        self.q = []
    def add(self, item):
        """Append *item* to the back of the queue."""
        self.q.append(item)
    def remove(self):
        """Pop and return the front item (FIFO); sentinel string when empty."""
        if not self.q:
            # Exact string required by the assignment's tests.
            return 'The queue is empty!'
        # list.pop(0) is O(n), which is fine for this assignment's sizes.
        return self.q.pop(0)
if __name__ == '__main__':
    # Creating a instance of Queue
    q = Queue()
|
{"/test_hw8.py": ["/Prob2.py", "/Prob3.py"]}
|
36,083
|
hanguyenhant/do-an-3
|
refs/heads/master
|
/LDA.py
|
import gensim
from gensim import corpora
class LDA:
    """Fit a gensim LDA topic model on a file of '<fff>'-delimited lines."""
    def load_data(self, path):
        # Takes the second '<fff>'-separated field of each line and tokenizes
        # it on whitespace. NOTE(review): a line without '<fff>' raises
        # IndexError -- input is presumably pre-cleaned; confirm the format.
        self._data = []
        with open(path, encoding="utf-8") as f:
            lines = f.read().splitlines()
            for line in lines:
                content = line.split('<fff>')[1]
                self._data.append([word for word in content.split()])
    def create_dictionary(self):
        # Keep tokens that appear in at least 10 documents and in at most
        # 90% of all documents.
        self._dictionary = corpora.Dictionary(self._data)
        self._dictionary.filter_extremes(no_below=10, no_above=.9)
    def implement_lda(self):
        # Bag-of-words corpus -> 8-topic LDA (50 passes); prints the top 5
        # words of each topic.
        doc_term_matrix = [self._dictionary.doc2bow(doc) for doc in self._data]
        Lda = gensim.models.ldamodel.LdaModel
        ldamodel = Lda(doc_term_matrix, num_topics=8, id2word = self._dictionary, passes=50)
        print(ldamodel.print_topics(num_topics=8, num_words=5))
# Pipeline entry: train LDA on the corpus exported by DataReader.
lda = LDA()
lda.load_data('clean_data.txt')
lda.create_dictionary()
lda.implement_lda()
|
{"/main.py": ["/DataCollection.py", "/DataReader.py"]}
|
36,084
|
hanguyenhant/do-an-3
|
refs/heads/master
|
/main.py
|
from DataCollection import DataCollection
from DataReader import DataReader
# data_collection = DataCollection()
# data_collection.connect_database()
# data_collection.collect_data_from_vnexpress()
# data_collection.collect_data_from_vietnamnet()
# data_collection.collect_data_from_tuoitre()
# data_collection.collect_data_from_24h()
# data_collection.collect_data_from_thanhnien()
# data_collection.collect_data_from_nguoilaodong()
# data_collection.save_to_database()
# data_reader = DataReader()
# data_reader.connect_database()
# data_reader.load_topics()
# data_reader.clean_data()
# data_reader.save_text_processed('clean_data.txt')
|
{"/main.py": ["/DataCollection.py", "/DataReader.py"]}
|
36,085
|
hanguyenhant/do-an-3
|
refs/heads/master
|
/DataCollection.py
|
import pymysql.cursors
import feedparser
from urllib.request import urlopen
from urllib.request import build_opener
from bs4 import BeautifulSoup
from http.client import IncompleteRead
class DataCollection:
    """Scrape Vietnamese news articles from several RSS feeds into MySQL."""
    def __init__(self):
        # Rows of [category, title, content, url] accumulated by the
        # collect_data_from_* methods and flushed by save_to_database().
        self._baivietList = list()
    def connect_database(self):
        """Open the MySQL connection later used by save_to_database().

        NOTE(review): credentials are hard-coded; move them to configuration.
        """
        #1. Connect to the database
        self._connection = pymysql.connect(host='127.0.0.1',
                                user='root',
                                password='123456',
                                db='baiviet',
                                charset='utf8',
                                )
def collect_data_from_vnexpress(self):
#2. Lấy link rss và thể loại - tiêu đề - nội dung - link bài viết.
#==> Lưu vào CSDL
list_rss = ['https://vnexpress.net/rss/the-gioi.rss', 'https://vnexpress.net/rss/the-thao.rss',
'https://vnexpress.net/rss/phap-luat.rss', 'https://vnexpress.net/rss/kinh-doanh.rss',
'https://vnexpress.net/rss/so-hoa.rss', 'https://vnexpress.net/rss/giao-duc.rss',
'https://vnexpress.net/rss/suc-khoe.rss', 'https://vnexpress.net/rss/du-lich.rss']
# count=0
for link_rss in list_rss:
d = feedparser.parse(link_rss)
the_loai = link_rss.split('/')[4].split('.')[0]
if the_loai == 'so-hoa':
the_loai = 'cong-nghe'
for post in d.entries:
if hasattr(post, 'link'):
# count+=1
# print("\n%d. %s - %s: %s" % (count, the_loai, post.title, post.link))
html = urlopen(post.link)
bsObj = BeautifulSoup(html.read(), "html.parser")
content = bsObj.findAll("p", {"class":"Normal"})
contentList = list()
for i in range(len(content)-1):
contentList.append(content[i].get_text())
contentString = " ".join(contentList)
if len(contentString) > 21000:
contentString = contentString[:21000]
if contentString != "":
self._baivietList.append([the_loai, post.title, contentString, post.link])
    def collect_data_from_vietnamnet(self):
        """Fetch articles from vietnamnet.vn RSS feeds into self._baivietList."""
        list_rss = ['https://vietnamnet.vn/rss/the-gioi.rss', 'https://vietnamnet.vn/rss/the-thao.rss',
                    'https://vietnamnet.vn/rss/phap-luat.rss', 'https://vietnamnet.vn/rss/kinh-doanh.rss',
                    'https://vietnamnet.vn/rss/cong-nghe.rss', 'https://vietnamnet.vn/rss/giao-duc.rss',
                    'https://vietnamnet.vn/rss/suc-khoe.rss']
        for link_rss in list_rss:
            d = feedparser.parse(link_rss)
            # Category slug comes straight from the feed URL.
            the_loai = link_rss.split('/')[4].split('.')[0]
            for post in d.entries:
                if hasattr(post, 'link'):
                    # A browser User-Agent is set; presumably the site rejects
                    # the default urllib agent — verify.
                    opener = build_opener()
                    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
                    html = opener.open(post.link)
                    bsObj = BeautifulSoup(html.read(), "html.parser")
                    # NOTE(review): unlike the other scrapers, no None guard on
                    # find(); a page layout change would raise AttributeError.
                    content = bsObj.find("div", {"class":"ArticleContent"}).findAll('p')
                    contentList = list()
                    # Drop the last <p> (original behaviour; presumably a footer).
                    for i in range(len(content)-1):
                        contentList.append(content[i].get_text())
                    contentString = " ".join(contentList)
                    # Cap article length to fit the DB column.
                    if len(contentString) > 21000:
                        contentString = contentString[:21000]
                    if contentString != "":
                        self._baivietList.append([the_loai, post.title, contentString, post.link])
    def collect_data_from_tuoitre(self):
        """Fetch articles from tuoitre.vn RSS feeds into self._baivietList."""
        list_rss = ['https://tuoitre.vn/rss/the-gioi.rss', 'https://tuoitre.vn/rss/the-thao.rss',
                    'https://tuoitre.vn/rss/phap-luat.rss', 'https://tuoitre.vn/rss/kinh-doanh.rss',
                    'https://tuoitre.vn/rss/nhip-song-so.rss', 'https://tuoitre.vn/rss/giao-duc.rss',
                    'https://tuoitre.vn/rss/suc-khoe.rss', 'https://tuoitre.vn/rss/du-lich.rss']
        for link_rss in list_rss:
            d = feedparser.parse(link_rss)
            the_loai = link_rss.split('/')[4].split('.')[0]
            # Normalize Tuoi Tre's tech slug onto the shared category name.
            if the_loai == 'nhip-song-so':
                the_loai = 'cong-nghe'
            for post in d.entries:
                if hasattr(post, 'link'):
                    html = urlopen(post.link)
                    bsObj = BeautifulSoup(html.read(), "html.parser")
                    # Skip pages that lack the expected article body container.
                    if bsObj.find("div", {"id":"main-detail-body"}) != None:
                        content = bsObj.find("div", {"id":"main-detail-body"}).findAll('p')
                        contentList = list()
                        # Drop the last <p> (original behaviour; presumably a footer).
                        for i in range(len(content)-1):
                            contentList.append(content[i].get_text())
                        contentString = " ".join(contentList)
                        # Cap article length to fit the DB column.
                        if len(contentString) > 21000:
                            contentString = contentString[:21000]
                        if contentString != "":
                            self._baivietList.append([the_loai, post.title, contentString, post.link])
def collect_data_from_24h(self):
#2. Lấy link rss và thể loại - tiêu đề - nội dung - link bài viết.
#==> Lưu vào CSDL
list_rss = ['https://www.24h.com.vn/upload/rss/thethao.rss', 'https://www.24h.com.vn/upload/rss/bongda.rss',
'https://www.24h.com.vn/upload/rss/congnghethongtin.rss', 'https://www.24h.com.vn/upload/rss/suckhoedoisong.rss',
'https://www.24h.com.vn/upload/rss/dulich24h.rss', 'https://www.24h.com.vn/upload/rss/giaoducduhoc.rss',
'https://www.24h.com.vn/upload/rss/anninhhinhsu.rss', 'https://www.24h.com.vn/upload/rss/taichinhbatdongsan.rss']
# count=0
for link_rss in list_rss:
d = feedparser.parse(link_rss)
the_loai = link_rss.split('/')[5].split('.')[0]
if the_loai == 'bongda' or 'thethao':
the_loai = 'the-thao'
if the_loai == 'giaoducduhoc':
the_loai = 'giao-duc'
if the_loai == 'congnghethongtin':
the_loai = 'cong-nghe'
if the_loai == 'dulich24h':
the_loai = 'du-lich'
if the_loai == 'suckhoedoisong':
the_loai = 'suc-khoe'
if the_loai == 'taichinhbatdongsan':
the_loai = 'kinh-doanh'
if the_loai == 'anninhhinhsu':
the_loai = 'phap-luat'
for post in d.entries:
if hasattr(post, 'link'):
# count+=1
# print("\n%d. %s - %s: %s" % (count, the_loai, post.title, post.link))
html = urlopen(post.link)
bsObj = BeautifulSoup(html.read(), "html.parser")
if bsObj.find("article", {"class":"nwsHt nwsUpgrade"}) != None:
content = bsObj.find("article", {"class":"nwsHt nwsUpgrade"}).findAll('p')
contentList = list()
for i in range(len(content)-1):
contentList.append(content[i].get_text())
contentString = " ".join(contentList)
if len(contentString) > 21000:
contentString = contentString[:21000]
if contentString != "":
self._baivietList.append([the_loai, post.title, contentString, post.link])
def collect_data_from_thanhnien(self):
list_rss = ['https://thanhnien.vn/rss/the-gioi/goc-nhin.rss', 'https://thanhnien.vn/rss/viet-nam/phap-luat.rss',
'https://thanhnien.vn/rss/giao-duc/du-hoc.rss', 'https://thanhnien.vn/rss/giao-duc/tuyen-sinh.rss',
'https://thanhnien.vn/rss/giao-duc/nguoi-thay.rss', 'https://thanhnien.vn/rss/giao-duc/chon-nghe.rss',
'https://thanhnien.vn/rss/cong-nghe-thong-tin/san-pham-moi.rss', 'https://thanhnien.vn/rss/cong-nghe/xu-huong.rss',
'https://thanhnien.vn/rss/cong-nghe-thong-tin/y-tuong.rss', 'https://thanhnien.vn/rss/cong-nghe-thong-tin/kinh-nghiem.rss',
'https://thanhnien.vn/rss/suc-khoe/lam-dep.rss', 'https://thanhnien.vn/rss/doi-song/gioi-tinh.rss',
'https://thanhnien.vn/rss/suc-khoe/khoe-dep-moi-ngay.rss', 'https://thanhnien.vn/rss/suc-khoe/yeu-da-day.rss',
]
for link_rss in list_rss:
d = feedparser.parse(link_rss)
the_loai = link_rss.split('/')[5].split('.')[0]
if the_loai == 'goc-nhin':
the_loai = 'the-gioi'
if the_loai == 'du-hoc' or 'tuyen-sinh' or 'chon-truong' or 'nguoi-thay' or 'chon-nghe':
the_loai = 'giao-duc'
if the_loai == 'san-pham-moi' or 'xu-huong' or 'y-tuong' or 'kinh-nghiem':
the_loai = 'cong-nghe'
if the_loai == 'lam-dep' or 'gioi-tinh' or 'khoe-dep-moi-ngay' or 'yeu-da-day':
the_loai = 'suc-khoe'
for post in d.entries:
if hasattr(post, 'link'):
# count+=1
# print("\n%d. %s - %s: %s" % (count, the_loai, post.title, post.link))
html = urlopen(post.link)
bsObj = BeautifulSoup(html.read(), "html.parser")
if bsObj.find("div", {"id":"abody"}) != None:
content = bsObj.find("div", {"id":"abody"}).findAll('div')
contentList = list()
for i in range(len(content)-1):
contentList.append(content[i].get_text())
contentString = " ".join(contentList)
# print(contentString)
if len(contentString) > 21000:
contentString = contentString[:21000]
if contentString != "":
self._baivietList.append([the_loai, post.title, contentString, post.link])
    def collect_data_from_nguoilaodong(self):
        """Fetch articles from nld.com.vn RSS feeds into self._baivietList."""
        list_rss = ['https://nld.com.vn/thoi-su-quoc-te.rss', 'https://nld.com.vn/kinh-te.rss',
                    'https://nld.com.vn/phap-luat.rss', 'https://nld.com.vn/the-thao.rss',
                    'https://nld.com.vn/cong-nghe-thong-tin.rss', 'https://nld.com.vn/suc-khoe.rss',
                    'https://nld.com.vn/giao-duc-khoa-hoc.rss'
                    ]
        for link_rss in list_rss:
            d = feedparser.parse(link_rss)
            the_loai = link_rss.split('/')[3].split('.')[0]
            # Normalize NLD's slugs onto the shared category names.
            if the_loai == 'thoi-su-quoc-te':
                the_loai = 'the-gioi'
            if the_loai == 'kinh-te':
                the_loai = 'kinh-doanh'
            if the_loai == 'cong-nghe-thong-tin':
                the_loai = 'cong-nghe'
            if the_loai == 'giao-duc-khoa-hoc':
                the_loai = 'giao-duc'
            for post in d.entries:
                if hasattr(post, 'link'):
                    # A browser User-Agent is set; presumably the site rejects
                    # the default urllib agent — verify.
                    opener = build_opener()
                    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
                    html = opener.open(post.link)
                    bsObj = BeautifulSoup(html.read(), "html.parser")
                    # Skip pages that lack the expected article body container.
                    if bsObj.find("div", {"id":"divNewsContent"}) != None:
                        content = bsObj.find("div", {"id":"divNewsContent"}).findAll('p')
                        contentList = list()
                        # Drop the last <p> (original behaviour; presumably a footer).
                        for i in range(len(content)-1):
                            contentList.append(content[i].get_text())
                        contentString = " ".join(contentList)
                        # Cap article length to fit the DB column.
                        if len(contentString) > 21000:
                            contentString = contentString[:21000]
                        if contentString != "":
                            self._baivietList.append([the_loai, post.title, contentString, post.link])
def save_to_database(self):
#3. lưu vào CSDL
try:
with self._connection.cursor() as cursor:
sql = """INSERT INTO `bai_viet` (`the_loai`, `tieu_de`, `noi_dung`, `duong_dan`) VALUES (%s, %s, %s, %s)"""
for baiviet in self._baivietList:
cursor.execute(sql,(baiviet[0], baiviet[1], baiviet[2], baiviet[3]))
self._connection.commit()
finally:
self._connection.close()
|
{"/main.py": ["/DataCollection.py", "/DataReader.py"]}
|
36,086
|
hanguyenhant/do-an-3
|
refs/heads/master
|
/DataReader.py
|
import re, string
from pyvi import ViTokenizer, ViPosTagger
import pymysql.cursors
class DataReader:
    """Load scraped articles from MySQL, clean them, and export them to text."""
    def connect_database(self):
        """Fetch all (the_loai, noi_dung) rows into self._result.

        NOTE(review): credentials are hard-coded; the connection is closed
        right after the fetch, so this method is connect-fetch-close in one.
        """
        #1. Connect to the database
        self._connection = pymysql.connect(host='127.0.0.1',
                                user='root',
                                password='123456',
                                db='baiviet',
                                charset='utf8',
                                )
        try:
            with self._connection.cursor() as cursor:
                # Create a new record
                sql = """SELECT the_loai, noi_dung FROM bai_viet"""
                cursor.execute(sql)
                self._result = cursor.fetchall()
        finally:
            self._connection.close()
def load_topics(self):
self._topics = []
topics = set()
for doc in self._result:
topics.add(doc[0])
self._topics = sorted(topics)
# print(self._topics)
def clean_data(self):
self._data = []
for doc in self._result:
# print(doc[0])
label = self._topics.index(doc[0])
# print(label)
words = str(doc[1])
words = words.replace('\n',' ')
#Loại bỏ ký tự đặc biệt và ký tự số
words = re.sub(r'[^\w\s]','', str(words), re.UNICODE)
words = re.sub(r'(\b\d+\b)','', str(words), re.UNICODE)
#Tách từ
doc_clean = ViTokenizer.tokenize(words)
doc_clean = doc_clean.lower()
doc_clean = doc_clean.split()
#Loại bỏ stop words
stop_words = list()
f = open("vietnamese-stopwords-dash.txt", mode="r", encoding="utf-8")
for line in f:
stop_words.append(line[:-1]); #bỏ \n ở cuối từ
f.close()
words = [word for word in doc_clean if word not in stop_words and word not in string.punctuation]
content = ' '.join(words)
# print(content)
self._data.append(str(label) + '<fff>' + content)
def save_text_processed(self, path):
with open(path, 'w', encoding="utf-8") as f:
f.write('\n'.join(self._data))
|
{"/main.py": ["/DataCollection.py", "/DataReader.py"]}
|
36,089
|
andrekos/satflow
|
refs/heads/main
|
/satflow/data/datasets.py
|
from typing import Tuple, Union, List, Optional
import numpy as np
from nowcasting_dataset.dataset.datasets import NetCDFDataset
from nowcasting_dataset.config.model import Configuration
from nowcasting_dataset.consts import (
SATELLITE_DATA,
SATELLITE_X_COORDS,
SATELLITE_Y_COORDS,
SATELLITE_DATETIME_INDEX,
NWP_DATA,
NWP_Y_COORDS,
NWP_X_COORDS,
TOPOGRAPHIC_DATA,
DATETIME_FEATURE_NAMES,
)
class SatFlowDataset(NetCDFDataset):
    """Loads data saved by the `prepare_ml_training_data.py` script.
    Adapted from predict_pv_yield.

    Wraps NetCDFDataset and splits each batch into past (x) and future (y)
    partitions at `current_timestep_index`.
    """
    def __init__(
        self,
        n_batches: int,
        src_path: str,
        tmp_path: str,
        configuration: Configuration,
        cloud: str = "gcp",
        # NOTE(review): mutable (list) default argument — shared across calls;
        # mutating it would leak between instances.  Safe only if never mutated.
        required_keys: Union[Tuple[str], List[str]] = [
            NWP_DATA,
            NWP_X_COORDS,
            NWP_Y_COORDS,
            SATELLITE_DATA,
            SATELLITE_X_COORDS,
            SATELLITE_Y_COORDS,
            SATELLITE_DATETIME_INDEX,
            TOPOGRAPHIC_DATA,
        ]
        + list(DATETIME_FEATURE_NAMES),
        history_minutes: int = 30,
        forecast_minutes: int = 60,
        combine_inputs: bool = False,
    ):
        """
        Args:
            n_batches: Number of batches available on disk.
            src_path: The full path (including 'gs://') to the data on
                Google Cloud storage.
            tmp_path: The full path to the local temporary directory
                (on a local filesystem).
            configuration: nowcasting_dataset Configuration object.
            cloud: Cloud provider key passed through to NetCDFDataset.
            required_keys: Batch keys this dataset must provide.
            history_minutes: Minutes of history included per example.
            forecast_minutes: Minutes of forecast included per example.
            combine_inputs: Stored flag; not used in this class's own logic.
        """
        super().__init__(
            n_batches,
            src_path,
            tmp_path,
            configuration,
            cloud,
            required_keys,
            history_minutes,
            forecast_minutes,
        )
        # SatFlow specific changes, i.e. which timestep to split on.
        self.required_keys = list(required_keys)
        self.combine_inputs = combine_inputs
        # History is assumed to be sampled at 5-minute steps; +1 so the split
        # index points just past the last history frame.
        self.current_timestep_index = (history_minutes // 5) + 1
    def __getitem__(self, batch_idx: int):
        """Return (x, y): past inputs and future satellite targets,
        split along the time axis at current_timestep_index."""
        batch = super().__getitem__(batch_idx)
        # Need to partition out past and future sat images here, along with the rest of the data
        past_satellite_data = batch[SATELLITE_DATA][:, : self.current_timestep_index]
        future_sat_data = batch[SATELLITE_DATA][:, self.current_timestep_index :]
        x = {
            SATELLITE_DATA: past_satellite_data,
            SATELLITE_X_COORDS: batch.get(SATELLITE_X_COORDS, None),
            SATELLITE_Y_COORDS: batch.get(SATELLITE_Y_COORDS, None),
            SATELLITE_DATETIME_INDEX: batch[SATELLITE_DATETIME_INDEX][
                :, : self.current_timestep_index
            ],
        }
        y = {
            SATELLITE_DATA: future_sat_data,
            SATELLITE_DATETIME_INDEX: batch[SATELLITE_DATETIME_INDEX][
                :, self.current_timestep_index :
            ],
        }
        # Datetime features are inputs only, so only the past slice is kept.
        for k in list(DATETIME_FEATURE_NAMES):
            if k in self.required_keys:
                x[k] = batch[k][:, : self.current_timestep_index]
        if NWP_DATA in self.required_keys:
            # NWP time axis is dim 2 here (after batch and channel).
            past_nwp_data = batch[NWP_DATA][:, :, : self.current_timestep_index]
            x[NWP_DATA] = past_nwp_data
            x[NWP_X_COORDS] = batch.get(NWP_X_COORDS, None)
            x[NWP_Y_COORDS] = batch.get(NWP_Y_COORDS, None)
        if TOPOGRAPHIC_DATA in self.required_keys:
            # Need to expand dims to get a single channel one
            # Results in topographic maps with [Batch, Channel, H, W]
            x[TOPOGRAPHIC_DATA] = np.expand_dims(batch[TOPOGRAPHIC_DATA], axis=1)
        return x, y
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,090
|
andrekos/satflow
|
refs/heads/main
|
/tests/test_models.py
|
from satflow.models import LitMetNet, Perceiver
from nowcasting_utils.models.base import list_models, create_model
from nowcasting_dataset.consts import (
SATELLITE_DATA,
SATELLITE_X_COORDS,
SATELLITE_Y_COORDS,
SATELLITE_DATETIME_INDEX,
NWP_DATA,
NWP_Y_COORDS,
NWP_X_COORDS,
TOPOGRAPHIC_DATA,
TOPOGRAPHIC_X_COORDS,
TOPOGRAPHIC_Y_COORDS,
DATETIME_FEATURE_NAMES,
)
import yaml
import torch
import pytest
def load_config(config_file):
    """Read *config_file* (YAML) and return its parsed contents."""
    with open(config_file, "r") as config_handle:
        return yaml.load(config_handle, Loader=yaml.FullLoader)
def test_perceiver_creation():
    """Build a Perceiver from its Hydra config and smoke-test a forward pass."""
    config = load_config("satflow/configs/model/perceiver.yaml")
    config.pop("_target_")  # This key is only used by Hydra instantiation
    model = Perceiver(**config)
    # Batch of 2, 6 timesteps; image inputs are channels-last.
    x = {
        SATELLITE_DATA: torch.randn(
            (2, 6, config["input_size"], config["input_size"], config["sat_channels"])
        ),
        TOPOGRAPHIC_DATA: torch.randn((2, config["input_size"], config["input_size"], 1)),
        NWP_DATA: torch.randn(
            (2, 6, config["input_size"], config["input_size"], config["nwp_channels"])
        ),
        "forecast_time": torch.randn(2, config["forecast_steps"], 1),
    }
    query = torch.randn((2, config["input_size"] * config["sat_channels"], config["queries_dim"]))
    model.eval()
    with torch.no_grad():
        out = model(x, query=query)
    # MetNet creates predictions for the center 1/4th
    assert out.size() == (
        2,
        config["forecast_steps"] * config["input_size"],
        config["sat_channels"] * config["input_size"],
    )
    assert not torch.isnan(out).any(), "Output included NaNs"
def test_metnet_creation():
    """Build a LitMetNet from its Hydra config and smoke-test a forward pass."""
    config = load_config("satflow/configs/model/metnet.yaml")
    config.pop("_target_")  # This key is only used by Hydra instantiation
    model = LitMetNet(**config)
    # MetNet expects original HxW to be 4x the input size
    x = torch.randn(
        (2, 12, config["input_channels"], config["input_size"] * 4, config["input_size"] * 4)
    )
    model.eval()
    with torch.no_grad():
        out = model(x)
    # MetNet creates predictions for the center 1/4th
    assert out.size() == (
        2,
        config["forecast_steps"],
        config["output_channels"],
        config["input_size"] // 4,
        config["input_size"] // 4,
    )
    assert not torch.isnan(out).any(), "Output included NaNs"
@pytest.mark.parametrize("model_name", list_models())
def test_create_model(model_name):
    """
    Test that create_model works for every registered model name.
    Args:
        model_name: registry name supplied by the parametrize decorator.
    Returns:
        None — only checks construction does not raise.
    """
    # TODO Load options from all configs and make sure they work
    model = create_model(model_name)
    pass
@pytest.mark.skip(
    "Perceiver has changed in SatFlow, doesn't have the same options as the one on HF"
)
def test_load_hf():
    """
    Construct the PerceiverIO model from the HuggingFace Hub (untrained).
    Currently the only HF model is PerceiverIO; change in future to do all ones.
    Returns:
        None — only checks construction does not raise.
    """
    model = create_model("hf_hub:openclimatefix/perceiver-io")
    pass
@pytest.mark.skip(
    "Perceiver has changed in SatFlow, doesn't have the same options as the one on HF"
)
def test_load_hf_pretrained():
    """
    Construct the PerceiverIO model from the HuggingFace Hub with weights.
    Currently the only HF model is PerceiverIO; change in future to do all ones.
    Returns:
        None — only checks construction does not raise.
    """
    model = create_model("hf_hub:openclimatefix/perceiver-io", pretrained=True)
    pass
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,091
|
andrekos/satflow
|
refs/heads/main
|
/satflow/version.py
|
# satflow package version (PEP 440); bumped on release.
__version__ = "0.3.26"
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,092
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/gan/common.py
|
import functools
import torch
from torch.nn import init
def get_norm_layer(norm_type="instance"):
    """Return a factory that builds the requested normalization layer.

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    'batch' uses learnable affine parameters and tracks running statistics
    (mean/stddev); 'instance' uses neither; 'none' yields an Identity module.
    """
    if norm_type == "batch":
        return functools.partial(torch.nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == "instance":
        return functools.partial(
            torch.nn.InstanceNorm2d, affine=False, track_running_stats=False
        )
    if norm_type == "none":
        def norm_layer(x):
            return torch.nn.Identity()
        return norm_layer
    raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
def init_weights(net, init_type="normal", init_gain=0.02):
    """Initialize network weights in place.
    Parameters:
        net (network)   -- network to be initialized
        init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)    -- scaling factor for normal, xavier and orthogonal.
    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    def init_func(m):  # define the initialization function, applied to every submodule
        classname = m.__class__.__name__
        # Conv*/Linear layers: apply the requested weight init, zero the bias.
        if hasattr(m, "weight") and (
            classname.find("Conv") != -1 or classname.find("Linear") != -1
        ):
            if init_type == "normal":
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == "xavier":
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == "kaiming":
                init.kaiming_normal_(m.weight.data, a=0, mode="fan_in")
            elif init_type == "orthogonal":
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError(
                    "initialization method [%s] is not implemented" % init_type
                )
            if hasattr(m, "bias") and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif (
            classname.find("BatchNorm2d") != -1
        ):  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    print("initialize network with %s" % init_type)
    net.apply(init_func)  # apply the initialization function <init_func>
def init_net(net, init_type="normal", init_gain=0.02):
    """Initialize *net*'s weights in place and return the same network.

    Parameters:
        net (network)     -- the network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    Convenience wrapper around init_weights so callers can chain:
    `model = init_net(build_model())`.
    """
    init_weights(net, init_type, init_gain=init_gain)
    return net
def cal_gradient_penalty(
    netD, real_data, fake_data, device, type="mixed", constant=1.0, lambda_gp=10.0
):
    """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
    Arguments:
        netD (network)              -- discriminator network
        real_data (tensor array)    -- real images
        fake_data (tensor array)    -- generated images from the generator
        device (str)                -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        type (str)                  -- if we mix real and fake data or not [real | fake | mixed].
                                       NOTE(review): shadows the builtin `type`; renaming would
                                       break keyword callers, so it is kept.
        constant (float)            -- the constant used in formula ( ||gradient||_2 - constant)^2
        lambda_gp (float)           -- weight for this loss; <= 0.0 disables the penalty
    Returns (penalty, gradients); (0.0, None) when lambda_gp <= 0.0.
    """
    if lambda_gp > 0.0:
        if type == "real":  # either use real images, fake images, or a linear interpolation of two.
            interpolatesv = real_data
        elif type == "fake":
            interpolatesv = fake_data
        elif type == "mixed":
            # One random mixing coefficient per sample, broadcast over all
            # remaining elements via expand + view back to the input shape.
            alpha = torch.rand(real_data.shape[0], 1, device=device)
            alpha = (
                alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0])
                .contiguous()
                .view(*real_data.shape)
            )
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError("{} not implemented".format(type))
        # Gradients must flow back to the interpolated points themselves.
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        gradients = torch.autograd.grad(
            outputs=disc_interpolates,
            inputs=interpolatesv,
            grad_outputs=torch.ones(disc_interpolates.size()).to(device),
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )
        gradients = gradients[0].view(real_data.size(0), -1)  # flat the data
        # Penalize deviation of the per-sample gradient norm from `constant`.
        gradient_penalty = (
            ((gradients + 1e-16).norm(2, dim=1) - constant) ** 2
        ).mean() * lambda_gp  # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,093
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/conv_lstm.py
|
from typing import Any, Dict, Union
import pytorch_lightning as pl
import torch
import torch.nn as nn
import numpy as np
from nowcasting_utils.models.base import register_model
from nowcasting_utils.models.loss import get_loss
from satflow.models.layers.ConvLSTM import ConvLSTMCell
import torchvision
@register_model
class EncoderDecoderConvLSTM(pl.LightningModule):
    """LightningModule wrapper around the ConvLSTM encoder/decoder below.

    Forward input is (batch, time, channel, height, width); the model emits
    `forecast_steps` future frames.
    """
    def __init__(
        self,
        hidden_dim: int = 64,
        input_channels: int = 12,
        out_channels: int = 1,
        forecast_steps: int = 48,
        lr: float = 0.001,
        visualize: bool = False,
        loss: Union[str, torch.nn.Module] = "mse",
        pretrained: bool = False,
        conv_type: str = "standard",
    ):
        super(EncoderDecoderConvLSTM, self).__init__()
        self.forecast_steps = forecast_steps
        self.criterion = get_loss(loss)
        self.lr = lr
        self.visualize = visualize
        self.model = ConvLSTM(input_channels, hidden_dim, out_channels, conv_type=conv_type)
        # NOTE(review): visualize_step below reads self.input_channels and
        # self.output_channels, which are never assigned here (only stored in
        # hparams via save_hyperparameters) — calling it would raise.
        self.save_hyperparameters()
    @classmethod
    def from_config(cls, config):
        """Alternate constructor from a plain config dict (registry hook)."""
        return EncoderDecoderConvLSTM(
            hidden_dim=config.get("num_hidden", 64),
            input_channels=config.get("in_channels", 12),
            out_channels=config.get("out_channels", 1),
            forecast_steps=config.get("forecast_steps", 1),
            lr=config.get("lr", 0.001),
        )
    def forward(self, x, future_seq=0, hidden_state=None):
        return self.model.forward(x, future_seq, hidden_state)
    def configure_optimizers(self):
        # DeepSpeedCPUAdam provides 5x to 7x speedup over torch.optim.adam(w)
        # optimizer = torch.optim.adam()
        return torch.optim.Adam(self.parameters(), lr=self.lr)
    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x, self.forecast_steps)
        # Swap channel and time axes so y_hat matches y's (B, T, C, H, W) layout.
        y_hat = torch.permute(y_hat, dims=(0, 2, 1, 3, 4))
        # Generally only care about the center x crop, so the model can take into account the clouds in the area without
        # being penalized for that, but for now, just do general MSE loss, also only care about first 12 channels
        # the logger you used (in this case tensorboard)
        # if self.visualize:
        #    if np.random.random() < 0.01:
        #        self.visualize_step(x, y, y_hat, batch_idx)
        loss = self.criterion(y_hat, y)
        self.log("train/loss", loss, on_step=True)
        # Also log the loss of each individual forecast frame.
        frame_loss_dict = {}
        for f in range(self.forecast_steps):
            frame_loss = self.criterion(y_hat[:, f, :, :, :], y[:, f, :, :, :]).item()
            frame_loss_dict[f"train/frame_{f}_loss"] = frame_loss
        self.log_dict(frame_loss_dict, on_step=False, on_epoch=True)
        return loss
    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x, self.forecast_steps)
        # Swap channel and time axes so y_hat matches y's (B, T, C, H, W) layout.
        y_hat = torch.permute(y_hat, dims=(0, 2, 1, 3, 4))
        val_loss = self.criterion(y_hat, y)
        # Save out loss per frame as well
        frame_loss_dict = {}
        # y_hat = torch.moveaxis(y_hat, 2, 1)
        for f in range(self.forecast_steps):
            frame_loss = self.criterion(y_hat[:, f, :, :, :], y[:, f, :, :, :]).item()
            frame_loss_dict[f"val/frame_{f}_loss"] = frame_loss
        self.log("val/loss", val_loss, on_step=True, on_epoch=True)
        self.log_dict(frame_loss_dict, on_step=False, on_epoch=True)
        return val_loss
    def test_step(self, batch, batch_idx):
        # NOTE(review): unlike training/validation, no permute is applied here
        # before the loss — confirm whether that is intentional.
        x, y = batch
        y_hat = self(x, self.forecast_steps)
        loss = self.criterion(y_hat, y)
        return loss
    def visualize_step(self, x, y, y_hat, batch_idx, step="train"):
        """Log input/target/prediction frame grids to TensorBoard."""
        tensorboard = self.logger.experiment[0]
        # Add all the different timesteps for a single prediction, 0.1% of the time
        if len(x.shape) == 5:
            # Timesteps per channel
            images = x[0].cpu().detach()
            for i, t in enumerate(images):  # Now would be (C, H, W)
                t = [torch.unsqueeze(img, dim=0) for img in t]
                image_grid = torchvision.utils.make_grid(t, nrow=self.input_channels)
                tensorboard.add_image(
                    f"{step}/Input_Image_Stack_Frame_{i}", image_grid, global_step=batch_idx
                )
            images = y[0].cpu().detach()
            for i, t in enumerate(images):  # Now would be (C, H, W)
                t = [torch.unsqueeze(img, dim=0) for img in t]
                image_grid = torchvision.utils.make_grid(t, nrow=self.output_channels)
                tensorboard.add_image(
                    f"{step}/Target_Image_Stack_Frame_{i}", image_grid, global_step=batch_idx
                )
            images = y_hat[0].cpu().detach()
            for i, t in enumerate(images):  # Now would be (C, H, W)
                t = [torch.unsqueeze(img, dim=0) for img in t]
                image_grid = torchvision.utils.make_grid(t, nrow=self.output_channels)
                tensorboard.add_image(
                    f"{step}/Generated_Stack_Frame_{i}", image_grid, global_step=batch_idx
                )
class ConvLSTM(torch.nn.Module):
    """Two-layer ConvLSTM encoder + two-layer ConvLSTM decoder + 3D-CNN head."""
    def __init__(self, input_channels, hidden_dim, out_channels, conv_type: str = "standard"):
        super().__init__()
        """ ARCHITECTURE
        # Encoder (ConvLSTM)
        # Encoder Vector (final hidden state of encoder)
        # Decoder (ConvLSTM) - takes Encoder Vector as input
        # Decoder (3D CNN) - produces regression predictions for our model
        """
        self.encoder_1_convlstm = ConvLSTMCell(
            input_dim=input_channels,
            hidden_dim=hidden_dim,
            kernel_size=(3, 3),
            bias=True,
            conv_type=conv_type,
        )
        self.encoder_2_convlstm = ConvLSTMCell(
            input_dim=hidden_dim,
            hidden_dim=hidden_dim,
            kernel_size=(3, 3),
            bias=True,
            conv_type=conv_type,
        )
        self.decoder_1_convlstm = ConvLSTMCell(
            input_dim=hidden_dim,
            hidden_dim=hidden_dim,
            kernel_size=(3, 3),
            bias=True,  # nf + 1
            conv_type=conv_type,
        )
        self.decoder_2_convlstm = ConvLSTMCell(
            input_dim=hidden_dim,
            hidden_dim=hidden_dim,
            kernel_size=(3, 3),
            bias=True,
            conv_type=conv_type,
        )
        # Maps the stacked decoder hidden states to the output channels.
        self.decoder_CNN = nn.Conv3d(
            in_channels=hidden_dim,
            out_channels=out_channels,
            kernel_size=(1, 3, 3),
            padding=(0, 1, 1),
        )
    def autoencoder(self, x, seq_len, future_step, h_t, c_t, h_t2, c_t2, h_t3, c_t3, h_t4, c_t4):
        """Encode seq_len input frames, then decode future_step predictions.

        Returns sigmoid-activated predictions of shape (B, C, T, H, W).
        """
        outputs = []
        # encoder: consume the input sequence, carrying hidden/cell states.
        for t in range(seq_len):
            h_t, c_t = self.encoder_1_convlstm(
                input_tensor=x[:, t, :, :], cur_state=[h_t, c_t]
            )  # we could concat to provide skip conn here
            h_t2, c_t2 = self.encoder_2_convlstm(
                input_tensor=h_t, cur_state=[h_t2, c_t2]
            )  # we could concat to provide skip conn here
        # encoder_vector: final hidden state of the second encoder layer.
        encoder_vector = h_t2
        # decoder: autoregressively roll out future_step predictions.
        for t in range(future_step):
            h_t3, c_t3 = self.decoder_1_convlstm(
                input_tensor=encoder_vector, cur_state=[h_t3, c_t3]
            )  # we could concat to provide skip conn here
            h_t4, c_t4 = self.decoder_2_convlstm(
                input_tensor=h_t3, cur_state=[h_t4, c_t4]
            )  # we could concat to provide skip conn here
            encoder_vector = h_t4
            outputs += [h_t4]  # predictions
        # (B, T, hidden, H, W) -> (B, hidden, T, H, W) for Conv3d.
        outputs = torch.stack(outputs, 1)
        outputs = outputs.permute(0, 2, 1, 3, 4)
        outputs = self.decoder_CNN(outputs)
        outputs = torch.nn.Sigmoid()(outputs)
        return outputs
    def forward(self, x, forecast_steps=0, hidden_state=None):
        """
        Parameters
        ----------
        input_tensor:
            5-D Tensor of shape (b, t, c, h, w)        # batch, time, channel, height, width
        forecast_steps:
            number of future frames to predict.
        hidden_state:
            unused; hidden states are always zero-initialized below.
        """
        # find size of different input dimensions
        b, seq_len, _, h, w = x.size()
        # initialize hidden states
        h_t, c_t = self.encoder_1_convlstm.init_hidden(batch_size=b, image_size=(h, w))
        h_t2, c_t2 = self.encoder_2_convlstm.init_hidden(batch_size=b, image_size=(h, w))
        h_t3, c_t3 = self.decoder_1_convlstm.init_hidden(batch_size=b, image_size=(h, w))
        h_t4, c_t4 = self.decoder_2_convlstm.init_hidden(batch_size=b, image_size=(h, w))
        # autoencoder forward
        outputs = self.autoencoder(
            x, seq_len, forecast_steps, h_t, c_t, h_t2, c_t2, h_t3, c_t3, h_t4, c_t4
        )
        return outputs
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,094
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/__init__.py
|
from nowcasting_utils.models.base import get_model, create_model
from .conv_lstm import EncoderDecoderConvLSTM, ConvLSTM
from .pl_metnet import LitMetNet
from .runet import R2U_Net, RUnet
from .attention_unet import R2AttU_Net, AttU_Net
from .perceiver import Perceiver
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,095
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/pix2pix.py
|
import torch
import torchvision
import numpy as np
from collections import OrderedDict
from torch.optim import lr_scheduler
import pytorch_lightning as pl
from nowcasting_utils.models.base import register_model
from satflow.models.gan.discriminators import define_discriminator, GANLoss
from satflow.models.gan import define_generator
@register_model
class Pix2Pix(pl.LightningModule):
    """Pix2Pix conditional GAN for frame forecasting.

    The generator maps a stack of input frames to ``forecast_steps`` future
    frames; the discriminator judges (input, future) pairs. Trained with the
    standard GAN loss plus an L1 reconstruction term weighted by ``lambda_l1``.
    """

    def __init__(
        self,
        forecast_steps: int = 48,
        input_channels: int = 12,
        lr: float = 0.0002,
        beta1: float = 0.5,
        beta2: float = 0.999,
        num_filters: int = 64,
        generator_model: str = "unet_128",
        norm: str = "batch",
        use_dropout: bool = False,
        discriminator_model: str = "basic",
        discriminator_layers: int = 0,
        loss: str = "vanilla",
        scheduler: str = "plateau",
        lr_epochs: int = 10,
        lambda_l1: float = 100.0,
        channels_per_timestep: int = 12,
        pretrained: bool = False,
    ):
        super().__init__()
        self.lr = lr
        self.b1 = beta1
        self.b2 = beta2
        self.loss = loss
        self.lambda_l1 = lambda_l1
        self.lr_epochs = lr_epochs
        self.lr_method = scheduler
        self.forecast_steps = forecast_steps
        self.input_channels = input_channels
        # Fix: previously hard-coded `forecast_steps * 12`, ignoring the
        # channels_per_timestep argument (identical for the default of 12).
        self.output_channels = forecast_steps * channels_per_timestep
        self.channels_per_timestep = channels_per_timestep
        # define networks (both generator and discriminator)
        self.generator = define_generator(
            input_channels, self.output_channels, num_filters, generator_model, norm, use_dropout
        )
        # Discriminator sees input and (real or generated) future concatenated
        # along the channel dimension.
        self.discriminator = define_discriminator(
            input_channels + self.output_channels,
            num_filters,
            discriminator_model,
            discriminator_layers,
            norm,
        )
        # define loss functions
        self.criterionGAN = GANLoss(loss)
        self.criterionL1 = torch.nn.L1Loss()
        # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\
        self.save_hyperparameters()

    def forward(self, x):
        """Generate future frames from the input frame stack."""
        return self.generator(x)

    def visualize_step(self, x, y, y_hat, batch_idx, step):
        """Log input/target/generated image grids to TensorBoard."""
        # the logger you used (in this case tensorboard)
        tensorboard = self.logger.experiment[0]
        # Add all the different timesteps for a single prediction, 0.1% of the time
        images = x[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=self.channels_per_timestep)
        tensorboard.add_image(f"{step}/Input_Image_Stack", image_grid, global_step=batch_idx)
        images = y[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Target_Image_Stack", image_grid, global_step=batch_idx)
        images = y_hat[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Generated_Image_Stack", image_grid, global_step=batch_idx)

    def training_step(self, batch, batch_idx, optimizer_idx):
        """Alternating GAN step: optimizer 0 trains G, optimizer 1 trains D."""
        images, future_images, future_masks = batch
        # train generator
        if optimizer_idx == 0:
            # generate images
            generated_images = self(images)
            fake = torch.cat((images, generated_images), 1)
            # log sampled images
            # if np.random.random() < 0.01:
            self.visualize_step(images, future_images, generated_images, batch_idx, step="train")
            # Generator tries to make the discriminator label fakes as real,
            # plus an L1 term pulling predictions towards the targets.
            gan_loss = self.criterionGAN(self.discriminator(fake), True)
            l1_loss = self.criterionL1(generated_images, future_images) * self.lambda_l1
            g_loss = gan_loss + l1_loss
            tqdm_dict = {"g_loss": g_loss}
            output = OrderedDict({"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/g_loss": g_loss})
            return output
        # train discriminator
        if optimizer_idx == 1:
            # Measure discriminator's ability to classify real from generated samples
            # how well can it label as real?
            real = torch.cat((images, future_images), 1)
            real_loss = self.criterionGAN(self.discriminator(real), True)
            # how well can it label as fake?
            # Detach so discriminator gradients do not flow into the generator.
            gen_output = self(images).detach()
            fake = torch.cat((images, gen_output), 1)
            # Fix: fake samples must be scored against the *fake* label; the
            # original passed True here, training D to call fakes real.
            fake_loss = self.criterionGAN(self.discriminator(fake), False)
            # discriminator loss is the average of these
            d_loss = (real_loss + fake_loss) / 2
            tqdm_dict = {"d_loss": d_loss}
            output = OrderedDict({"loss": d_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/d_loss": d_loss})
            return output

    def validation_step(self, batch, batch_idx):
        """Compute generator and discriminator losses on a validation batch."""
        images, future_images, future_masks = batch
        # generate images
        generated_images = self(images)
        fake = torch.cat((images, generated_images), 1)
        # log sampled images
        if np.random.random() < 0.01:
            self.visualize_step(images, future_images, generated_images, batch_idx, step="val")
        # adversarial loss is binary cross-entropy
        gan_loss = self.criterionGAN(self.discriminator(fake), True)
        l1_loss = self.criterionL1(generated_images, future_images) * self.lambda_l1
        g_loss = gan_loss + l1_loss
        # how well can it label as real?
        real = torch.cat((images, future_images), 1)
        real_loss = self.criterionGAN(self.discriminator(real), True)
        # how well can it label as fake?
        # Fix: fake label was True in the original, mis-measuring D's loss.
        fake_loss = self.criterionGAN(self.discriminator(fake), False)
        # discriminator loss is the average of these
        d_loss = (real_loss + fake_loss) / 2
        tqdm_dict = {"d_loss": d_loss}
        output = OrderedDict(
            {
                "val/discriminator_loss": d_loss,
                "val/generator_loss": g_loss,
                "progress_bar": tqdm_dict,
                "log": tqdm_dict,
            }
        )
        self.log_dict({"val/d_loss": d_loss, "val/g_loss": g_loss, "val/loss": d_loss + g_loss})
        return output

    def configure_optimizers(self):
        """Adam for G and D, with a plateau or cosine LR schedule.

        Raises:
            NotImplementedError: if ``scheduler`` is not "plateau" or "cosine".
        """
        lr = self.lr
        b1 = self.b1
        b2 = self.b2
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
        if self.lr_method == "plateau":
            g_scheduler = lr_scheduler.ReduceLROnPlateau(
                opt_g, mode="min", factor=0.2, threshold=0.01, patience=10
            )
            d_scheduler = lr_scheduler.ReduceLROnPlateau(
                opt_d, mode="min", factor=0.2, threshold=0.01, patience=10
            )
        elif self.lr_method == "cosine":
            g_scheduler = lr_scheduler.CosineAnnealingLR(opt_g, T_max=self.lr_epochs, eta_min=0)
            d_scheduler = lr_scheduler.CosineAnnealingLR(opt_d, T_max=self.lr_epochs, eta_min=0)
        else:
            # Fix: the original *returned* the exception instead of raising it,
            # which would have been silently treated as an optimizer config.
            raise NotImplementedError("learning rate policy is not implemented")
        return [opt_g, opt_d], [g_scheduler, d_scheduler]
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,096
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/perceiver.py
|
from perceiver_pytorch import MultiPerceiver
from perceiver_pytorch.modalities import InputModality
from perceiver_pytorch.encoders import ImageEncoder
from perceiver_pytorch.decoders import ImageDecoder
from perceiver_pytorch.queries import LearnableQuery
from perceiver_pytorch.utils import encode_position
import torch
from typing import Iterable, Dict, Optional, Any, Union, Tuple
from nowcasting_utils.models.base import register_model, BaseModel
from einops import rearrange, repeat
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from nowcasting_utils.models.loss import get_loss
import torch_optimizer as optim
import logging
from nowcasting_dataset.consts import (
SATELLITE_DATA,
SATELLITE_X_COORDS,
SATELLITE_Y_COORDS,
SATELLITE_DATETIME_INDEX,
NWP_DATA,
NWP_Y_COORDS,
NWP_X_COORDS,
TOPOGRAPHIC_DATA,
TOPOGRAPHIC_X_COORDS,
TOPOGRAPHIC_Y_COORDS,
DATETIME_FEATURE_NAMES,
)
# Module-level logger for model debug output; default to WARN so the
# per-batch `logger.debug` calls below stay quiet unless explicitly enabled.
logger = logging.getLogger("satflow.model")
logger.setLevel(logging.WARN)
@register_model
class Perceiver(BaseModel):
    """Multi-modal Perceiver for satellite forecasting.

    Builds one InputModality per data source (satellite video, optional NWP,
    topographic map, coordinate vectors, optional datetime features, and — when
    not predicting all timesteps at once — a one-hot forecast-time modality),
    then feeds them through a MultiPerceiver with optional image pre/post
    processors and an optional learnable decoder query.
    """

    def __init__(
        self,
        input_channels: int = 22,
        sat_channels: int = 12,
        nwp_channels: int = 10,
        base_channels: int = 1,
        forecast_steps: int = 48,
        history_steps: int = 6,
        input_size: int = 64,
        lr: float = 5e-4,
        visualize: bool = True,
        max_frequency: float = 4.0,
        depth: int = 6,
        num_latents: int = 256,
        cross_heads: int = 1,
        latent_heads: int = 8,
        cross_dim_heads: int = 8,
        latent_dim: int = 512,
        weight_tie_layers: bool = False,
        decoder_ff: bool = True,
        dim: int = 32,
        logits_dim: int = 100,
        queries_dim: int = 32,
        latent_dim_heads: int = 64,
        loss="mse",
        sin_only: bool = False,
        encode_fourier: bool = True,
        preprocessor_type: Optional[str] = None,
        postprocessor_type: Optional[str] = None,
        encoder_kwargs: Optional[Dict[str, Any]] = None,
        decoder_kwargs: Optional[Dict[str, Any]] = None,
        pretrained: bool = False,
        predict_timesteps_together: bool = False,
        nwp_modality: bool = False,
        datetime_modality: bool = False,
        use_learnable_query: bool = True,
        generate_fourier_features: bool = True,
        temporally_consistent_fourier_features: bool = False,
    ):
        # NOTE(review): `super(BaseModel, self)` skips BaseModel.__init__ and
        # calls its parent's initializer directly — confirm this is intentional.
        super(BaseModel, self).__init__()
        self.forecast_steps = forecast_steps
        self.input_channels = input_channels
        self.lr = lr
        self.pretrained = pretrained
        self.visualize = visualize
        self.sat_channels = sat_channels
        self.nwp_channels = nwp_channels
        self.output_channels = sat_channels
        self.criterion = get_loss(loss)
        self.input_size = input_size
        self.predict_timesteps_together = predict_timesteps_together
        self.use_learnable_query = use_learnable_query
        self.max_frequency = max_frequency
        self.temporally_consistent_fourier_features = temporally_consistent_fourier_features
        if use_learnable_query:
            # Learned decoder query; 3D (time included) when predicting all
            # timesteps together, otherwise 2D spatial only.
            self.query = LearnableQuery(
                channel_dim=queries_dim,
                query_shape=(self.forecast_steps, self.input_size, self.input_size)
                if predict_timesteps_together
                else (self.input_size, self.input_size),
                conv_layer="3d",
                max_frequency=max_frequency,
                num_frequency_bands=input_size,
                sine_only=sin_only,
                generate_fourier_features=generate_fourier_features,
            )
        else:
            self.query = None
        # Warn if using frequency is smaller than Nyquist Frequency
        if max_frequency < input_size / 2:
            print(
                f"Max frequency is less than Nyquist frequency, currently set to {max_frequency} while "
                f"the Nyquist frequency for input of size {input_size} is {input_size / 2}"
            )
        # Preprocessor, if desired, on top of the other processing done
        if preprocessor_type is not None:
            if preprocessor_type not in ("conv", "patches", "pixels", "conv1x1", "metnet"):
                raise ValueError("Invalid prep_type!")
            if preprocessor_type == "metnet":
                # MetNet processing
                self.preprocessor = ImageEncoder(
                    crop_size=input_size,
                    prep_type="metnet",
                )
                video_input_channels = (
                    8 * sat_channels
                )  # This is only done on the sat channel inputs
                nwp_input_channels = 8 * nwp_channels
                # If doing it on the base map, then need
                image_input_channels = 4 * base_channels
            else:
                self.preprocessor = ImageEncoder(
                    input_channels=sat_channels,
                    prep_type=preprocessor_type,
                    **encoder_kwargs,
                )
                nwp_input_channels = self.preprocessor.output_channels
                video_input_channels = self.preprocessor.output_channels
                image_input_channels = self.preprocessor.output_channels
        else:
            self.preprocessor = None
            nwp_input_channels = nwp_channels
            video_input_channels = sat_channels
            image_input_channels = base_channels
        # The preprocessor will change the number of channels in the input
        modalities = []
        # Timeseries input
        sat_modality = InputModality(
            name=SATELLITE_DATA,
            input_channels=video_input_channels,
            input_axis=3,  # number of axes, 3 for video
            num_freq_bands=input_size,  # number of freq bands, with original value (2 * K + 1)
            max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is, should be Nyquist frequency (i.e. 112 for 224 input image)
            sin_only=sin_only,  # Whether if sine only for Fourier encoding, TODO test more
            fourier_encode=encode_fourier,  # Whether to encode position with Fourier features
        )
        modalities.append(sat_modality)
        if nwp_modality:
            nwp_modality = InputModality(
                name=NWP_DATA,
                input_channels=nwp_input_channels,
                input_axis=3,  # number of axes, 3 for video
                num_freq_bands=input_size,  # number of freq bands, with original value (2 * K + 1)
                max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is, should be Nyquist frequency (i.e. 112 for 224 input image)
                sin_only=sin_only,  # Whether if sine only for Fourier encoding, TODO test more
                fourier_encode=encode_fourier,  # Whether to encode position with Fourier features
            )
            modalities.append(nwp_modality)
        # Use image modality for latlon, elevation, other base data?
        image_modality = InputModality(
            name=TOPOGRAPHIC_DATA,
            input_channels=image_input_channels,
            input_axis=2,  # number of axes, 2 for images
            num_freq_bands=input_size,  # number of freq bands, with original value (2 * K + 1)
            max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is
            sin_only=sin_only,
            fourier_encode=encode_fourier,
        )
        modalities.append(image_modality)
        if not self.predict_timesteps_together:
            # Sort audio for timestep one-hot encode? Or include under other modality?
            timestep_modality = InputModality(
                name="forecast_time",
                input_channels=1,  # number of channels for mono audio
                input_axis=1,  # number of axes, 2 for images
                num_freq_bands=self.forecast_steps,  # number of freq bands, with original value (2 * K + 1)
                max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is
                sin_only=sin_only,
                fourier_encode=encode_fourier,
            )
            modalities.append(timestep_modality)
        # X,Y Coords are given in 1D, and each would be a different modality
        # Keeping them as 1D saves input size, just need to add more ones
        coord_modalities = (
            [
                SATELLITE_Y_COORDS,
                SATELLITE_X_COORDS,
                TOPOGRAPHIC_Y_COORDS,
                TOPOGRAPHIC_X_COORDS,
                NWP_Y_COORDS,
                NWP_X_COORDS,
            ]
            if nwp_modality
            else [
                SATELLITE_Y_COORDS,
                SATELLITE_X_COORDS,
                TOPOGRAPHIC_Y_COORDS,
                TOPOGRAPHIC_X_COORDS,
            ]
        )
        for coord in coord_modalities:
            coord_modality = InputModality(
                name=coord,
                input_channels=1,  # number of channels for mono audio
                input_axis=1,  # number of axes, 2 for images
                num_freq_bands=input_size,  # number of freq bands, with original value (2 * K + 1)
                max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is
                sin_only=sin_only,
                fourier_encode=encode_fourier,
            )
            modalities.append(coord_modality)
        # Datetime features as well should be incorporated
        if datetime_modality:
            for date in [SATELLITE_DATETIME_INDEX] + list(DATETIME_FEATURE_NAMES):
                date_modality = InputModality(
                    name=date,
                    input_channels=1,  # number of channels for mono audio
                    input_axis=1,  # number of axes, 2 for images
                    num_freq_bands=(
                        2 * history_steps + 1
                    ),  # number of freq bands, with original value (2 * K + 1)
                    max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is
                    sin_only=sin_only,
                    fourier_encode=encode_fourier,
                )
                modalities.append(date_modality)
        self.model = MultiPerceiver(
            modalities=modalities,
            dim=dim,  # dimension of sequence to be encoded
            queries_dim=queries_dim,  # dimension of decoder queries
            logits_dim=logits_dim,  # dimension of final logits
            depth=depth,  # depth of net
            num_latents=num_latents,  # number of latents, or induced set points, or centroids. different papers giving it different names
            latent_dim=latent_dim,  # latent dimension
            cross_heads=cross_heads,  # number of heads for cross attention. paper said 1
            latent_heads=latent_heads,  # number of heads for latent self attention, 8
            cross_dim_head=cross_dim_heads,  # number of dimensions per cross attention head
            latent_dim_head=latent_dim_heads,  # number of dimensions per latent self attention head
            weight_tie_layers=weight_tie_layers,  # whether to weight tie layers (optional, as indicated in the diagram)
            # self_per_cross_attn=self_per_cross_attention,  # number of self attention blocks per cross attention
            sine_only=sin_only,
            fourier_encode_data=encode_fourier,
            output_shape=input_size,  # Shape of output to make the correct sized logits dim, needed so reshaping works
            decoder_ff=decoder_ff,  # Optional decoder FF
        )
        if postprocessor_type is not None:
            if postprocessor_type not in ("conv", "patches", "pixels", "conv1x1"):
                raise ValueError("Invalid postprocessor_type!")
            self.postprocessor = ImageDecoder(
                postprocess_type=postprocessor_type, output_channels=sat_channels, **decoder_kwargs
            )
        else:
            self.postprocessor = None
        self.save_hyperparameters()

    def encode_inputs(self, x: dict) -> Dict[str, torch.Tensor]:
        """Run optional preprocessing and put image modalities channel-last."""
        video_inputs = x[SATELLITE_DATA]
        # NOTE(review): `.get(..., [])` plus `if nwp_inputs:` truth-tests the
        # value; a multi-element tensor would raise here, so this relies on
        # NWP_DATA being absent from x entirely — confirm against the dataloader.
        nwp_inputs = x.get(NWP_DATA, [])
        base_inputs = x.get(
            TOPOGRAPHIC_DATA, []
        )  # Base maps should be the same for all timesteps in a sample
        # Run the preprocessors here when encoding the inputs
        if self.preprocessor is not None:
            # Expects Channel first
            video_inputs = self.preprocessor(video_inputs)
            base_inputs = self.preprocessor(base_inputs)
            if nwp_inputs:
                nwp_inputs = self.preprocessor(nwp_inputs)
        video_inputs = video_inputs.permute(0, 1, 3, 4, 2)  # Channel last
        if nwp_inputs:
            nwp_inputs = nwp_inputs.permute(0, 1, 3, 4, 2)  # Channel last
            x[NWP_DATA] = nwp_inputs
        base_inputs = base_inputs.permute(0, 2, 3, 1)  # Channel last
        logger.debug(f"Timeseries: {video_inputs.size()} Base: {base_inputs.size()}")
        x[SATELLITE_DATA] = video_inputs
        x[TOPOGRAPHIC_DATA] = base_inputs
        return x

    def add_timestep(self, batch_size: int, timestep: int = 1) -> torch.Tensor:
        """Build a one-hot forecast-time tensor repeated across the batch."""
        times = (torch.eye(self.forecast_steps)[timestep]).unsqueeze(-1).unsqueeze(-1)
        ones = torch.ones(1, 1, 1)
        timestep_input = times * ones
        timestep_input = timestep_input.squeeze(-1)
        timestep_input = repeat(timestep_input, "... -> b ...", b=batch_size)
        logger.debug(f"Forecast Step: {timestep_input.size()}")
        return timestep_input

    def _train_or_validate_step(self, batch, batch_idx, is_training: bool = True):
        """Shared train/val step: predict future frames and log losses."""
        x, y = batch
        batch_size = y[SATELLITE_DATA].size(0)
        # For each future timestep:
        predictions = []
        query = self.construct_query(x)
        x = self.encode_inputs(x)
        if self.predict_timesteps_together:
            # Predicting all future ones at once
            y_hat = self(x, query=query)
            y_hat = rearrange(
                y_hat,
                "b (t h w) c -> b c t h w",
                t=self.forecast_steps,
                h=self.input_size,
                w=self.input_size,
            )
        else:
            # Autoregress over forecast steps by swapping the one-hot
            # forecast_time modality each iteration.
            for i in range(self.forecast_steps):
                x["forecast_time"] = self.add_timestep(batch_size, i).type_as(y)
                y_hat = self(x, query=query)
                y_hat = rearrange(y_hat, "b h (w c) -> b c h w", c=self.output_channels)
                predictions.append(y_hat)
            y_hat = torch.stack(predictions, dim=1)  # Stack along the timestep dimension
        if self.postprocessor is not None:
            y_hat = self.postprocessor(y_hat)
        if self.visualize:
            self.visualize_step(x, y, y_hat, batch_idx, step="train" if is_training else "val")
        # NOTE(review): y here is the full target dict, not y[SATELLITE_DATA];
        # confirm the loss from get_loss accepts that.
        loss = self.criterion(y, y_hat)
        self.log_dict({f"{'train' if is_training else 'val'}/loss": loss})
        frame_loss_dict = {}
        # NOTE(review): after rearrange y_hat is (b, c, t, h, w) but this
        # indexes dim 1 (channels) as the frame — confirm intended axis.
        for f in range(self.forecast_steps):
            frame_loss = self.criterion(
                y_hat[:, f, :, :, :], y[SATELLITE_DATA][:, f, :, :, :]
            ).item()
            frame_loss_dict[f"{'train' if is_training else 'val'}/frame_{f}_loss"] = frame_loss
        self.log_dict(frame_loss_dict)
        return loss

    def configure_optimizers(self):
        """LAMB optimizer with linear-warmup cosine-annealing schedule."""
        # They use LAMB as the optimizer
        optimizer = optim.Lamb(self.parameters(), lr=self.lr, betas=(0.9, 0.999))
        scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=100)
        lr_dict = {
            # REQUIRED: The scheduler instance
            "scheduler": scheduler,
            # The unit of the scheduler's step size, could also be 'step'.
            # 'epoch' updates the scheduler on epoch end whereas 'step'
            # updates it after a optimizer update.
            "interval": "step",
            # How many epochs/steps should pass between calls to
            # `scheduler.step()`. 1 corresponds to updating the learning
            # rate after every epoch/step.
            "frequency": 1,
            # If using the `LearningRateMonitor` callback to monitor the
            # learning rate progress, this keyword can be used to specify
            # a custom logged name
            "name": None,
        }
        return {"optimizer": optimizer, "lr_scheduler": lr_dict}

    def construct_query(self, x: dict):
        """Build the decoder query: learnable query or preprocessed input."""
        if self.use_learnable_query:
            if self.temporally_consistent_fourier_features:
                # Encode history+forecast positions together, then keep only
                # the future slice so features line up across time.
                fourier_features = encode_position(
                    x[SATELLITE_DATA].shape[0],
                    axis=(
                        x[SATELLITE_DATA].shape[1] + self.forecast_steps,
                        self.input_size,
                        self.input_size,
                    ),
                    num_frequency_bands=max(
                        [self.input_size, x[SATELLITE_DATA].shape[1] + self.forecast_steps]
                    )
                    * 2
                    + 1,
                    max_frequency=self.max_frequency,
                )[
                    x[SATELLITE_DATA].shape[1] :
                ]  # Only want future part
            else:
                fourier_features = None
            return self.query(x, fourier_features)
        # key, value: B x N x K; query: B x M x K
        # Attention maps -> B x N x M
        # Output -> B x M x K
        # So want query to be B X (T*H*W) X C to reshape to B x T x C x H x W
        if self.preprocessor is not None:
            x = self.preprocessor(x[SATELLITE_DATA])
        y_query = x  # Only want sat channels, the output
        # y_query = torch.permute(y_query, (0, 2, 3, 1)) # Channel Last
        # Need to reshape to 3 dimensions, TxHxW or HxWxC
        # y_query = rearrange(y_query, "b h w d -> b (h w) d")
        logger.debug(f"Query Shape: {y_query.shape}")
        return y_query

    def forward(self, x, mask=None, query=None):
        """Delegate to the MultiPerceiver backbone."""
        return self.model.forward(x, mask=mask, queries=query)
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,097
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/pl_metnet.py
|
import einops
import numpy as np
import torch
import torch.nn as nn
from typing import Any, Dict
from nowcasting_utils.models.base import register_model, BaseModel
from nowcasting_utils.models.loss import get_loss
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from metnet import MetNet
from nowcasting_dataset.consts import (
SATELLITE_DATA,
SATELLITE_X_COORDS,
SATELLITE_Y_COORDS,
SATELLITE_DATETIME_INDEX,
NWP_DATA,
NWP_Y_COORDS,
NWP_X_COORDS,
TOPOGRAPHIC_DATA,
DATETIME_FEATURE_NAMES,
)
head_to_module = {"identity": nn.Identity()}
@register_model
class LitMetNet(BaseModel):
    """LightningModule wrapper around MetNet for satellite forecasting.

    Combines satellite, topographic, and optional NWP data into one
    [Batch, C, T, H, W] input and trains against the satellite targets.
    """

    def __init__(
        self,
        image_encoder: str = "downsampler",
        input_channels: int = 12,
        sat_channels: int = 12,
        input_size: int = 256,
        output_channels: int = 12,
        hidden_dim: int = 64,
        kernel_size: int = 3,
        num_layers: int = 1,
        num_att_layers: int = 1,
        head: str = "identity",
        forecast_steps: int = 48,
        temporal_dropout: float = 0.2,
        lr: float = 0.001,
        pretrained: bool = False,
        visualize: bool = False,
        loss: str = "mse",
    ):
        super(BaseModel, self).__init__()
        self.forecast_steps = forecast_steps
        self.input_channels = input_channels
        self.lr = lr
        self.pretrained = pretrained
        self.visualize = visualize
        self.output_channels = output_channels
        self.criterion = get_loss(
            loss, channel=output_channels, nonnegative_ssim=True, convert_range=True
        )
        self.model = MetNet(
            image_encoder=image_encoder,
            input_channels=input_channels,
            sat_channels=sat_channels,
            input_size=input_size,
            output_channels=output_channels,
            hidden_dim=hidden_dim,
            kernel_size=kernel_size,
            num_layers=num_layers,
            num_att_layers=num_att_layers,
            head=head_to_module[head],
            forecast_steps=forecast_steps,
            temporal_dropout=temporal_dropout,
        )
        # TODO: Would be nice to have this automatically applied to all classes
        # that inherit from BaseModel
        self.save_hyperparameters()

    def forward(self, imgs, **kwargs) -> Any:
        """Run the wrapped MetNet on the combined input tensor."""
        return self.model(imgs)

    def configure_optimizers(self):
        """Adam with linear-warmup cosine-annealing LR, stepped per batch."""
        # DeepSpeedCPUAdam provides 5x to 7x speedup over torch.optim.adam(w)
        # optimizer = torch.optim.adam()
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=100)
        lr_dict = {
            # REQUIRED: The scheduler instance
            "scheduler": scheduler,
            # The unit of the scheduler's step size, could also be 'step'.
            # 'epoch' updates the scheduler on epoch end whereas 'step'
            # updates it after a optimizer update.
            "interval": "step",
            # How many epochs/steps should pass between calls to
            # `scheduler.step()`. 1 corresponds to updating the learning
            # rate after every epoch/step.
            "frequency": 1,
            # If using the `LearningRateMonitor` callback to monitor the
            # learning rate progress, this keyword can be used to specify
            # a custom logged name
            "name": None,
        }
        return {"optimizer": optimizer, "lr_scheduler": lr_dict}

    def _combine_data_sources(self, x: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Combine different data sources from nowcasting dataset into a single input array for each example

        Mostly useful for adding topographic data to satellite

        Args:
            x: Dictionary containing mappings from nowcasting dataset names to the data

        Returns:
            Numpy array of [Batch, C, T, H, W] to give to model
        """
        timesteps = x[SATELLITE_DATA].shape[2]
        # Topographic map is static, so broadcast it across the time axis.
        topographic_repeat = einops.repeat(x[TOPOGRAPHIC_DATA], "b c h w -> b c t h w", t=timesteps)
        to_concat = [x[SATELLITE_DATA], topographic_repeat]
        to_concat = to_concat + x.get(NWP_DATA, [])
        input_data = torch.cat(to_concat, dim=1).float()  # Cat along channel dim
        return input_data

    def _train_or_validate_step(self, batch, batch_idx, is_training: bool = True):
        """Shared train/val step: forward pass, per-frame loss logging."""
        x, y = batch
        y[SATELLITE_DATA] = y[SATELLITE_DATA].float()
        y_hat = self(self._combine_data_sources(x))
        if self.visualize:
            if batch_idx == 1:
                self.visualize_step(x, y, y_hat, batch_idx, step="train" if is_training else "val")
        loss = self.criterion(y_hat, y[SATELLITE_DATA])
        self.log(f"{'train' if is_training else 'val'}/loss", loss, prog_bar=True)
        frame_loss_dict = {}
        for f in range(self.forecast_steps):
            frame_loss = self.criterion(y_hat[:, f, :, :], y[SATELLITE_DATA][:, f, :, :]).item()
            frame_loss_dict[f"{'train' if is_training else 'val'}/frame_{f}_loss"] = frame_loss
        self.log_dict(frame_loss_dict)
        # Fix: the loss was computed and logged but never returned, so Lightning
        # had nothing to backpropagate (the analogous Perceiver step returns it).
        return loss
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,098
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/runet.py
|
import antialiased_cnns
from satflow.models.layers.RUnetLayers import *
import pytorch_lightning as pl
import torchvision
from typing import Union
from nowcasting_utils.models.base import register_model
from nowcasting_utils.models.loss import get_loss
import numpy as np
@register_model
class RUnet(pl.LightningModule):
    """Recurrent U-Net (R2U-Net) LightningModule for frame forecasting."""

    def __init__(
        self,
        input_channels: int = 12,
        forecast_steps: int = 48,
        recurrent_steps: int = 2,
        loss: Union[str, torch.nn.Module] = "mse",
        lr: float = 0.001,
        visualize: bool = False,
        conv_type: str = "standard",
        pretrained: bool = False,
    ):
        super().__init__()
        self.input_channels = input_channels
        self.forecast_steps = forecast_steps
        # Used by visualize_step to lay out the input grid; the original read
        # self.channels_per_timestep without ever setting it (AttributeError).
        self.channels_per_timestep = input_channels
        self.module = R2U_Net(
            img_ch=input_channels, output_ch=forecast_steps, t=recurrent_steps, conv_type=conv_type
        )
        self.lr = lr
        self.criterion = get_loss(loss=loss)
        self.visualize = visualize
        self.save_hyperparameters()

    @classmethod
    def from_config(cls, config):
        """Alternate constructor from a plain config dict."""
        return RUnet(
            forecast_steps=config.get("forecast_steps", 12),
            input_channels=config.get("in_channels", 12),
            lr=config.get("lr", 0.001),
        )

    def forward(self, x):
        # Fix: the network is stored as `self.module`; the original called
        # `self.model.forward(x)`, which raised AttributeError at runtime.
        return self.module(x)

    def configure_optimizers(self):
        """Plain Adam over all parameters."""
        # DeepSpeedCPUAdam provides 5x to 7x speedup over torch.optim.adam(w)
        # optimizer = torch.optim.adam()
        return torch.optim.Adam(self.parameters(), lr=self.lr)

    def training_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        if self.visualize:
            if np.random.random() < 0.01:
                self.visualize_step(x, y, y_hat, batch_idx)
        # Generally only care about the center x crop, so the model can take into account the clouds in the area without
        # being penalized for that, but for now, just do general MSE loss, also only care about first 12 channels
        loss = self.criterion(y_hat, y)
        self.log("train/loss", loss, on_step=True)
        # Save out loss per forecast frame as well
        frame_loss_dict = {}
        for f in range(self.forecast_steps):
            frame_loss = self.criterion(y_hat[:, f, :, :], y[:, f, :, :]).item()
            frame_loss_dict[f"train/frame_{f}_loss"] = frame_loss
        self.log_dict(frame_loss_dict)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        val_loss = self.criterion(y_hat, y)
        self.log("val/loss", val_loss)
        # Save out loss per frame as well
        frame_loss_dict = {}
        for f in range(self.forecast_steps):
            frame_loss = self.criterion(y_hat[:, f, :, :], y[:, f, :, :]).item()
            frame_loss_dict[f"val/frame_{f}_loss"] = frame_loss
        self.log_dict(frame_loss_dict)
        return val_loss

    def test_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        loss = self.criterion(y_hat, y)
        return loss

    def visualize_step(self, x, y, y_hat, batch_idx, step="train"):
        """Log input/target/generated image grids to TensorBoard."""
        tensorboard = self.logger.experiment[0]
        # Add all the different timesteps for a single prediction, 0.1% of the time
        images = x[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=self.channels_per_timestep)
        tensorboard.add_image(f"{step}/Input_Image_Stack", image_grid, global_step=batch_idx)
        images = y[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Target_Image_Stack", image_grid, global_step=batch_idx)
        images = y_hat[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Generated_Image_Stack", image_grid, global_step=batch_idx)
class R2U_Net(nn.Module):
    """Recurrent Residual U-Net (R2U-Net).

    Encoder/decoder built from recurrent residual conv blocks
    (``RRCNN_block``).  When ``conv_type == "antialiased"`` the max-pool is
    stride-1 and the actual stride-2 downsampling is performed by
    ``BlurPool`` layers; otherwise a plain stride-2 max-pool is used and
    the ``Blur*`` attributes are identities.

    Args:
        img_ch: number of input channels.
        output_ch: number of output channels.
        t: recurrence steps inside each RRCNN block.
        conv_type: "standard" or "antialiased".
    """
    def __init__(self, img_ch=3, output_ch=1, t=2, conv_type: str = "standard"):
        super(R2U_Net, self).__init__()
        if conv_type == "antialiased":
            # Stride-1 pool: the BlurPool layers below do the downsampling.
            self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=1)
            self.antialiased = True
        else:
            self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
            self.antialiased = False
        self.Upsample = nn.Upsample(scale_factor=2)
        # Encoder: channel depth doubles per level (64 -> 1024).
        self.RRCNN1 = RRCNN_block(ch_in=img_ch, ch_out=64, t=t, conv_type=conv_type)
        self.Blur1 = antialiased_cnns.BlurPool(64, stride=2) if self.antialiased else nn.Identity()
        self.RRCNN2 = RRCNN_block(ch_in=64, ch_out=128, t=t, conv_type=conv_type)
        self.Blur2 = antialiased_cnns.BlurPool(128, stride=2) if self.antialiased else nn.Identity()
        self.RRCNN3 = RRCNN_block(ch_in=128, ch_out=256, t=t, conv_type=conv_type)
        self.Blur3 = antialiased_cnns.BlurPool(256, stride=2) if self.antialiased else nn.Identity()
        self.RRCNN4 = RRCNN_block(ch_in=256, ch_out=512, t=t, conv_type=conv_type)
        self.Blur4 = antialiased_cnns.BlurPool(512, stride=2) if self.antialiased else nn.Identity()
        self.RRCNN5 = RRCNN_block(ch_in=512, ch_out=1024, t=t, conv_type=conv_type)
        # Decoder: upsample then RRCNN over the concatenated skip connection.
        self.Up5 = up_conv(ch_in=1024, ch_out=512)
        self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512, t=t, conv_type=conv_type)
        self.Up4 = up_conv(ch_in=512, ch_out=256)
        self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256, t=t, conv_type=conv_type)
        self.Up3 = up_conv(ch_in=256, ch_out=128)
        self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128, t=t, conv_type=conv_type)
        self.Up2 = up_conv(ch_in=128, ch_out=64)
        self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64, t=t, conv_type=conv_type)
        # 1x1 conv maps the final 64 features to the requested output channels.
        self.Conv_1x1 = nn.Conv2d(64, output_ch, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Encode with skip connections, then decode with concatenation."""
        # encoding path
        x1 = self.RRCNN1(x)
        x2 = self.Maxpool(x1)
        x2 = self.Blur1(x2)
        x2 = self.RRCNN2(x2)
        x3 = self.Maxpool(x2)
        x3 = self.Blur2(x3)
        x3 = self.RRCNN3(x3)
        x4 = self.Maxpool(x3)
        x4 = self.Blur3(x4)
        x4 = self.RRCNN4(x4)
        x5 = self.Maxpool(x4)
        x5 = self.Blur4(x5)
        x5 = self.RRCNN5(x5)
        # decoding + concat path
        d5 = self.Up5(x5)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_RRCNN5(d5)
        d4 = self.Up4(d5)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_RRCNN4(d4)
        d3 = self.Up3(d4)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_RRCNN3(d3)
        d2 = self.Up2(d3)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_RRCNN2(d2)
        d1 = self.Conv_1x1(d2)
        return d1
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,099
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/attention_unet.py
|
from typing import Union
from satflow.models.layers.RUnetLayers import *
import pytorch_lightning as pl
import torchvision
import numpy as np
from nowcasting_utils.models.loss import get_loss
from nowcasting_utils.models.losses.FocalLoss import FocalLoss
from nowcasting_utils.models.base import register_model
@register_model
class AttentionUnet(pl.LightningModule):
    """Attention U-Net wrapped as a LightningModule.

    Trains an ``AttU_Net`` with the configured loss, logging the overall
    loss plus one loss value per forecast frame.
    """
    def __init__(
        self,
        input_channels: int = 12,
        forecast_steps: int = 12,
        loss: Union[str, torch.nn.Module] = "mse",
        lr: float = 0.001,
        visualize: bool = False,
        conv_type: str = "standard",
        pretrained: bool = False,
    ):
        super().__init__()
        self.input_channels = input_channels
        self.forecast_steps = forecast_steps
        self.lr = lr
        self.visualize = visualize
        self.channels_per_timestep = 12
        self.criterion = get_loss(loss)
        self.model = AttU_Net(
            input_channels=input_channels, output_channels=forecast_steps, conv_type=conv_type
        )
    def forward(self, x):
        """Run the underlying AttU_Net."""
        return self.model.forward(x)
    def configure_optimizers(self):
        # DeepSpeedCPUAdam would give a 5x-7x speedup when DeepSpeed is used;
        # plain Adam for now.
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        return optimizer
    def _per_frame_losses(self, y_hat, y, prefix):
        """One scalar loss per forecast frame, keyed for logging."""
        return {
            f"{prefix}frame_{f}_loss": self.criterion(y_hat[:, f, :, :], y[:, f, :, :]).item()
            for f in range(self.forecast_steps)
        }
    def training_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        # Visualise roughly 1% of training batches when enabled.
        if self.visualize and np.random.random() < 0.01:
            self.visualize_step(x, y, y_hat, batch_idx, "train")
        # Plain full-frame loss for now; a centre crop could avoid penalising
        # the model for surrounding cloud context.
        loss = self.criterion(y_hat, y)
        self.log("train/loss", loss, on_step=True)
        self.log_dict(self._per_frame_losses(y_hat, y, "train/"))
        return loss
    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        val_loss = self.criterion(y_hat, y)
        self.log("val/loss", val_loss)
        # Save out loss per frame as well
        self.log_dict(self._per_frame_losses(y_hat, y, "val/"))
        return val_loss
    def test_step(self, batch, batch_idx):
        inputs, targets = batch
        predictions = self(inputs.float())
        return self.criterion(predictions, targets)
    def visualize_step(self, x, y, y_hat, batch_idx, step):
        """Write input/target/prediction image grids to TensorBoard."""
        tensorboard = self.logger.experiment[0]

        def _log_grid(tag, frames, nrow):
            channels = [torch.unsqueeze(img, dim=0) for img in frames.cpu().detach()]
            grid = torchvision.utils.make_grid(channels, nrow=nrow)
            tensorboard.add_image(tag, grid, global_step=batch_idx)

        _log_grid(f"{step}/Input_Image_Stack", x[0], self.channels_per_timestep)
        _log_grid(f"{step}/Target_Image_Stack", y[0], 12)
        _log_grid(f"{step}/Generated_Image_Stack", y_hat[0], 12)
@register_model
class AttentionRUnet(pl.LightningModule):
    """Recurrent Attention U-Net wrapped as a LightningModule.

    Trains an ``R2AttU_Net`` with the configured loss, logging the overall
    loss plus one loss value per forecast frame.
    """
    def __init__(
        self,
        input_channels: int = 12,
        forecast_steps: int = 12,
        recurrent_blocks: int = 2,
        visualize: bool = False,
        loss: Union[str, torch.nn.Module] = "mse",
        lr: float = 0.001,
        pretrained: bool = False,
    ):
        super().__init__()
        self.input_channels = input_channels
        self.forecast_steps = forecast_steps
        self.lr = lr
        self.visualize = visualize
        self.channels_per_timestep = 12
        self.criterion = get_loss(loss)
        self.model = R2AttU_Net(
            input_channels=input_channels, output_channels=forecast_steps, t=recurrent_blocks
        )
    def forward(self, x):
        """Run the underlying R2AttU_Net."""
        return self.model.forward(x)
    def configure_optimizers(self):
        # DeepSpeedCPUAdam would give a 5x-7x speedup when DeepSpeed is used;
        # plain Adam for now.
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        return optimizer
    def _per_frame_losses(self, y_hat, y, prefix):
        """One scalar loss per forecast frame, keyed for logging."""
        return {
            f"{prefix}frame_{f}_loss": self.criterion(y_hat[:, f, :, :], y[:, f, :, :]).item()
            for f in range(self.forecast_steps)
        }
    def training_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        # Visualise roughly 1% of training batches when enabled.
        if self.visualize and np.random.random() < 0.01:
            self.visualize_step(x, y, y_hat, batch_idx, "train")
        # Plain full-frame loss for now; a centre crop could avoid penalising
        # the model for surrounding cloud context.
        loss = self.criterion(y_hat, y)
        self.log("train/loss", loss, on_step=True)
        self.log_dict(self._per_frame_losses(y_hat, y, "train/"))
        return loss
    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        val_loss = self.criterion(y_hat, y)
        self.log("val/loss", val_loss)
        # Save out loss per frame as well
        self.log_dict(self._per_frame_losses(y_hat, y, "val/"))
        return val_loss
    def test_step(self, batch, batch_idx):
        inputs, targets = batch
        predictions = self(inputs.float())
        return self.criterion(predictions, targets)
    def visualize_step(self, x, y, y_hat, batch_idx, step):
        """Write input/target/prediction image grids to TensorBoard."""
        tensorboard = self.logger.experiment[0]

        def _log_grid(tag, frames, nrow):
            channels = [torch.unsqueeze(img, dim=0) for img in frames.cpu().detach()]
            grid = torchvision.utils.make_grid(channels, nrow=nrow)
            tensorboard.add_image(tag, grid, global_step=batch_idx)

        _log_grid(f"{step}/Input_Image_Stack", x[0], self.channels_per_timestep)
        _log_grid(f"{step}/Target_Image_Stack", y[0], 12)
        _log_grid(f"{step}/Generated_Image_Stack", y_hat[0], 12)
class AttU_Net(nn.Module):
    """Attention U-Net.

    Standard U-Net encoder/decoder where every skip connection is gated by
    an ``Attention_block`` before being concatenated with the upsampled
    decoder features.

    Args:
        input_channels: number of input channels.
        output_channels: number of output channels.
        conv_type: convolution variant passed through to the building blocks.
    """
    def __init__(self, input_channels=3, output_channels=1, conv_type: str = "standard"):
        super(AttU_Net, self).__init__()
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Encoder: channel depth doubles per level (64 -> 1024).
        self.Conv1 = conv_block(ch_in=input_channels, ch_out=64, conv_type=conv_type)
        self.Conv2 = conv_block(ch_in=64, ch_out=128, conv_type=conv_type)
        self.Conv3 = conv_block(ch_in=128, ch_out=256, conv_type=conv_type)
        self.Conv4 = conv_block(ch_in=256, ch_out=512, conv_type=conv_type)
        self.Conv5 = conv_block(ch_in=512, ch_out=1024, conv_type=conv_type)
        # Decoder: upsample, attention-gate the skip, then conv the concat.
        self.Up5 = up_conv(ch_in=1024, ch_out=512)
        self.Att5 = Attention_block(F_g=512, F_l=512, F_int=256, conv_type=conv_type)
        self.Up_conv5 = conv_block(ch_in=1024, ch_out=512, conv_type=conv_type)
        self.Up4 = up_conv(ch_in=512, ch_out=256)
        self.Att4 = Attention_block(F_g=256, F_l=256, F_int=128, conv_type=conv_type)
        self.Up_conv4 = conv_block(ch_in=512, ch_out=256, conv_type=conv_type)
        self.Up3 = up_conv(ch_in=256, ch_out=128)
        self.Att3 = Attention_block(F_g=128, F_l=128, F_int=64, conv_type=conv_type)
        self.Up_conv3 = conv_block(ch_in=256, ch_out=128, conv_type=conv_type)
        self.Up2 = up_conv(ch_in=128, ch_out=64)
        self.Att2 = Attention_block(F_g=64, F_l=64, F_int=32, conv_type=conv_type)
        self.Up_conv2 = conv_block(ch_in=128, ch_out=64, conv_type=conv_type)
        # 1x1 conv maps the final 64 features to the requested output channels.
        self.Conv_1x1 = nn.Conv2d(64, output_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Encode with skips; decode with attention-gated concatenation."""
        # encoding path
        x1 = self.Conv1(x)
        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)
        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)
        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)
        x5 = self.Maxpool(x4)
        x5 = self.Conv5(x5)
        # decoding + concat path
        d5 = self.Up5(x5)
        x4 = self.Att5(g=d5, x=x4)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)
        d4 = self.Up4(d5)
        x3 = self.Att4(g=d4, x=x3)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)
        d3 = self.Up3(d4)
        x2 = self.Att3(g=d3, x=x2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)
        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=x1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)
        d1 = self.Conv_1x1(d2)
        return d1
class R2AttU_Net(nn.Module):
    """Recurrent Residual Attention U-Net.

    Combines recurrent residual conv blocks (``RRCNN_block``) with
    attention-gated skip connections (``Attention_block``).

    Args:
        input_channels: number of input channels.
        output_channels: number of output channels.
        t: recurrence steps inside each RRCNN block.
        conv_type: convolution variant passed through to the building blocks.
    """
    def __init__(self, input_channels=3, output_channels=1, t=2, conv_type: str = "standard"):
        super(R2AttU_Net, self).__init__()
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Upsample = nn.Upsample(scale_factor=2)
        # Encoder: channel depth doubles per level (64 -> 1024).
        self.RRCNN1 = RRCNN_block(ch_in=input_channels, ch_out=64, t=t, conv_type=conv_type)
        self.RRCNN2 = RRCNN_block(ch_in=64, ch_out=128, t=t, conv_type=conv_type)
        self.RRCNN3 = RRCNN_block(ch_in=128, ch_out=256, t=t, conv_type=conv_type)
        self.RRCNN4 = RRCNN_block(ch_in=256, ch_out=512, t=t, conv_type=conv_type)
        self.RRCNN5 = RRCNN_block(ch_in=512, ch_out=1024, t=t, conv_type=conv_type)
        # Decoder: upsample, attention-gate the skip, RRCNN on the concat.
        self.Up5 = up_conv(ch_in=1024, ch_out=512)
        self.Att5 = Attention_block(F_g=512, F_l=512, F_int=256, conv_type=conv_type)
        self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512, t=t, conv_type=conv_type)
        self.Up4 = up_conv(ch_in=512, ch_out=256)
        self.Att4 = Attention_block(F_g=256, F_l=256, F_int=128, conv_type=conv_type)
        self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256, t=t, conv_type=conv_type)
        self.Up3 = up_conv(ch_in=256, ch_out=128)
        self.Att3 = Attention_block(F_g=128, F_l=128, F_int=64, conv_type=conv_type)
        self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128, t=t, conv_type=conv_type)
        self.Up2 = up_conv(ch_in=128, ch_out=64)
        self.Att2 = Attention_block(F_g=64, F_l=64, F_int=32, conv_type=conv_type)
        self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64, t=t, conv_type=conv_type)
        # 1x1 conv maps the final 64 features to the requested output channels.
        self.Conv_1x1 = nn.Conv2d(64, output_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Encode with skips; decode with attention-gated concatenation."""
        # encoding path
        x1 = self.RRCNN1(x)
        x2 = self.Maxpool(x1)
        x2 = self.RRCNN2(x2)
        x3 = self.Maxpool(x2)
        x3 = self.RRCNN3(x3)
        x4 = self.Maxpool(x3)
        x4 = self.RRCNN4(x4)
        x5 = self.Maxpool(x4)
        x5 = self.RRCNN5(x5)
        # decoding + concat path
        d5 = self.Up5(x5)
        x4 = self.Att5(g=d5, x=x4)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_RRCNN5(d5)
        d4 = self.Up4(d5)
        x3 = self.Att4(g=d4, x=x3)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_RRCNN4(d4)
        d3 = self.Up3(d4)
        x2 = self.Att3(g=d3, x=x2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_RRCNN3(d3)
        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=x1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_RRCNN2(d2)
        d1 = self.Conv_1x1(d2)
        return d1
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,100
|
andrekos/satflow
|
refs/heads/main
|
/satflow/data/datamodules.py
|
import os
from nowcasting_dataset.dataset.datasets import worker_init_fn
from nowcasting_dataset.config.load import load_yaml_configuration
from satflow.data.datasets import SatFlowDataset
from typing import Union, List, Tuple, Optional
from nowcasting_dataset.consts import (
SATELLITE_DATA,
SATELLITE_X_COORDS,
SATELLITE_Y_COORDS,
SATELLITE_DATETIME_INDEX,
NWP_DATA,
NWP_Y_COORDS,
NWP_X_COORDS,
DATETIME_FEATURE_NAMES,
TOPOGRAPHIC_DATA,
TOPOGRAPHIC_X_COORDS,
TOPOGRAPHIC_Y_COORDS,
)
import logging
import torch
from pytorch_lightning import LightningDataModule
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
class SatFlowDataModule(LightningDataModule):
    """
    LightningDataModule for the SatFlow NETCDF dataset.

    A DataModule implements 5 key methods:
        - prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
        - setup (things to do on every accelerator in distributed mode)
        - train_dataloader (the training dataloader)
        - val_dataloader (the validation dataloader(s))
        - test_dataloader (the test dataloader(s))
    This allows you to share a full dataset without explaining how to download,
    split, transform and process the data.
    Read the docs:
        https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
    """
    def __init__(
        self,
        temp_path: str = ".",
        n_train_data: int = 24900,
        n_val_data: int = 1000,
        cloud: str = "aws",
        num_workers: int = 8,
        pin_memory: bool = True,
        configuration_filename="satflow/configs/local.yaml",
        fake_data: bool = False,
        required_keys: Union[Tuple[str], List[str]] = [
            NWP_DATA,
            NWP_X_COORDS,
            NWP_Y_COORDS,
            SATELLITE_DATA,
            SATELLITE_X_COORDS,
            SATELLITE_Y_COORDS,
            SATELLITE_DATETIME_INDEX,
            TOPOGRAPHIC_DATA,
            TOPOGRAPHIC_X_COORDS,
            TOPOGRAPHIC_Y_COORDS,
        ]
        + list(DATETIME_FEATURE_NAMES),
        history_minutes: Optional[int] = None,
        forecast_minutes: Optional[int] = None,
    ):
        """
        fake_data: random data is created and used instead. This is useful for testing

        Args:
            temp_path: local scratch directory for the dataset splits.
            n_train_data / n_val_data: number of samples per split.
            cloud: storage backend passed to SatFlowDataset.
            configuration_filename: YAML config loaded on construction.
            required_keys: batch keys the dataset must provide.
            history_minutes / forecast_minutes: temporal extents, or None for
                the dataset defaults.
        """
        super().__init__()
        self.temp_path = temp_path
        self.configuration = load_yaml_configuration(configuration_filename)
        self.cloud = cloud
        self.n_train_data = n_train_data
        self.n_val_data = n_val_data
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.fake_data = fake_data
        # Bug fix: ``required_keys`` defaults to a shared mutable list; copy it
        # so later mutation of this instance's list cannot corrupt the default
        # shared by every other instance (mutable-default-argument hazard).
        self.required_keys = list(required_keys)
        self.forecast_minutes = forecast_minutes
        self.history_minutes = history_minutes
        self.dataloader_config = dict(
            pin_memory=self.pin_memory,
            num_workers=self.num_workers,
            prefetch_factor=8,
            worker_init_fn=worker_init_fn,
            persistent_workers=True,
            # Disable automatic batching because dataset
            # returns complete batches.
            batch_size=None,
        )
    def _dataloader(self, split: str, num_samples: int):
        """Build the DataLoader for *split*, honouring the fake-data switch."""
        if self.fake_data:
            dataset = FakeDataset(
                history_minutes=self.history_minutes, forecast_minutes=self.forecast_minutes
            )
        else:
            dataset = SatFlowDataset(
                num_samples,
                os.path.join(self.configuration.output_data.filepath, split),
                os.path.join(self.temp_path, split),
                configuration=self.configuration,
                cloud=self.cloud,
                required_keys=self.required_keys,
                history_minutes=self.history_minutes,
                forecast_minutes=self.forecast_minutes,
            )
        return torch.utils.data.DataLoader(dataset, **self.dataloader_config)
    def train_dataloader(self):
        """DataLoader over the training split."""
        return self._dataloader("train", self.n_train_data)
    def val_dataloader(self):
        """DataLoader over the validation split."""
        return self._dataloader("validation", self.n_val_data)
    def test_dataloader(self):
        """DataLoader over the test split."""
        # TODO need to change this to a test folder
        return self._dataloader("test", self.n_val_data)
class FakeDataset(torch.utils.data.Dataset):
    """Random-tensor stand-in for SatFlowDataset, used for smoke testing."""
    def __init__(
        self,
        batch_size=32,
        width=16,
        height=16,
        number_sat_channels=12,
        length=10,
        history_minutes=30,
        forecast_minutes=30,
    ):
        self.batch_size = batch_size
        # If either horizon is missing, fall back to defaults for BOTH
        # (matches the original behaviour).
        if history_minutes is None or forecast_minutes is None:
            history_minutes = 30  # Half an hour
            forecast_minutes = 240  # 4 hours
        # Steps are 5-minute intervals; the sequence includes "now".
        self.history_steps = history_minutes // 5
        self.forecast_steps = forecast_minutes // 5
        self.seq_length = self.history_steps + 1
        self.width = width
        self.height = height
        self.number_sat_channels = number_sat_channels
        self.length = length
    def __len__(self):
        return self.length
    def per_worker_init(self, worker_id: int):
        """No per-worker state is needed for random data."""
        pass
    def __getitem__(self, idx):
        randn = torch.randn
        x = {
            SATELLITE_DATA: randn(
                self.batch_size, self.seq_length, self.width, self.height, self.number_sat_channels
            ),
            NWP_DATA: randn(self.batch_size, 10, self.seq_length, 2, 2),
            "hour_of_day_sin": randn(self.batch_size, self.seq_length),
            "hour_of_day_cos": randn(self.batch_size, self.seq_length),
            "day_of_year_sin": randn(self.batch_size, self.seq_length),
            "day_of_year_cos": randn(self.batch_size, self.seq_length),
        }
        # Fake coordinates must be sorted: x ascending, y descending.
        x[SATELLITE_X_COORDS], _ = torch.sort(randn(self.batch_size, self.seq_length))
        x[SATELLITE_Y_COORDS], _ = torch.sort(
            randn(self.batch_size, self.seq_length), descending=True
        )
        # Sorted (fake) timestamps.
        x[SATELLITE_DATETIME_INDEX], _ = torch.sort(randn(self.batch_size, self.seq_length))
        y = {
            SATELLITE_DATA: randn(
                self.batch_size,
                self.forecast_steps,
                self.width,
                self.height,
                self.number_sat_channels,
            ),
        }
        return x, y
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,101
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/deeplabv3.py
|
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from nowcasting_utils.models.base import register_model
from torchvision.models.segmentation import deeplabv3_resnet50, deeplabv3_resnet101
import numpy as np
from typing import Union
from nowcasting_utils.models.losses.FocalLoss import FocalLoss
@register_model
class DeeplabV3(pl.LightningModule):
    """DeepLabV3 segmentation model adapted for satellite forecasting.

    Each forecast step is emitted as one output "class" channel; the RGB
    stem is replaced when ``input_channels != 3``.
    """
    def __init__(
        self,
        forecast_steps: int = 48,
        input_channels: int = 12,
        lr: float = 0.001,
        make_vis: bool = False,
        loss: Union[str, torch.nn.Module] = "mse",
        backbone: str = "resnet50",
        pretrained: bool = False,
        aux_loss: bool = False,
    ):
        super(DeeplabV3, self).__init__()
        self.lr = lr
        # Bug fix: these attributes are read by test_step/visualize but were
        # never stored, raising AttributeError at runtime.
        self.forecast_steps = forecast_steps
        self.input_channels = input_channels
        assert loss in ["mse", "bce", "binary_crossentropy", "crossentropy", "focal"]
        if loss == "mse":
            self.criterion = F.mse_loss
        elif loss in ["bce", "binary_crossentropy", "crossentropy"]:
            self.criterion = F.nll_loss
        elif loss in ["focal"]:
            self.criterion = FocalLoss()
        else:
            raise ValueError(f"loss {loss} not recognized")
        self.make_vis = make_vis
        if backbone in ["r101", "resnet101"]:
            self.model = deeplabv3_resnet101(
                pretrained=pretrained, num_classes=forecast_steps, aux_loss=aux_loss
            )
        else:
            self.model = deeplabv3_resnet50(
                pretrained=pretrained, num_classes=forecast_steps, aux_loss=aux_loss
            )
        if input_channels != 3:
            # Swap the RGB stem for one accepting the satellite channel count.
            self.model.backbone.conv1 = torch.nn.Conv2d(
                input_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
            )
        self.save_hyperparameters()
    @classmethod
    def from_config(cls, config):
        """Build a model from a config dict.

        Bug fix: the original also passed hidden_dim/num_layers/bilinear,
        which ``__init__`` does not accept, so every call raised TypeError.
        """
        return cls(
            forecast_steps=config.get("forecast_steps", 12),
            input_channels=config.get("in_channels", 12),
            backbone=config.get("backbone", "resnet50"),
            lr=config.get("lr", 0.001),
        )
    def forward(self, x):
        """Run the underlying DeepLabV3 network."""
        return self.model.forward(x)
    def configure_optimizers(self):
        # DeepSpeedCPUAdam would provide a 5x-7x speedup when DeepSpeed is
        # enabled; plain Adam is used here.
        return torch.optim.Adam(self.parameters(), lr=self.lr)
    def training_step(self, batch, batch_idx):
        """One optimisation step; optionally visualises ~1% of batches."""
        x, y = batch
        y_hat = self(x)
        if self.make_vis and np.random.random() < 0.01:
            self.visualize(x, y, y_hat, batch_idx)
        # Plain full-frame loss for now; a centre crop could avoid penalising
        # the model for surrounding cloud context.
        loss = self.criterion(y_hat, y)
        self.log("train/loss", loss, on_step=True)
        return loss
    def validation_step(self, batch, batch_idx):
        """Compute and log validation loss per step and per epoch."""
        x, y = batch
        y_hat = self(x)
        val_loss = self.criterion(y_hat, y)
        self.log("val/loss", val_loss, on_step=True, on_epoch=True)
        return val_loss
    def test_step(self, batch, batch_idx):
        """Compute the test loss for one batch.

        Bug fix: ``forward`` takes only ``x``; the original call
        ``self(x, self.forecast_steps)`` raised TypeError.
        """
        x, y = batch
        y_hat = self(x)
        loss = self.criterion(y_hat, y)
        return loss
    def visualize(self, x, y, y_hat, batch_idx):
        """Log per-channel input/output/target images to TensorBoard."""
        tensorboard = self.logger.experiment
        # Input image stack: the model takes everything in channels, so
        # there is no separate time dimension.
        in_image = x[0].cpu().detach().numpy()
        for i, in_slice in enumerate(in_image):
            # NOTE(review): ``j`` resets every iteration, so the image index
            # is always 0 or 1 — looks unintended, kept as-is pending intent.
            j = 0
            if i % self.input_channels == 0:  # First one
                j += 1
            tensorboard.add_image(
                f"Input_Image_{j}_Channel_{i}", in_slice, global_step=batch_idx
            )  # Each Channel
        out_image = y_hat[0].cpu().detach().numpy()
        for i, out_slice in enumerate(out_image):
            tensorboard.add_image(
                f"Output_Image_{i}", out_slice, global_step=batch_idx
            )  # Each Channel
        out_image = y[0].cpu().detach().numpy()
        for i, out_slice in enumerate(out_image):
            tensorboard.add_image(
                f"Target_Image_{i}", out_slice, global_step=batch_idx
            )  # Each Channel
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,102
|
andrekos/satflow
|
refs/heads/main
|
/satflow/examples/metnet_example.py
|
from satflow.models import LitMetNet
import torch
import urllib.request
def get_input_target(number: int):
    """Download the saved input tensor for example *number* and load it."""
    url = f"https://github.com/openclimatefix/satflow/releases/download/v0.0.3/input_{number}.pth"
    local_path, _headers = urllib.request.urlretrieve(url, filename=f"input_{number}.pth")
    return torch.load(local_path)
# Set up the model (loading pretrained weights from HuggingFace is still TODO).
# Input channels: 12 satellite + 1 topographic + 3 lat/lon + 1 cloud mask = 17.
# Output channels: 1 cloud mask here (12 would give a full satellite image).
model = LitMetNet(input_channels=17, sat_channels=13, input_size=64, out_channels=1)
torch.set_grad_enabled(False)  # inference only — no autograd bookkeeping
model.eval()
# Inputs are tensors of shape (batch, curr+prev timesteps, channels, width, height).
# MetNet uses the last 90 min of data: the previous 6 timesteps plus the current
# one.  The reference model takes (batch, 7, 256, 256, 286); for Satflow we use
# (batch, 7, 17, 256, 256) and do the remaining preprocessing inside the model.
# Data processing from raw satellite imagery to tensors is described in
# satflow/examples/create_webdataset.py and satflow/data/datasets.py.
# The loop below replays stored dataloader outputs fetched by get_input_target.
for i in range(11):
    forecast = model(get_input_target(i))
    print(forecast.size())
# This segmentation model outputs (batch, 24, 1, 16, 16) for Satflow; the
# reference MetNet outputs (batch, 480, 1, 256, 256).
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,103
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/layers/Attention.py
|
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import init
class SeparableAttn(nn.Module):
    """Factorised 3D self-attention: one SeparableAttnCell per axis (T, W, H)."""
    def __init__(
        self, in_dim, activation=F.relu, pooling_factor=2, padding_mode="constant", padding_value=0
    ):
        super().__init__()
        cells = [
            SeparableAttnCell(in_dim, axis, activation, pooling_factor, padding_mode, padding_value)
            for axis in ("T", "W", "H")
        ]
        self.model = nn.Sequential(*cells)
    def forward(self, x):
        """Apply temporal, width and height attention in sequence."""
        return self.model(x)
class SeparableAttnCell(nn.Module):
    """Self-attention along a single axis (T, W or H) of a 5D tensor.

    The chosen axis is transposed into position 2, attention is computed over
    it (keys and values are max-pooled along that axis by ``pooling_factor``),
    and the result is added back to the input scaled by a learned ``gamma``
    initialised to zero — so the cell starts out as the identity.
    """
    def __init__(
        self,
        in_dim,
        attn_id=None,
        activation=F.relu,
        pooling_factor=2,
        padding_mode="constant",
        padding_value=0,
    ):
        super().__init__()
        self.attn_id = attn_id
        self.activation = activation
        self.query_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim // 2, kernel_size=1)
        self.key_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim // 2, kernel_size=1)
        self.value_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # Pool only along the leading (attended) dimension.
        self.pooling = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(pooling_factor, 1, 1))
        self.pooling_factor = pooling_factor
        self.padding_mode = padding_mode
        self.padding_value = padding_value
        self.gamma = nn.Parameter(torch.zeros((1,)))
        self.softmax = nn.Softmax(dim=-1)
    def init_conv(self, conv, glu=True):
        """Xavier-initialise a conv layer and zero its bias (not auto-applied)."""
        init.xavier_uniform_(conv.weight)
        if conv.bias is not None:
            conv.bias.data.zero_()
    def forward(self, x):
        batch_size, C, T, W, H = x.size()
        assert T % 2 == 0 and W % 2 == 0 and H % 2 == 0, "T, W, H is not even"
        # Bring the attended axis to dimension 2.
        if self.attn_id == "T":
            attn_dim, out = T, x[:]
        elif self.attn_id == "W":
            attn_dim, out = W, x.transpose(2, 3)
        else:
            attn_dim, out = H, x.transpose(2, 4)
        pooled_dim = attn_dim // self.pooling_factor
        query = self.query_conv(out).view(batch_size, attn_dim, -1)  # B x D x (C/2 * rest)
        key = self.pooling(self.key_conv(out)).view(batch_size, -1, pooled_dim)
        attn_score = self.softmax(torch.bmm(query, key))  # B x D x (D // pf)
        value = self.pooling(self.value_conv(out)).view(batch_size, -1, pooled_dim)
        out = torch.bmm(value, attn_score.transpose(2, 1))  # B x (C * rest) x D
        # Restore the original (B, C, T, W, H) layout.
        if self.attn_id == "T":
            out = out.view(batch_size, C, W, H, T).permute(0, 1, 4, 2, 3)
        elif self.attn_id == "W":
            out = out.view(batch_size, C, T, H, W).permute(0, 1, 2, 4, 3)
        elif self.attn_id == "H":
            out = out.view(batch_size, C, T, W, H)
        return self.gamma * out + x
class SelfAttention(nn.Module):
    """Full self-attention over all T*W*H positions of a (B, C, T, W, H)
    tensor, with max-pooled keys/values to shrink the attention matrix by
    ``pooling_factor ** 3``.  Output is ``gamma * attn + x`` where ``gamma``
    is learned and initialised to 0 (identity at the start of training).

    NOTE(review): the 4D branch in ``forward`` looks broken — with T set to 1
    the evenness assert always fails, and the Conv3d / MaxPool3d layers here
    expect 5D batched input.  Confirm whether 4D input is used anywhere
    before relying on that path.
    """

    def __init__(self, in_dim, activation=F.relu, pooling_factor=2):  # TODO for better compability
        super(SelfAttention, self).__init__()
        self.activation = activation
        # 1x1x1 convs project to query/key (half channels) and value (full channels).
        self.query_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim // 2, kernel_size=1)
        self.key_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim // 2, kernel_size=1)
        self.value_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # Pools all three spatial/temporal dims, hence the cubed factor below.
        self.pooling = nn.MaxPool3d(kernel_size=2, stride=pooling_factor)
        self.pooling_factor = pooling_factor ** 3
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def init_conv(self, conv, glu=True):
        """Xavier-initialise a conv's weight and zero its bias; ``glu`` is unused."""
        init.xavier_uniform_(conv.weight)
        if conv.bias is not None:
            conv.bias.data.zero_()

    def forward(self, x):
        """Return ``gamma * attention(x) + x`` with the same shape as ``x``."""
        if len(x.size()) == 4:
            batch_size, C, W, H = x.size()
            T = 1
        else:
            batch_size, C, T, W, H = x.size()
        assert T % 2 == 0 and W % 2 == 0 and H % 2 == 0, "T, W, H is not even"
        N = T * W * H
        query = self.query_conv(x).view(batch_size, -1, N).permute(0, 2, 1)  # B x N x C
        key = self.key_conv(x)  # B x C x W x H
        key = self.pooling(key).view(batch_size, -1, N // self.pooling_factor)  # B x C x (N // pl)
        dist = torch.bmm(query, key)  # B x N x (N // pl)
        attn_score = self.softmax(dist)  # B x N x (N // pl); rows sum to 1
        value = self.value_conv(x)
        value = self.pooling(value).view(
            batch_size, -1, N // self.pooling_factor
        )  # B x C x (N // pl)
        out = torch.bmm(value, attn_score.permute(0, 2, 1))  # B x C x N
        if len(x.size()) == 4:
            out = out.view(batch_size, C, W, H)
        else:
            out = out.view(batch_size, C, T, W, H)
        # Learned residual gate, zero-initialised.
        out = self.gamma * out + x
        return out
class SelfAttention2d(nn.Module):
    r"""Self Attention Module as proposed in the paper `"Self-Attention Generative Adversarial
    Networks by Han Zhang et. al." <https://arxiv.org/abs/1805.08318>`_

    .. math:: attention = softmax((query(x))^T * key(x))
    .. math:: output = \gamma * value(x) * attention + x

    ``query``, ``key`` and ``value`` are 1x1 2D convolutions and :math:`\gamma`
    is a learned scalar initialised to zero, so the module starts out as an
    identity mapping.

    Args:
        input_dims (int): The input channel dimension in the input ``x``.
        output_dims (int, optional): The output channel dimension. If ``None`` the output
            channel value is computed as ``input_dims // 8``, so ``input_dims`` below 8
            raises an error.
        return_attn (bool, optional): Set it to ``True`` if you want the attention values to be
            returned.
    """

    def __init__(self, input_dims, output_dims=None, return_attn=False):
        if output_dims is None:
            output_dims = input_dims // 8
        if output_dims == 0:
            raise Exception(
                "The output dims corresponding to the input dims is 0. Increase the input\
                dims to 8 or more. Else specify output_dims"
            )
        super(SelfAttention2d, self).__init__()
        self.query = nn.Conv2d(input_dims, output_dims, 1)
        self.key = nn.Conv2d(input_dims, output_dims, 1)
        self.value = nn.Conv2d(input_dims, input_dims, 1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.return_attn = return_attn

    def forward(self, x):
        r"""Compute the output of the Self Attention Layer.

        Args:
            x (torch.Tensor): A 4D Tensor with the channel dimension same as ``input_dims``.

        Returns:
            A tuple ``(output, attention)`` if ``return_attn`` is ``True``,
            otherwise just the ``output`` tensor.
        """
        # Flatten the spatial grid: every tensor becomes (B, C', H*W).
        flat = (x.size(0), -1, x.size(2) * x.size(3))
        q = self.query(x).view(flat)
        k = self.key(x).view(flat).permute(0, 2, 1)
        attn = F.softmax(torch.bmm(k, q), dim=-1)  # (B, N, N); rows sum to 1
        weighted = torch.bmm(self.value(x).view(flat), attn).view(x.size())
        out = self.gamma * weighted + x  # zero-initialised residual gate
        return (out, attn) if self.return_attn else out
if __name__ == "__main__":
    # Smoke test: run each attention module on a random tensor and print the
    # shapes, which should match between input and output.
    self_attn = SelfAttention(16)  # no less than 8 (output_dims would be 0)
    print(self_attn)
    n_frames = 4
    x = torch.rand(1, 16, n_frames, 32, 32)
    y = self_attn(x)
    print(x.size())
    print(y.size())
    # with SummaryWriter(comment='self-attention') as w:
    #     w.add_graph(self_attn, [x,])
    del x, y
    sepa_attn = SeparableAttn(64)
    print(sepa_attn)
    # NOTE(review): T=3 is odd here — if SeparableAttn's forward enforces the
    # even-dims assert seen above, this call will raise; confirm.
    x = torch.rand(1, 64, 3, 128, 256)
    y = sepa_attn(x)
    print(x.size())
    print(y.size())
    # with SummaryWriter(comment='separable-attention') as w:
    #     w.add_graph(self_attn, [x,])
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,104
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/gan/discriminators.py
|
import functools
import torch
from torch import nn as nn
from satflow.models.utils import get_conv_layer
from satflow.models.gan.common import get_norm_layer, init_net
import antialiased_cnns
def define_discriminator(
    input_nc,
    ndf,
    netD,
    n_layers_D=3,
    norm="batch",
    init_type="normal",
    init_gain=0.02,
    conv_type: str = "standard",
):
    """Create and initialise a discriminator.

    Parameters:
        input_nc (int)    -- the number of channels in input images
        ndf (int)         -- the number of filters in the first conv layer
        netD (str)        -- architecture name: basic | n_layers | pixel | enhanced
        n_layers_D (int)  -- conv layer count; effective only when netD == 'n_layers'
        norm (str)        -- the type of normalization layers used in the network
        init_type (str)   -- the name of the initialization method
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
        conv_type (str)   -- conv flavour passed through to the network

    Returns the discriminator wrapped by ``init_net``.

    Architectures:
        [basic]    -- the 'PatchGAN' classifier from the original pix2pix paper;
                      classifies whether 70x70 overlapping patches are real or
                      fake, works fully convolutionally on any image size.
        [n_layers] -- same as [basic] but with a configurable depth <n_layers_D>.
        [pixel]    -- 1x1 PixelGAN classifying each pixel as real or fake;
                      encourages colour diversity, no spatial effect.
        [enhanced] -- the CloudGAN discriminator.
    All networks use LeakyReLU non-linearities.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    builders = {
        "basic": lambda: NLayerDiscriminator(
            input_nc, ndf, n_layers=3, norm_layer=norm_layer, conv_type=conv_type
        ),
        "n_layers": lambda: NLayerDiscriminator(
            input_nc, ndf, n_layers_D, norm_layer=norm_layer, conv_type=conv_type
        ),
        "pixel": lambda: PixelDiscriminator(
            input_nc, ndf, norm_layer=norm_layer, conv_type=conv_type
        ),
        "enhanced": lambda: CloudGANDiscriminator(
            input_channels=input_nc, num_filters=ndf, num_stages=3, conv_type=conv_type
        ),
    }
    if netD not in builders:
        raise NotImplementedError("Discriminator model name [%s] is not recognized" % netD)
    return init_net(builders[netD](), init_type, init_gain)
class GANLoss(nn.Module):
    """GAN objective that builds target label tensors matching the
    discriminator output's size, so callers never have to.

    Supports ``lsgan`` (MSE), ``vanilla`` (BCE-with-logits) and ``wgangp``
    (Wasserstein critic loss).  Do not put a sigmoid at the end of the
    discriminator: LSGAN needs none, and vanilla GAN handles it via
    BCEWithLogitsLoss.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Store the objective type and the real/fake label values.

        Parameters:
            gan_mode (str)              -- lsgan | vanilla | wgangp
            target_real_label (float)   -- label used for real images
            target_fake_label (float)   -- label used for fake images

        Raises:
            NotImplementedError: if ``gan_mode`` is not one of the three modes.
        """
        super(GANLoss, self).__init__()
        # Buffers follow the module across devices but are not trained.
        self.register_buffer("real_label", torch.tensor(target_real_label))
        self.register_buffer("fake_label", torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        criterion_by_mode = {"lsgan": nn.MSELoss, "vanilla": nn.BCEWithLogitsLoss}
        if gan_mode in criterion_by_mode:
            self.loss = criterion_by_mode[gan_mode]()
        elif gan_mode == "wgangp":
            self.loss = None  # computed directly from the prediction in __call__
        else:
            raise NotImplementedError("gan mode %s not implemented" % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Return a label tensor (real or fake) expanded to ``prediction``'s size."""
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Compute the loss for a discriminator output against ground-truth labels.

        Parameters:
            prediction (tensor)   -- typically the discriminator's output
            target_is_real (bool) -- whether the ground truth is the real label

        Returns:
            The scalar loss tensor.
        """
        if self.gan_mode == "wgangp":
            # Critic score: negated for real samples, raw mean for fake ones.
            mean_score = prediction.mean()
            return -mean_score if target_is_real else mean_score
        target = self.get_target_tensor(prediction, target_is_real)
        return self.loss(prediction, target)
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: a stack of strided 4x4 convs that outputs a
    1-channel map of real/fake scores, one per receptive-field patch."""

    def __init__(
        self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, conv_type: str = "standard"
    ):
        """Construct a PatchGAN discriminator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the first conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer (class or functools.partial)
            conv_type (str) -- conv flavour resolved by get_conv_layer;
                               "antialiased" downsamples with stride-1 conv + BlurPool
        """
        super(NLayerDiscriminator, self).__init__()
        # InstanceNorm2d has no affine parameters by default, so convs feeding
        # it keep their own bias; with BatchNorm2d the bias is redundant.
        if (
            type(norm_layer) == functools.partial
        ):  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        conv2d = get_conv_layer(conv_type)
        kw = 4  # kernel size
        padw = 1  # padding
        # First stage has no normalization.
        sequence = [
            conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters (capped at 8x ndf)
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            if conv_type == "antialiased":
                # Anti-aliased downsampling: stride-1 conv, then a strided BlurPool.
                block = [
                    conv2d(
                        ndf * nf_mult_prev,
                        ndf * nf_mult,
                        kernel_size=kw,
                        stride=1,
                        padding=padw,
                        bias=use_bias,
                    ),
                    norm_layer(ndf * nf_mult),
                    nn.LeakyReLU(0.2, True),
                    antialiased_cnns.BlurPool(ndf * nf_mult, stride=2),
                ]
            else:
                block = [
                    conv2d(
                        ndf * nf_mult_prev,
                        ndf * nf_mult,
                        kernel_size=kw,
                        stride=2,
                        padding=padw,
                        bias=use_bias,
                    ),
                    norm_layer(ndf * nf_mult),
                    nn.LeakyReLU(0.2, True),
                ]
            sequence += block
        # Penultimate stage keeps stride 1 (no further downsampling).
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            conv2d(
                ndf * nf_mult_prev,
                ndf * nf_mult,
                kernel_size=kw,
                stride=1,
                padding=padw,
                bias=use_bias,
            ),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
        ]
        sequence += [
            conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class PixelDiscriminator(nn.Module):
    """1x1 PatchGAN discriminator ("pixelGAN"): classifies every pixel
    independently as real or fake using only 1x1 convolutions."""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, conv_type: str = "standard"):
        """Construct a 1x1 PatchGAN discriminator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the first conv layer
            norm_layer      -- normalization layer (class or functools.partial)
            conv_type (str) -- conv flavour resolved by get_conv_layer
        """
        super(PixelDiscriminator, self).__init__()
        # InstanceNorm2d has no affine parameters by default, so convs feeding
        # it need their own bias; with BatchNorm2d it would be redundant.
        if isinstance(norm_layer, functools.partial):
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        conv2d = get_conv_layer(conv_type)
        self.net = nn.Sequential(
            conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        )

    def forward(self, input):
        """Standard forward."""
        return self.net(input)
class CloudGANBlock(nn.Module):
    """Conv -> ReLU -> 2x downsampling stage that doubles the channel count."""

    def __init__(self, input_channels, conv_type: str = "standard"):
        super().__init__()
        conv2d = get_conv_layer(conv_type)
        self.conv = conv2d(input_channels, input_channels * 2, kernel_size=(3, 3))
        self.relu = torch.nn.ReLU()
        if conv_type == "antialiased":
            # Anti-aliased downsampling: dense (stride-1) max-pool followed by
            # a strided BlurPool.
            self.pool = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=1)
            self.blurpool = antialiased_cnns.BlurPool(input_channels * 2, stride=2)
        else:
            # Plain strided max-pool; blurpool becomes a no-op.
            self.pool = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)
            self.blurpool = torch.nn.Identity()

    def forward(self, x):
        return self.blurpool(self.pool(self.relu(self.conv(x))))
class CloudGANDiscriminator(nn.Module):
    """Defines a discriminator based off https://www.climatechange.ai/papers/icml2021/54/slides.pdf

    A 1x1 stem conv, ``num_stages`` channel-doubling CloudGANBlocks, then a
    lazily-sized linear head producing a single real/fake logit.
    """

    def __init__(
        self,
        input_channels: int = 12,
        num_filters: int = 64,
        num_stages: int = 3,
        conv_type: str = "standard",
    ):
        super().__init__()
        conv2d = get_conv_layer(conv_type)
        self.conv_1 = conv2d(input_channels, num_filters, kernel_size=1, stride=1, padding=0)
        # Each block doubles its channels, so stage s consumes num_filters * 2**s.
        self.stages = torch.nn.Sequential(
            *(CloudGANBlock(num_filters * 2 ** s, conv_type) for s in range(num_stages))
        )
        self.flatten = torch.nn.Flatten()
        self.fc = torch.nn.LazyLinear(1)  # Real/Fake logit

    def forward(self, x):
        """Return a (batch, 1) real/fake score."""
        return self.fc(self.flatten(self.stages(self.conv_1(x))))
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,105
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/unet.py
|
import torch
import pytorch_lightning as pl
from nowcasting_utils.models.base import register_model
from pl_bolts.models.vision import UNet
import numpy as np
from typing import Union
import torchvision
from nowcasting_utils.models.loss import get_loss
@register_model
class Unet(pl.LightningModule):
    """LightningModule wrapping the ``pl_bolts`` UNet for frame forecasting.

    The UNet maps ``input_channels`` input planes to ``forecast_steps``
    output planes (one predicted frame per output channel).
    """

    def __init__(
        self,
        forecast_steps: int,
        input_channels: int = 3,
        num_layers: int = 5,
        hidden_dim: int = 64,
        bilinear: bool = False,
        lr: float = 0.001,
        visualize: bool = False,
        loss: Union[str, torch.nn.Module] = "mse",
        pretrained: bool = False,
    ):
        """
        Args:
            forecast_steps: number of future frames to predict (= output channels).
            input_channels: channels in the input image stack.
            num_layers: UNet depth.
            hidden_dim: filters in the first UNet layer.
            bilinear: use bilinear upsampling instead of transposed convs.
            lr: Adam learning rate.
            visualize: if True, log image grids to TensorBoard for ~1% of
                training batches.
            loss: loss name or module, resolved via ``get_loss``.
            pretrained: currently unused; kept for interface compatibility.
        """
        super(Unet, self).__init__()
        self.lr = lr
        self.input_channels = input_channels
        self.forecast_steps = forecast_steps
        self.criterion = get_loss(loss=loss)
        self.visualize = visualize
        # Fix: visualize_step() reads this attribute but it was never set,
        # so enabling visualization raised AttributeError.  One input plane
        # per column is assumed here — adjust if timesteps span several channels.
        self.channels_per_timestep = input_channels
        self.model = UNet(forecast_steps, input_channels, num_layers, hidden_dim, bilinear)
        self.save_hyperparameters()

    @classmethod
    def from_config(cls, config):
        """Build a Unet from a config dict; missing keys fall back to defaults."""
        return Unet(
            forecast_steps=config.get("forecast_steps", 12),
            input_channels=config.get("in_channels", 12),
            hidden_dim=config.get("features", 64),
            num_layers=config.get("num_layers", 5),
            bilinear=config.get("bilinear", False),
            lr=config.get("lr", 0.001),
            # Previously ignored by from_config; now configurable with the
            # same defaults as __init__, so existing configs are unaffected.
            visualize=config.get("visualize", False),
            loss=config.get("loss", "mse"),
        )

    def forward(self, x):
        return self.model.forward(x)

    def configure_optimizers(self):
        # DeepSpeedCPUAdam provides 5x to 7x speedup over torch.optim.adam(w)
        # optimizer = torch.optim.adam()
        return torch.optim.Adam(self.parameters(), lr=self.lr)

    def training_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        if self.visualize and np.random.random() < 0.01:
            # Only ~1% of batches, to keep TensorBoard logging cheap.
            self.visualize_step(x, y, y_hat, batch_idx)
        # Generally only care about the center x crop, so the model can take into account the clouds in the area without
        # being penalized for that, but for now, just do general MSE loss, also only care about first 12 channels
        loss = self.criterion(y_hat, y)
        self.log("train/loss", loss, on_step=True)
        self.log_dict(self._per_frame_losses(y_hat, y, "train"))
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        val_loss = self.criterion(y_hat, y)
        self.log("val/loss", val_loss)
        # Save out loss per frame as well
        self.log_dict(self._per_frame_losses(y_hat, y, "val"))
        return val_loss

    def test_step(self, batch, batch_idx):
        x, y = batch
        x = x.float()
        y_hat = self(x)
        loss = self.criterion(y_hat, y)
        return loss

    def _per_frame_losses(self, y_hat, y, step: str):
        """Loss per forecast frame, keyed for logging; shows horizon degradation."""
        return {
            f"{step}/frame_{f}_loss": self.criterion(y_hat[:, f, :, :], y[:, f, :, :]).item()
            for f in range(self.forecast_steps)
        }

    def visualize_step(self, x, y, y_hat, batch_idx, step="train"):
        """Log input/target/prediction image grids to the first TensorBoard logger."""
        tensorboard = self.logger.experiment[0]
        # Add all the different timesteps for a single prediction, 0.1% of the time
        images = x[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=self.channels_per_timestep)
        tensorboard.add_image(f"{step}/Input_Image_Stack", image_grid, global_step=batch_idx)
        images = y[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Target_Image_Stack", image_grid, global_step=batch_idx)
        images = y_hat[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Generated_Image_Stack", image_grid, global_step=batch_idx)
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,106
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/gan/generators.py
|
import functools
import torch
from torch import nn as nn
from typing import Union
from satflow.models.gan.common import get_norm_layer, init_net
from satflow.models.utils import get_conv_layer
import antialiased_cnns
def define_generator(
    input_nc,
    output_nc,
    ngf,
    netG: Union[str, torch.nn.Module],
    norm="batch",
    use_dropout=False,
    init_type="normal",
    init_gain=0.02,
):
    """Create and initialise a generator.

    Parameters:
        input_nc (int)    -- the number of channels in input images
        output_nc (int)   -- the number of channels in output images
        ngf (int)         -- the number of filters in the first conv layer
        netG              -- a ready-made module, or an architecture name:
                             resnet_9blocks | resnet_6blocks | unet_256 | unet_128
        norm (str)        -- normalization type: batch | instance | none
        use_dropout (bool)-- if use dropout layers
        init_type (str)   -- the name of the initialization method
        init_gain (float) -- scaling factor for normal, xavier and orthogonal

    Returns the generator wrapped by ``init_net``.

    U-Net variants: [unet_128] (128x128 inputs) and [unet_256] (256x256
    inputs), from https://arxiv.org/abs/1505.04597.  Resnet variants place 6
    or 9 Resnet blocks between downsampling/upsampling stages, adapted from
    Justin Johnson's fast-neural-style (https://github.com/jcjohnson/fast-neural-style).
    All use ReLU non-linearities.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if isinstance(netG, torch.nn.Module):
        # Caller supplied a ready-built network; just initialise it.
        net = netG
    else:
        resnet_depth = {"resnet_9blocks": 9, "resnet_6blocks": 6}
        unet_downs = {"unet_128": 7, "unet_256": 8}
        if netG in resnet_depth:
            net = ResnetGenerator(
                input_nc,
                output_nc,
                ngf,
                norm_layer=norm_layer,
                use_dropout=use_dropout,
                n_blocks=resnet_depth[netG],
            )
        elif netG in unet_downs:
            net = UnetGenerator(
                input_nc,
                output_nc,
                unet_downs[netG],
                ngf,
                norm_layer=norm_layer,
                use_dropout=use_dropout,
            )
        else:
            raise NotImplementedError("Generator model name [%s] is not recognized" % netG)
    return init_net(net, init_type, init_gain)
class ResnetGenerator(nn.Module):
    """Resnet-based generator: Resnet blocks between a few downsampling and
    upsampling operations.

    Adapted (code and idea) from Justin Johnson's neural style transfer
    project (https://github.com/jcjohnson/fast-neural-style).
    """

    def __init__(
        self,
        input_nc,
        output_nc,
        ngf=64,
        norm_layer=nn.BatchNorm2d,
        use_dropout=False,
        n_blocks=6,
        padding_type="reflect",
        conv_type: str = "standard",
    ):
        """Construct a Resnet-based generator.

        Parameters:
            input_nc (int)     -- the number of channels in input images
            output_nc (int)    -- the number of channels in output images
            ngf (int)          -- the number of filters in the first conv layer
            norm_layer         -- normalization layer (class or functools.partial)
            use_dropout (bool) -- if use dropout layers
            n_blocks (int)     -- the number of ResNet blocks
            padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
            conv_type (str)    -- conv flavour resolved by get_conv_layer;
                                  "antialiased" downsamples with stride-1 conv + BlurPool
        """
        assert n_blocks >= 0
        super(ResnetGenerator, self).__init__()
        # InstanceNorm2d has no affine parameters by default, so convs feeding
        # it need their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        conv2d = get_conv_layer(conv_type)
        # Stem: 7x7 conv over a reflection-padded input keeps the spatial size.
        model = [
            nn.ReflectionPad2d(3),
            conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            norm_layer(ngf),
            nn.ReLU(True),
        ]
        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            if conv_type == "antialiased":
                # Anti-aliased downsampling: stride-1 conv, then a strided BlurPool.
                block = [
                    conv2d(
                        ngf * mult,
                        ngf * mult * 2,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        bias=use_bias,
                    ),
                    norm_layer(ngf * mult * 2),
                    nn.ReLU(True),
                    antialiased_cnns.BlurPool(ngf * mult * 2, stride=2),
                ]
            else:
                block = [
                    conv2d(
                        ngf * mult,
                        ngf * mult * 2,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        bias=use_bias,
                    ),
                    norm_layer(ngf * mult * 2),
                    nn.ReLU(True),
                ]
            model += block
        mult = 2 ** n_downsampling
        for i in range(n_blocks):  # add ResNet blocks at the bottleneck resolution
            model += [
                ResnetBlock(
                    ngf * mult,
                    padding_type=padding_type,
                    norm_layer=norm_layer,
                    use_dropout=use_dropout,
                    use_bias=use_bias,
                )
            ]
        # Upsampling always uses plain ConvTranspose2d, regardless of conv_type.
        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [
                nn.ConvTranspose2d(
                    ngf * mult,
                    int(ngf * mult / 2),
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    output_padding=1,
                    bias=use_bias,
                ),
                norm_layer(int(ngf * mult / 2)),
                nn.ReLU(True),
            ]
        # Head: reflection pad + 7x7 conv down to output_nc, squashed to [-1, 1].
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class ResnetBlock(nn.Module):
    """Residual block: ``x + conv_block(x)`` (https://arxiv.org/pdf/1512.03385.pdf)."""

    def __init__(
        self, dim, padding_type, norm_layer, use_dropout, use_bias, conv_type: str = "standard"
    ):
        """Initialize the Resnet block.

        The conv block is built by ``build_conv_block``; the skip connection
        is applied in ``forward``.
        """
        super(ResnetBlock, self).__init__()
        conv2d = get_conv_layer(conv_type)
        self.conv_block = self.build_conv_block(
            dim, padding_type, norm_layer, use_dropout, use_bias, conv2d
        )

    def build_conv_block(
        self, dim, padding_type, norm_layer, use_dropout, use_bias, conv2d: torch.nn.Module
    ):
        """Construct the block's convolutional body.

        Parameters:
            dim (int)          -- the number of channels in the conv layers
            padding_type (str) -- padding layer name: reflect | replicate | zero
            norm_layer         -- normalization layer
            use_dropout (bool) -- if use dropout layers
            use_bias (bool)    -- if the conv layers use a bias
            conv2d             -- conv layer class/factory to use

        Returns:
            nn.Sequential of [pad?, conv, norm, ReLU, dropout?, pad?, conv, norm].

        Raises:
            NotImplementedError: for an unknown ``padding_type``.
        """
        explicit_pads = {"reflect": nn.ReflectionPad2d, "replicate": nn.ReplicationPad2d}

        def padding():
            # Returns ([explicit padding modules], implicit conv padding).
            if padding_type in explicit_pads:
                return [explicit_pads[padding_type](1)], 0
            if padding_type == "zero":
                return [], 1
            raise NotImplementedError("padding [%s] is not implemented" % padding_type)

        pads, p = padding()
        layers = pads + [
            conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim),
            nn.ReLU(True),
        ]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        pads, p = padding()
        layers += pads + [
            conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim),
        ]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward function (with skip connection)."""
        return x + self.conv_block(x)
class UnetGenerator(nn.Module):
    """U-Net generator built recursively from UnetSkipConnectionBlock,
    innermost block first."""

    def __init__(
        self,
        input_nc,
        output_nc,
        num_downs,
        ngf=64,
        norm_layer=nn.BatchNorm2d,
        use_dropout=False,
        conv_type: str = "standard",
    ):
        """Construct a Unet generator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example,
                               if |num_downs| == 7, a 128x128 image becomes 1x1
                               at the bottleneck
            ngf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer
            use_dropout (bool) -- if use dropout layers (intermediate blocks only)
            conv_type (str) -- conv flavour forwarded to every skip block

        We construct the U-Net from the innermost layer to the outermost layer;
        each step wraps the previous block as ``submodule``.
        """
        super(UnetGenerator, self).__init__()
        # construct unet structure
        unet_block = UnetSkipConnectionBlock(
            ngf * 8,
            ngf * 8,
            input_nc=None,
            submodule=None,
            norm_layer=norm_layer,
            innermost=True,
            conv_type=conv_type,
        )  # add the innermost layer
        for i in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
            unet_block = UnetSkipConnectionBlock(
                ngf * 8,
                ngf * 8,
                input_nc=None,
                submodule=unet_block,
                norm_layer=norm_layer,
                use_dropout=use_dropout,
                conv_type=conv_type,
            )
        # gradually reduce the number of filters from ngf * 8 to ngf
        unet_block = UnetSkipConnectionBlock(
            ngf * 4,
            ngf * 8,
            input_nc=None,
            submodule=unet_block,
            norm_layer=norm_layer,
            conv_type=conv_type,
        )
        unet_block = UnetSkipConnectionBlock(
            ngf * 2,
            ngf * 4,
            input_nc=None,
            submodule=unet_block,
            norm_layer=norm_layer,
            conv_type=conv_type,
        )
        unet_block = UnetSkipConnectionBlock(
            ngf,
            ngf * 2,
            input_nc=None,
            submodule=unet_block,
            norm_layer=norm_layer,
            conv_type=conv_type,
        )
        self.model = UnetSkipConnectionBlock(
            output_nc,
            ngf,
            input_nc=input_nc,
            submodule=unet_block,
            outermost=True,
            norm_layer=norm_layer,
            conv_type=conv_type,
        )  # add the outermost layer

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
    """Unet submodule with a skip connection.

        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|

    Output of a non-outermost block is ``cat([x, model(x)], dim=1)``, so the
    channel count doubles through the skip.
    """

    def __init__(
        self,
        outer_nc,
        inner_nc,
        input_nc=None,
        submodule=None,
        outermost=False,
        innermost=False,
        norm_layer=nn.BatchNorm2d,
        use_dropout=False,
        conv_type: str = "standard",
    ):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
                              (defaults to outer_nc)
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool) -- if this module is the outermost module
            innermost (bool) -- if this module is the innermost module
            norm_layer       -- normalization layer (class or functools.partial)
            use_dropout (bool) -- if use dropout layers (intermediate blocks only)
            conv_type (str)  -- conv flavour; "antialiased" downsamples with
                                stride-1 conv + BlurPool
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm2d has no affine parameters by default, so convs feeding
        # it need their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        conv2d = get_conv_layer(conv_type)
        # ``blurpool`` is only bound on the antialiased path; that is safe
        # because it is only referenced when ``antialiased`` is True below.
        if conv_type == "antialiased":
            antialiased = True
            downconv = conv2d(input_nc, inner_nc, kernel_size=4, stride=1, padding=1, bias=use_bias)
            blurpool = antialiased_cnns.BlurPool(inner_nc, stride=2)
        else:
            antialiased = False
            downconv = conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost: no norm on the way down, Tanh output on the way up;
            # upconv consumes the doubled (skip-concatenated) channels.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Innermost: no submodule, so upconv only sees inner_nc channels.
            upconv = nn.ConvTranspose2d(
                inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias
            )
            down = [downrelu, downconv, blurpool] if antialiased else [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            # Intermediate: norm on both paths; optional dropout after upsampling.
            upconv = nn.ConvTranspose2d(
                inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias
            )
            down = (
                [downrelu, downconv, downnorm, blurpool]
                if antialiased
                else [downrelu, downconv, downnorm]
            )
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:  # add skip connections
            return torch.cat([x, self.model(x)], 1)
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,107
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/layers/Generator.py
|
import torch
import torch.nn as nn
from torch.nn import functional as F
from tensorboardX import SummaryWriter
from satflow.models.layers.GResBlock import GResBlock
from satflow.models.layers.Normalization import SpectralNorm
from satflow.models.layers.ConvGRU import ConvGRU
from satflow.models.layers.Attention import SelfAttention, SeparableAttn
# from Module.CrossReplicaBN import ScaledCrossReplicaBatchNorm2d
class Generator(nn.Module):
    """Video GAN generator: maps a noise vector plus class label to a clip of frames.

    The latent code is affine-projected into an ``8*ch`` feature map of spatial size
    ``latent_dim x latent_dim``, then pushed through alternating ConvGRU stages
    (temporal recurrence over ``n_frames``) and class-conditioned GResBlock stages
    (spatial processing/upsampling), and finally colorized to 3 channels with a
    spectrally-normalized conv.
    """

    def __init__(self, in_dim=120, latent_dim=4, n_class=4, ch=32, n_frames=48, hierar_flag=False):
        # in_dim:      size of the input noise vector (and of the class embedding)
        # latent_dim:  spatial side length of the initial feature map
        # n_class:     number of class labels for the embedding
        # ch:          base channel multiplier
        # n_frames:    number of video frames generated
        # hierar_flag: if True, the noise is split into in_dim-sized chunks
        #              (only the first chunk is consumed here)
        super().__init__()
        self.in_dim = in_dim
        self.latent_dim = latent_dim
        self.n_class = n_class
        self.ch = ch
        self.hierar_flag = hierar_flag
        self.n_frames = n_frames
        self.embedding = nn.Embedding(n_class, in_dim)
        # NOTE: "transfrom" is a historical typo kept for checkpoint compatibility.
        # Projects [noise ‖ class-embedding] (2*in_dim) to the flattened initial map.
        self.affine_transfrom = nn.Linear(in_dim * 2, latent_dim * latent_dim * 8 * ch)
        # Alternating temporal (ConvGRU) and spatial (GResBlock) stages.
        # GResBlocks are conditioned on the concatenated noise+class vector (2*in_dim).
        self.conv = nn.ModuleList(
            [
                ConvGRU(
                    8 * ch,
                    hidden_sizes=[8 * ch, 16 * ch, 8 * ch],
                    kernel_sizes=[3, 5, 3],
                    n_layers=3,
                ),
                # ConvGRU(8 * ch, hidden_sizes=[8 * ch, 8 * ch], kernel_sizes=[3, 3], n_layers=2),
                GResBlock(8 * ch, 8 * ch, n_class=in_dim * 2, upsample_factor=1),
                GResBlock(8 * ch, 8 * ch, n_class=in_dim * 2),
                ConvGRU(
                    8 * ch,
                    hidden_sizes=[8 * ch, 16 * ch, 8 * ch],
                    kernel_sizes=[3, 5, 3],
                    n_layers=3,
                ),
                # ConvGRU(8 * ch, hidden_sizes=[8 * ch, 8 * ch], kernel_sizes=[3, 3], n_layers=2),
                GResBlock(8 * ch, 8 * ch, n_class=in_dim * 2, upsample_factor=1),
                GResBlock(8 * ch, 8 * ch, n_class=in_dim * 2),
                ConvGRU(
                    8 * ch,
                    hidden_sizes=[8 * ch, 16 * ch, 8 * ch],
                    kernel_sizes=[3, 5, 3],
                    n_layers=3,
                ),
                # ConvGRU(8 * ch, hidden_sizes=[8 * ch, 8 * ch], kernel_sizes=[3, 3], n_layers=2),
                GResBlock(8 * ch, 8 * ch, n_class=in_dim * 2, upsample_factor=1),
                GResBlock(8 * ch, 4 * ch, n_class=in_dim * 2),
                ConvGRU(
                    4 * ch,
                    hidden_sizes=[4 * ch, 8 * ch, 4 * ch],
                    kernel_sizes=[3, 5, 5],
                    n_layers=3,
                ),
                # ConvGRU(4 * ch, hidden_sizes=[4 * ch, 4 * ch], kernel_sizes=[3, 5], n_layers=2),
                GResBlock(4 * ch, 4 * ch, n_class=in_dim * 2, upsample_factor=1),
                GResBlock(4 * ch, 2 * ch, n_class=in_dim * 2),
            ]
        )
        # Final projection to 3 (RGB) output channels.
        self.colorize = SpectralNorm(nn.Conv2d(2 * ch, 3, kernel_size=(3, 3), padding=1))

    def forward(self, x, class_id):
        """Generate a video.

        Args:
            x: noise tensor of shape (B, in_dim) — or, when hierar_flag is True,
               a (B, k*in_dim) tensor that is split into in_dim-sized chunks.
            class_id: integer class labels, shape (B,).

        Returns:
            Tensor reshaped to (B, n_frames, 3, H, W) after tanh.
        """
        if self.hierar_flag is True:
            noise_emb = torch.split(x, self.in_dim, dim=1)
        else:
            noise_emb = x
        class_emb = self.embedding(class_id)  # B x in_dim
        if self.hierar_flag is True:
            y = self.affine_transfrom(
                torch.cat((noise_emb[0], class_emb), dim=1)
            )  # B x (2 x ld x ch)
        else:
            y = self.affine_transfrom(torch.cat((noise_emb, class_emb), dim=1))  # B x (2 x ld x ch)
        y = y.view(-1, 8 * self.ch, self.latent_dim, self.latent_dim)  # B x ch x ld x ld
        for k, conv in enumerate(self.conv):
            if isinstance(conv, ConvGRU):
                # Temporal stage: unroll the GRU over n_frames, feeding each
                # frame's hidden state into the next frame.
                if k > 0:
                    # After the first stage, y is flattened (B*T, C, W, H);
                    # restore the time dimension first.
                    _, C, W, H = y.size()
                    y = y.view(-1, self.n_frames, C, W, H).contiguous()
                frame_list = []
                for i in range(self.n_frames):
                    if k == 0:
                        # First stage: the same spatial seed y is used for every frame.
                        if i == 0:
                            frame_list.append(conv(y))  # T x [B x ch x ld x ld]
                        else:
                            frame_list.append(conv(y, frame_list[i - 1]))
                    else:
                        if i == 0:
                            frame_list.append(
                                conv(y[:, 0, :, :, :].squeeze(1))
                            )  # T x [B x ch x ld x ld]
                        else:
                            frame_list.append(conv(y[:, i, :, :, :].squeeze(1), frame_list[i - 1]))
                # Keep only the last GRU layer's hidden state for each frame.
                frame_hidden_list = []
                for i in frame_list:
                    frame_hidden_list.append(i[-1].unsqueeze(0))
                y = torch.cat(frame_hidden_list, dim=0)  # T x B x ch x ld x ld
                y = y.permute(1, 0, 2, 3, 4).contiguous()  # B x T x ch x ld x ld
                # print(y.size())
                B, T, C, W, H = y.size()
                # Fold time into the batch so spatial blocks treat frames independently.
                y = y.view(-1, C, W, H)
            elif isinstance(conv, GResBlock):
                # Spatial stage conditioned on [noise ‖ class], repeated per frame.
                condition = torch.cat([noise_emb, class_emb], dim=1)
                condition = condition.repeat(self.n_frames, 1)
                y = conv(y, condition)  # BT, C, W, H
        y = F.relu(y)
        y = self.colorize(y)
        y = torch.tanh(y)
        BT, C, W, H = y.size()
        y = y.view(-1, self.n_frames, C, W, H)  # B, T, C, W, H
        return y
if __name__ == "__main__":
    # Smoke test: push one random batch through the generator on the GPU
    # and print the input/output tensor sizes.
    batch_size = 5
    in_dim = 120
    n_class = 4
    n_frames = 4
    noise = torch.randn(batch_size, in_dim).cuda()
    labels = torch.randint(low=0, high=3, size=(batch_size,)).cuda()
    net = Generator(in_dim, n_class=n_class, ch=3, n_frames=n_frames).cuda()
    clip = net(noise, labels)
    print(noise.size())
    print(clip.size())
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,108
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/pixel_cnn.py
|
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from nowcasting_utils.models.base import register_model
from pl_bolts.models.vision import PixelCNN as Pixcnn
@register_model
class PixelCNN(pl.LightningModule):
    """Lightning wrapper around pl_bolts' PixelCNN, trained with MSE on frame pairs.

    Args:
        future_timesteps: number of future timesteps (stored for test_step use)
        input_channels: channels of the input image stack
        num_layers: number of PixelCNN blocks
        num_hidden: hidden channels per block
        pretrained: unused placeholder kept for interface compatibility
        lr: Adam learning rate
    """

    def __init__(
        self,
        future_timesteps: int,
        input_channels: int = 3,
        num_layers: int = 5,
        num_hidden: int = 64,
        pretrained: bool = False,
        lr: float = 0.001,
    ):
        super(PixelCNN, self).__init__()
        self.lr = lr
        # BUG FIX: test_step referenced self.forecast_steps, which was never set.
        self.forecast_steps = future_timesteps
        self.model = Pixcnn(
            input_channels=input_channels, hidden_channels=num_hidden, num_blocks=num_layers
        )

    @classmethod
    def from_config(cls, config):
        """Build a PixelCNN from a dict-like config; missing keys use defaults.

        BUG FIX: the previous version passed features_start/bilinear, which are
        not parameters of __init__ and raised TypeError on any call.
        """
        return PixelCNN(
            future_timesteps=config.get("future_timesteps", 12),
            input_channels=config.get("in_channels", 12),
            num_layers=config.get("num_layers", 5),
            # "features" previously mapped to a nonexistent features_start kwarg;
            # presumably it was meant to size the hidden channels — TODO confirm.
            num_hidden=config.get("features", 64),
            lr=config.get("lr", 0.001),
        )

    def forward(self, x):
        # BUG FIX: the result was previously discarded (missing return),
        # so every training/validation step computed a loss against None.
        return self.model.forward(x)

    def configure_optimizers(self):
        # DeepSpeedCPUAdam provides 5x to 7x speedup over torch.optim.adam(w)
        # optimizer = torch.optim.adam()
        return torch.optim.Adam(self.parameters(), lr=self.lr)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        # Generally only care about the center x crop, so the model can take into account the clouds in the area without
        # being penalized for that, but for now, just do general MSE loss, also only care about first 12 channels
        loss = F.mse_loss(y_hat, y)
        self.log("train/loss", loss, on_step=True)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        val_loss = F.mse_loss(y_hat, y)
        self.log("val/loss", val_loss, on_step=True, on_epoch=True)
        return val_loss

    def test_step(self, batch, batch_idx):
        x, y = batch
        # BUG FIX: forward() takes only x; the old call self(x, self.forecast_steps)
        # raised TypeError (and self.forecast_steps was undefined).
        y_hat = self(x)
        loss = F.mse_loss(y_hat, y)
        return loss
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,109
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/cloudgan.py
|
import pytorch_lightning as pl
import torch
from torch.optim import lr_scheduler
import torchvision
from collections import OrderedDict
from satflow.models import R2U_Net, ConvLSTM
from satflow.models.gan import GANLoss, define_generator, define_discriminator
from satflow.models.layers import ConditionTime
from nowcasting_utils.models.loss import get_loss
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
import numpy as np
class CloudGAN(pl.LightningModule):
    def __init__(
        self,
        forecast_steps: int = 48,
        input_channels: int = 12,
        lr: float = 0.0002,
        beta1: float = 0.5,
        beta2: float = 0.999,
        num_filters: int = 64,
        generator_model: str = "runet",
        norm: str = "batch",
        use_dropout: bool = False,
        discriminator_model: str = "enhanced",
        discriminator_layers: int = 0,
        loss: str = "vanilla",
        scheduler: str = "plateau",
        lr_epochs: int = 10,
        lambda_l1: float = 100.0,
        l1_loss: str = "l1",
        channels_per_timestep: int = 12,
        condition_time: bool = False,
        pretrained: bool = False,
    ):
        """
        Creates CloudGAN, based off of https://www.climatechange.ai/papers/icml2021/54

        Changes include allowing outputs for all timesteps, optionally conditioning on time
        for single timestep output

        Args:
            forecast_steps: Number of timesteps to forecast
            input_channels: Number of input channels
            lr: Learning Rate
            beta1: optimizer beta1
            beta2: optimizer beta2 value
            num_filters: Number of filters in generator
            generator_model: Generator name
            norm: Norm type
            use_dropout: Whether to use dropout
            discriminator_model: model for discriminator, one of options in define_discriminator
            discriminator_layers: Number of layers in discriminator, only for NLayerDiscriminator
            loss: Loss function, described in GANLoss
            scheduler: LR scheduler name
            lr_epochs: Epochs for LR scheduler
            lambda_l1: Lambda for L1 loss, from slides recommended between 5-200
            l1_loss: Loss to use for the L1 in the slides, default is L1, also SSIM is available
            channels_per_timestep: Channels per input timestep
            condition_time: Whether to condition on a future timestep, similar to MetNet
        """
        super().__init__()
        self.lr = lr
        self.b1 = beta1
        self.b2 = beta2
        self.loss = loss
        self.lambda_l1 = lambda_l1
        self.lr_epochs = lr_epochs
        self.lr_method = scheduler
        self.forecast_steps = forecast_steps
        self.input_channels = input_channels
        self.output_channels = forecast_steps * channels_per_timestep
        self.channels_per_timestep = channels_per_timestep
        self.condition_time = condition_time
        if condition_time:
            self.ct = ConditionTime(forecast_steps)
        # define networks (both generator and discriminator)
        gen_input_channels = (
            input_channels  # + forecast_steps if condition_time else input_channels
        )
        self.recurrent = (
            False  # Does the generator generate all timesteps at once, or a single one at a time?
        )
        if generator_model == "runet":
            generator_model = R2U_Net(gen_input_channels, self.output_channels, t=3)
        elif generator_model == "convlstm":
            self.recurrent = True  # ConvLSTM makes a list of output timesteps
            generator_model = ConvLSTM(
                gen_input_channels, hidden_dim=num_filters, out_channels=self.channels_per_timestep
            )
        self.generator = define_generator(
            gen_input_channels,
            self.output_channels,
            num_filters,
            generator_model,
            norm,
            use_dropout,
        )
        # Timestep x C x H x W inputs/outputs, need to flatten for discriminator
        # TODO Add Discriminator that can use timesteps
        # BUG FIX: `generator_model` was rebound to a module instance above, so the
        # old string comparison `generator_model == "convlstm"` could never be True.
        # `self.recurrent` is set exactly for the ConvLSTM case.
        self.flatten_generator = self.recurrent
        self.discriminator = define_discriminator(
            self.channels_per_timestep if condition_time else self.output_channels,
            num_filters,
            discriminator_model,
            discriminator_layers,
            norm,
        )
        # define loss functions
        self.criterionGAN = GANLoss(loss)
        self.criterionL1 = get_loss(l1_loss, channels=self.channels_per_timestep)
        self.save_hyperparameters()

    def train_per_timestep(
        self, images: torch.Tensor, future_images: torch.Tensor, optimizer_idx: int, batch_idx: int
    ):
        """
        For training with conditioning on time, so when the model is giving a single output
        This goes through every timestep in forecast_steps and runs the training
        Args:
            images: (Batch, Timestep, Channels, Width, Height)
            future_images: (Batch, Timestep, Channels, Width, Height)
            optimizer_idx: int, the optimizer to use (0 = generator, 1 = discriminator)
        Returns:
            OrderedDict with the mean loss over all forecast timesteps
        """
        if optimizer_idx == 0:
            # generate images
            total_loss = 0
            # Visualize roughly 1% of batches.
            vis_step = np.random.random() < 0.01
            generated_images = self(
                images, forecast_steps=self.forecast_steps
            )  # (Batch, Channel, Width, Height)
            for i in range(self.forecast_steps):
                # x = self.ct.forward(images, i)  # Condition on future timestep
                # fake = self(x, forecast_steps=i + 1)  # (Batch, Channel, Width, Height)
                fake = generated_images[:, :, i, :, :]  # Only take the one at the end
                if vis_step:
                    self.visualize_step(
                        images, future_images[:, i, :, :], fake, batch_idx, step=f"train_frame_{i}"
                    )
                # adversarial loss is binary cross-entropy
                gan_loss = self.criterionGAN(self.discriminator(fake), True)
                # Only L1 loss on the given timestep
                l1_loss = self.criterionL1(fake, future_images[:, i, :, :]) * self.lambda_l1
                self.log(f"train/frame_{i}_l1_loss", l1_loss)
                g_loss = gan_loss + l1_loss
                total_loss += g_loss
            g_loss = total_loss / self.forecast_steps  # Get the mean loss over all timesteps
            tqdm_dict = {"g_loss": g_loss}
            output = OrderedDict({"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/g_loss": g_loss})
            return output
        # train discriminator
        if optimizer_idx == 1:
            # Measure discriminator's ability to classify real from generated samples
            # generate images
            total_loss = 0
            generated_images = self(
                images, forecast_steps=self.forecast_steps
            )  # (Batch, Channel, Width, Height)
            for i in range(self.forecast_steps):
                # x = self.ct.forward(images, i)  # Condition on future timestep
                # fake = self(x, forecast_steps=i + 1)  # (Batch, Channel, Width, Height)
                fake = generated_images[:, :, i, :, :]  # Only take the one at the end
                real_loss = self.criterionGAN(self.discriminator(future_images[:, i, :, :]), True)
                # adversarial loss is binary cross-entropy
                fake_loss = self.criterionGAN(self.discriminator(fake), False)
                # discriminator loss is the average of these
                d_loss = (real_loss + fake_loss) / 2
                self.log(f"train/frame_{i}_d_loss", d_loss)
                total_loss += d_loss
            d_loss = total_loss / self.forecast_steps  # Average of the per-timestep loss
            tqdm_dict = {"d_loss": d_loss}
            output = OrderedDict({"loss": d_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/d_loss": d_loss})
            return output

    def train_all_timestep(
        self, images: torch.Tensor, future_images: torch.Tensor, optimizer_idx: int, batch_idx: int
    ):
        """
        Train on all timesteps, instead of single timestep at a time. No conditioning on future timestep
        Args:
            images: input frames
            future_images: target frames
            optimizer_idx: 0 = generator step, 1 = discriminator step
            batch_idx: batch index (used for logging)
        Returns:
            OrderedDict with the step loss
        """
        if optimizer_idx == 0:
            # generate images
            generated_images = self(images)
            fake = torch.cat((images, generated_images), 1)
            # log sampled images
            if np.random.random() < 0.01:
                self.visualize_step(
                    images, future_images, generated_images, batch_idx, step="train"
                )
            # adversarial loss is binary cross-entropy
            gan_loss = self.criterionGAN(self.discriminator(fake), True)
            l1_loss = self.criterionL1(generated_images, future_images) * self.lambda_l1
            g_loss = gan_loss + l1_loss
            tqdm_dict = {"g_loss": g_loss}
            output = OrderedDict({"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/g_loss": g_loss})
            return output
        # train discriminator
        if optimizer_idx == 1:
            # Measure discriminator's ability to classify real from generated samples
            # how well can it label as real?
            real = torch.cat((images, future_images), 1)
            real_loss = self.criterionGAN(self.discriminator(real), True)
            # how well can it label as fake?
            # NOTE(review): gen_output is not detached here, so gradients flow back
            # into the generator during the discriminator step — wasteful, though
            # only opt_d steps. Consider .detach(); left unchanged to preserve behavior.
            gen_output = self(images)
            fake = torch.cat((images, gen_output), 1)
            fake_loss = self.criterionGAN(self.discriminator(fake), False)
            # discriminator loss is the average of these
            d_loss = (real_loss + fake_loss) / 2
            tqdm_dict = {"d_loss": d_loss}
            output = OrderedDict({"loss": d_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/d_loss": d_loss})
            return output

    def training_step(self, batch, batch_idx, optimizer_idx):
        """Dispatch to per-timestep or all-timestep training based on condition_time."""
        images, future_images = batch
        if self.condition_time:
            return self.train_per_timestep(images, future_images, optimizer_idx, batch_idx)
        else:
            return self.train_all_timestep(images, future_images, optimizer_idx, batch_idx)

    def val_all_timestep(self, images, future_images, batch_idx):
        """Validation mirror of train_all_timestep; computes both G and D losses."""
        # generate images
        generated_images = self(images)
        fake = torch.cat((images, generated_images), 1)
        # log sampled images
        if np.random.random() < 0.01:
            self.visualize_step(images, future_images, generated_images, batch_idx, step="val")
        # adversarial loss is binary cross-entropy
        gan_loss = self.criterionGAN(self.discriminator(fake), True)
        l1_loss = self.criterionL1(generated_images, future_images) * self.lambda_l1
        g_loss = gan_loss + l1_loss
        # how well can it label as real?
        real = torch.cat((images, future_images), 1)
        real_loss = self.criterionGAN(self.discriminator(real), True)
        # how well can it label as fake?
        # BUG FIX: the target here was `True`, which rewarded the discriminator for
        # calling fakes real and corrupted the reported d_loss. Fakes target False.
        fake_loss = self.criterionGAN(self.discriminator(fake), False)
        # discriminator loss is the average of these
        d_loss = (real_loss + fake_loss) / 2
        tqdm_dict = {"d_loss": d_loss}
        output = OrderedDict(
            {
                "val/discriminator_loss": d_loss,
                "val/generator_loss": g_loss,
                "progress_bar": tqdm_dict,
                "log": tqdm_dict,
            }
        )
        self.log_dict({"val/d_loss": d_loss, "val/g_loss": g_loss, "val/loss": d_loss + g_loss})
        return output

    def val_per_timestep(self, images, future_images, batch_idx):
        """Validation mirror of train_per_timestep; averages G and D losses per frame."""
        total_g_loss = 0
        total_d_loss = 0
        # Visualize roughly 1% of batches.
        vis_step = np.random.random() < 0.01
        generated_images = self(
            images, forecast_steps=self.forecast_steps
        )  # (Batch, Channel, Width, Height)
        for i in range(self.forecast_steps):
            # x = self.ct.forward(images, i)  # Condition on future timestep
            fake = generated_images[:, :, i, :, :]  # Only take the one at the end
            if vis_step:
                self.visualize_step(
                    images, future_images[:, i, :, :], fake, batch_idx, step=f"val_frame_{i}"
                )
            # adversarial loss is binary cross-entropy
            gan_loss = self.criterionGAN(self.discriminator(fake), True)
            # Only L1 loss on the given timestep
            l1_loss = self.criterionL1(fake, future_images[:, i, :, :]) * self.lambda_l1
            real_loss = self.criterionGAN(self.discriminator(future_images[:, i, :, :]), True)
            # adversarial loss is binary cross-entropy
            fake_loss = self.criterionGAN(self.discriminator(fake), False)
            # discriminator loss is the average of these
            d_loss = (real_loss + fake_loss) / 2
            self.log(f"val/frame_{i}_d_loss", d_loss)
            total_d_loss += d_loss
            self.log(f"val/frame_{i}_l1_loss", l1_loss)
            g_loss = gan_loss + l1_loss
            total_g_loss += g_loss
        g_loss = total_g_loss / self.forecast_steps
        d_loss = total_d_loss / self.forecast_steps
        loss = g_loss + d_loss
        tqdm_dict = {"loss": loss}
        output = OrderedDict(
            {
                "val/discriminator_loss": d_loss,
                "val/generator_loss": g_loss,
                "progress_bar": tqdm_dict,
                "log": tqdm_dict,
            }
        )
        self.log_dict({"val/d_loss": d_loss, "val/g_loss": g_loss, "val/loss": d_loss + g_loss})
        return output

    def validation_step(self, batch, batch_idx):
        """Dispatch to per-timestep or all-timestep validation based on condition_time."""
        images, future_images = batch
        if self.condition_time:
            return self.val_per_timestep(images, future_images, batch_idx)
        else:
            return self.val_all_timestep(images, future_images, batch_idx)

    def forward(self, x, **kwargs):
        """Run the generator; kwargs (e.g. forecast_steps) are passed through."""
        return self.generator.forward(x, **kwargs)

    def configure_optimizers(self):
        """Build paired Adam optimizers and LR schedulers for generator/discriminator."""
        lr = self.lr
        b1 = self.b1
        b2 = self.b2
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
        if self.lr_method == "plateau":
            g_scheduler = lr_scheduler.ReduceLROnPlateau(
                opt_g, mode="min", factor=0.2, threshold=0.01, patience=10
            )
            d_scheduler = lr_scheduler.ReduceLROnPlateau(
                opt_d, mode="min", factor=0.2, threshold=0.01, patience=10
            )
        elif self.lr_method == "cosine":
            g_scheduler = lr_scheduler.CosineAnnealingLR(opt_g, T_max=self.lr_epochs, eta_min=0)
            d_scheduler = lr_scheduler.CosineAnnealingLR(opt_d, T_max=self.lr_epochs, eta_min=0)
        elif self.lr_method == "warmup":
            g_scheduler = LinearWarmupCosineAnnealingLR(
                opt_g, warmup_epochs=self.lr_epochs, max_epochs=100
            )
            d_scheduler = LinearWarmupCosineAnnealingLR(
                opt_d, warmup_epochs=self.lr_epochs, max_epochs=100
            )
        else:
            # BUG FIX: the error was previously `return`ed instead of raised, so an
            # unknown scheduler name silently handed Lightning an exception object.
            raise NotImplementedError("learning rate policy is not implemented")
        return [opt_g, opt_d], [g_scheduler, d_scheduler]

    def visualize_step(
        self, x: torch.Tensor, y: torch.Tensor, y_hat: torch.Tensor, batch_idx: int, step: str
    ):
        """Write input/target/generated image grids for the first batch element to TensorBoard."""
        # the logger you used (in this case tensorboard)
        tensorboard = self.logger.experiment[0]
        # Image input is either (B, C, H, W) or (B, T, C, H, W)
        if len(x.shape) == 5:
            # Timesteps per channel
            images = x[0].cpu().detach()
            for i, t in enumerate(images):  # Now would be (C, H, W)
                t = [torch.unsqueeze(img, dim=0) for img in t]
                image_grid = torchvision.utils.make_grid(t, nrow=self.channels_per_timestep)
                tensorboard.add_image(
                    f"{step}/Input_Image_Stack_Frame_{i}", image_grid, global_step=batch_idx
                )
        else:
            images = x[0].cpu().detach()
            images = [torch.unsqueeze(img, dim=0) for img in images]
            image_grid = torchvision.utils.make_grid(images, nrow=self.channels_per_timestep)
            tensorboard.add_image(f"{step}/Input_Image_Stack", image_grid, global_step=batch_idx)
        # In all cases, the output target and image are in (B, C, H, W) format
        images = y[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Target_Image_Stack", image_grid, global_step=batch_idx)
        images = y_hat[0].cpu().detach()
        images = [torch.unsqueeze(img, dim=0) for img in images]
        image_grid = torchvision.utils.make_grid(images, nrow=12)
        tensorboard.add_image(f"{step}/Generated_Image_Stack", image_grid, global_step=batch_idx)
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,110
|
andrekos/satflow
|
refs/heads/main
|
/setup.py
|
"""Package setup for satflow."""
# BUG FIX: distutils does not understand long_description_content_type (and is
# deprecated since Python 3.10); setuptools does, with the same setup() API.
from setuptools import setup
from pathlib import Path

this_directory = Path(__file__).parent
# Pin runtime dependencies from requirements.txt and reuse the README as the
# long description on PyPI.
install_requires = (this_directory / "requirements.txt").read_text().splitlines()
long_description = (this_directory / "README.md").read_text()
exec(open("satflow/version.py").read())  # defines __version__
setup(
    name="satflow",
    version=__version__,
    packages=["satflow", "satflow.data", "satflow.models"],
    url="https://github.com/openclimatefix/satflow",
    license="MIT License",
    company="Open Climate Fix Ltd",  # NOTE(review): not a standard setup() kwarg; ignored with a warning
    author="Jacob Bieker",
    install_requires=install_requires,
    long_description=long_description,
    # BUG FIX: was misspelled `ong_description_content_type`, so the README was
    # rendered as plain text instead of markdown.
    long_description_content_type="text/markdown",
    author_email="jacob@openclimatefix.org",
    description="Satellite Optical Flow",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.8",
    ],
)
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,111
|
andrekos/satflow
|
refs/heads/main
|
/satflow/models/layers/ConditionTime.py
|
import torch
from torch import nn as nn
def condition_time(x, i=0, size=(12, 16), seq_len=15):
    """Build a one-hot time encoding as image layers.

    Returns a (seq_len, *size) tensor that is all ones in layer ``i`` and zero
    elsewhere, matching the dtype and device of ``x``.
    """
    assert i < seq_len
    one_hot = torch.eye(seq_len, dtype=x.dtype, device=x.device)[i]
    one_hot = one_hot.unsqueeze(-1).unsqueeze(-1)          # (seq_len, 1, 1)
    canvas = torch.ones(1, *size, dtype=x.dtype, device=x.device)
    return one_hot * canvas                                # broadcast to (seq_len, *size)
class ConditionTime(nn.Module):
    """Append `horizon` one-hot time-encoding channels to a stack of images."""

    def __init__(self, horizon, ch_dim=2, num_dims=5):
        super().__init__()
        self.horizon = horizon
        self.ch_dim = ch_dim
        self.num_dims = num_dims

    def forward(self, x, fstep=0):
        """Concatenate the encoding for forecast step `fstep` onto `x` along ch_dim."""
        if self.num_dims == 5:
            bs, seq_len, ch, h, w = x.shape
            enc = condition_time(x, fstep, (h, w), seq_len=self.horizon)
            enc = enc.repeat(bs, seq_len, 1, 1, 1)
        else:
            bs, h, w, ch = x.shape
            enc = condition_time(x, fstep, (h, w), seq_len=self.horizon).repeat(bs, 1, 1, 1)
            enc = enc.permute(0, 2, 3, 1)
        x = torch.cat([x, enc], dim=self.ch_dim)
        assert x.shape[self.ch_dim] == (ch + self.horizon)  # check if it makes sense
        return x
|
{"/tests/test_models.py": ["/satflow/models/__init__.py"], "/satflow/models/__init__.py": ["/satflow/models/conv_lstm.py", "/satflow/models/pl_metnet.py", "/satflow/models/runet.py", "/satflow/models/attention_unet.py", "/satflow/models/perceiver.py"], "/satflow/models/pix2pix.py": ["/satflow/models/gan/discriminators.py"], "/satflow/data/datamodules.py": ["/satflow/data/datasets.py"], "/satflow/examples/metnet_example.py": ["/satflow/models/__init__.py"], "/satflow/models/gan/discriminators.py": ["/satflow/models/gan/common.py"], "/satflow/models/gan/generators.py": ["/satflow/models/gan/common.py"], "/satflow/models/layers/Generator.py": ["/satflow/models/layers/Attention.py"], "/satflow/models/cloudgan.py": ["/satflow/models/__init__.py"]}
|
36,113
|
Meffest/vk_to_telegram
|
refs/heads/master
|
/bot.py
|
import config
import json
from time import sleep
from requests import get
from vk_wall_listener import get_data_from_last_wall_record
def send_message(message_text):
    """Send a plain text message to the configured Telegram chat.

    Link previews are disabled so reposted VK links stay compact.
    Returns the requests Response.
    """
    endpoint = 'https://api.telegram.org/bot' + config.telegram_token + '/sendMessage'
    payload = {
        'chat_id': config.chat_id,
        'text': message_text,
        'disable_web_page_preview': True,
    }
    return get(endpoint, params=payload)
def send_image(image_url, message_text=None):
    """Send a photo to the configured Telegram chat.

    With a caption the message notifies normally; without one the photo is
    sent silently (disable_notification). Returns the requests Response.
    """
    endpoint = 'https://api.telegram.org/bot' + config.telegram_token + '/sendPhoto'
    payload = {'chat_id': config.chat_id, 'photo': image_url}
    if message_text:
        payload['caption'] = message_text
    else:
        payload['disable_notification'] = True
    return get(endpoint, params=payload)
def send_media_group(media_urls):
    """Send several photos as one Telegram media group (album).

    Returns the requests Response.
    """
    media = [{'type': 'photo', 'media': m} for m in media_urls]
    endpoint = 'https://api.telegram.org/bot' + config.telegram_token + '/sendMediaGroup'
    payload = {'chat_id': config.chat_id,
               'media': json.dumps(media)}
    return get(endpoint, params=payload)
def has_already_been_reposted(record, chat):
    """Return True when this record (by hash, id, or original-post id) was already reposted."""
    hashes = get_posted_hashes(chat)
    ids = get_posted_ids(chat)
    original_id = record['original_record_id']
    return (record['hash'] in hashes
            or record['record_id'] in ids
            or (original_id is not None and original_id in ids))
def get_posted_hashes(chat):
    # Returns the in-memory list of content hashes already reposted.
    # `chat` is currently unused; it anticipates per-chat storage.
    return posted_records_hashes
# replace later with a proper DB-backed implementation
def get_posted_ids(chat):
    # Returns the in-memory list of post ids (both final and original) already reposted.
    # `chat` is currently unused; it anticipates per-chat storage.
    return posted_records_ids
# replace later with a proper DB-backed implementation
def get_posted_original_ids(chat):
    # Returns the in-memory list of original-post ids.
    # NOTE(review): `posted_records_original_ids` is never initialized anywhere in
    # this module and this function is never called — calling it would raise NameError.
    return posted_records_original_ids
# replace later with a proper DB-backed implementation
def add_record_to_posted(record, chat):
    # Remembers a record as reposted: stores its content hash plus its own id,
    # and (for reposts) the id of the original post as well.
    add_hash_to_posted(record['hash'], chat)
    add_id_to_posted(record['record_id'], chat)
    if record['original_record_id'] != None:
        add_id_to_posted(record['original_record_id'], chat)  # NB! final-post ids and original-post ids are deliberately merged into one store (see rationale below)
# тут мы проверяем на выполнение любого условия, приводящего к отмене переброса в Телеграм:
# — либо такое содержимое уже перебрасывали
# (определяем по хешу, учитывающему: а) текст, б) объём каждой картинки
# в любом порядке, если есть картинки; подробнее см. в calculate_hash_for_record())
# — либо перебрасывали тот же самый пост, который сейчас пытаемся перебросить
# (определяем по id этого поста)
# — либо перебрасывали репост того же самого оригинального поста
# или тот же пост, репост которого сейчас пытаемся перебросить
# (определяем по id этого и id оригинального поста, сравнивая с общей базой id)
def add_hash_to_posted(new_hash, chat):
    # Appends to the temporary shared in-memory list for now; replace with a real (DB) implementation later.
    posted_records_hashes.append(new_hash)
def add_id_to_posted(new_id, chat):
    # Same as add_hash_to_posted: temporary in-memory list, to be replaced by a DB.
    posted_records_ids.append(new_id)
if __name__ == '__main__':
    # In-memory dedup state; lost on restart (DB planned — see comments above).
    posted_records_hashes = []
    posted_records_ids = []
    current_chat = config.chat_id  # later this should be set per chat, if support for multiple chats is added
    # Main polling loop: every 30s, check the latest wall post of each VK group
    # and mirror anything not yet reposted into the Telegram chat.
    while True:
        for group in config.vk_group_ids:
            current_record = get_data_from_last_wall_record(group)
            if has_already_been_reposted(current_record, current_chat):
                continue
            else:
                # Mark as posted *before* sending; a failed send is not retried.
                add_record_to_posted(current_record, current_chat)
                message_text = current_record['text'].replace("<br>", '\n')
                if 'images' in current_record:
                    if len(current_record['images']) > 1:
                        # Multiple images: album without caption, text is dropped.
                        # NOTE(review): the trailing `continue` skips the hash-trim below.
                        send_media_group(current_record['images'])
                        continue
                    if len(message_text) < 200:
                        # Short text fits as a photo caption (Telegram caption limits apply).
                        # NOTE(review): 'images' is passed as-is — presumably a one-element
                        # list here; verify the Telegram API accepts it.
                        send_image(current_record['images'], message_text)
                        continue
                    else:
                        # Long text: silent photo first, text follows as its own message.
                        send_image(current_record['images'])
                send_message(message_text)
        # Cap memory: drop the oldest hash once over 100 entries.
        # NOTE(review): posted_records_ids is never trimmed.
        if len(posted_records_hashes) > 100:
            del posted_records_hashes[0]  # this will definitely need to be persisted somewhere separately, especially once this is a DB rather than a temporary variable
        sleep(30)
|
{"/bot.py": ["/config.py"]}
|
36,114
|
Meffest/vk_to_telegram
|
refs/heads/master
|
/config.py
|
# Deployment-specific settings for bot.py — fill these in before running.
# BUG FIX: the bare `name =` lines were a SyntaxError; importing config crashed.
telegram_token = ""   # Telegram Bot API token (from @BotFather)
chat_id = ""          # target Telegram chat id
vk_token = ""         # VK API access token
vk_group_ids = []     # VK group ids whose walls are mirrored
|
{"/bot.py": ["/config.py"]}
|
36,117
|
yoavweiss/Sizer-Soze
|
refs/heads/master
|
/downloadr.py
|
#!/usr/bin/python
import os
import sys
from urllib2 import HTTPError, URLError, urlopen, Request
from slugify import slugify
import hashlib
from Queue import Queue
from threading import Thread
def resourceSlug(url, dir):
    """Map a URL to its on-disk cache location under `dir`.

    Files are bucketed by the first two hex chars of the URL's MD5 to keep
    directories small; the filename is a slug of the URL (max 128 chars).
    Returns (bucket_dir, full_file_path).
    """
    digest = hashlib.md5(url).hexdigest()[:2]
    slug = slugify(url)[:128]
    bucket = os.path.join(dir, digest)
    return (bucket, os.path.join(bucket, slug))
class downloaderThread(Thread):
    # Worker thread: pulls URLs from a shared queue and caches each one on disk.
    # NOTE: this module is Python 2 (urllib2, `except X, e`, print >> syntax).
    def __init__(self, queue, dir):
        # queue: shared Queue of URLs to fetch; dir: root cache directory.
        Thread.__init__(self)
        self.queue = queue
        self.dir = dir
    def downloadFile(self, url):
        # Fetch one URL into its resourceSlug() location; skips already-cached files.
        url = url.strip()
        try:
            filedir, filename = resourceSlug(url, self.dir)
            if os.path.exists(filename):
                # Already downloaded on a previous run — nothing to do.
                return
            if not os.path.exists(filedir):
                # NOTE(review): two threads can race here; os.mkdir may raise OSError
                # if another worker creates the bucket first.
                os.mkdir(filedir)
            # Spoof a desktop Chrome UA so servers return the same asset a browser would get.
            headers = { 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36' }
            f = urlopen(Request(url, None, headers))
            buffer = f.read()
            with open(filename, "wb") as local_file:
                local_file.write(buffer)
                local_file.close()
        except HTTPError, e:
            print >>sys.stderr, "HTTPError:", e.code, url
        except URLError, e:
            print >>sys.stderr, "URLError:", url
            #print >>sys.stderr, "URLError:", e.reason, url
    def run(self):
        # Consume the queue forever; daemon threads die with the main process.
        while True:
            url = self.queue.get()
            self.downloadFile(url)
            self.queue.task_done()
def downloadFiles(urls, dir):
    """Download all `urls` into the cache directory `dir` using 64 worker threads.

    Blocks until every queued URL has been processed.
    """
    pending = Queue()
    for _ in range(64):
        worker = downloaderThread(pending, dir)
        worker.setDaemon(True)  # workers exit with the main process
        worker.start()
    for u in urls:
        pending.put(u)
    pending.join()
|
{"/resizeBenefits.py": ["/downloadr.py"]}
|
36,118
|
yoavweiss/Sizer-Soze
|
refs/heads/master
|
/resizeBenefits.py
|
from downloadr import resourceSlug
from subprocess import call, check_output
import magic
import os
from shutil import copyfile
def analyzeResult(result):
    """Split one whitespace-separated result line into (url, width, height).

    Width and height are returned as strings, exactly as they appear.
    """
    tokens = result.split()
    return (tokens[0], tokens[1], tokens[2])
def fileSize(name):
    """Return the size of the file at `name` in bytes."""
    return int(os.path.getsize(name))
def getBenefits(results, dir, ignore_invisibles):
    # For each "url width height" result line, compute how many bytes would be
    # saved by (a) lossless optimization, (b) lossy (q85) re-encode, and
    # (c) resizing to the displayed dimensions. Shells out to the external
    # `image_optim`, ImageMagick `convert` and `identify` binaries.
    # Returns a list of [filename, original_size, lossless_saving,
    # lossy_saving, "WxH=>wxh", resize_saving] rows.
    benefits = []
    devnull = open(os.devnull, "wb")  # silence the external tools' output
    for result in results:
        (url, width, height) = analyzeResult(result)
        filedir, filename = resourceSlug(url, dir)
        try:
            buffer = open(filename, "rb").read()
        except IOError:
            # Resource was never downloaded successfully — skip it.
            continue
        # Sniff the real file type from content (libmagic), not the extension.
        ext = magic.from_buffer(buffer).split()[0].lower()
        # If it's not one of the known image formats, return!
        # Sorry WebP
        if (ext != "jpeg") and (ext != "png") and (ext != "gif"):
            continue
        optimized_file_name = filename + "_lslsopt" + ext
        lossy_optimized_file_name = filename + "_lossyopt" + ext
        resized_file_name = filename + "_" + width + "_" + height + ext
        # optimize the original image
        copyfile(filename, optimized_file_name)
        call(["image_optim", optimized_file_name], stdout=devnull, stderr=devnull)
        # Lossy optimize the original image
        call(["convert", optimized_file_name, "-quality", "85", lossy_optimized_file_name])
        #call(["image_optim", lossy_optimized_file_name], stdout=devnull, stderr=devnull)
        # Resize the original image
        call(["convert", optimized_file_name, "-geometry", width+"x"+height, "-quality", "85", resized_file_name])
        #call(["image_optim", resized_file_name], stdout=devnull, stderr=devnull)
        # Get the original image's dimensions
        original_dimensions = check_output("identify -format \"%w,%h\" " + filename + "|sed 's/,/x/'", shell = True).strip()
        original_size = fileSize(filename)
        optimized_size = fileSize(optimized_file_name)
        lossy_optimized_size = fileSize(lossy_optimized_file_name)
        resized_size = fileSize(resized_file_name)
        # If resizing made the image larger, ignore it
        if resized_size > optimized_size:
            resized_size = optimized_size
        # if the image is not displayed, consider all its data as a waste
        if width == "0":
            resized_size = 0
            if ignore_invisibles:
                continue
        benefits.append([ filename,
            original_size,
            original_size - optimized_size,
            original_size - lossy_optimized_size,
            original_dimensions + "=>" + width + "x" + height,
            original_size - resized_size])
    devnull.close()
    return benefits
|
{"/resizeBenefits.py": ["/downloadr.py"]}
|
36,119
|
yoavweiss/Sizer-Soze
|
refs/heads/master
|
/slug.py
|
#!/usr/bin/env python
from slugify import slugify
import settings
import sys
import os
if len(sys.argv) <= 1:
print >> sys.stderr, "Usage:", sys.argv[0], "<URL>"
quit()
url = sys.argv[1]
slugged_dir = os.path.join(settings.output_dir, slugify(url))
print slugged_dir
|
{"/resizeBenefits.py": ["/downloadr.py"]}
|
36,120
|
yoavweiss/Sizer-Soze
|
refs/heads/master
|
/settings.py
|
# The output directory to which the results will be written
output_dir = "/tmp/sizer"
# The viewport values on which sizer will run
viewports = [360, 720, 1260]
|
{"/resizeBenefits.py": ["/downloadr.py"]}
|
36,121
|
yoavweiss/Sizer-Soze
|
refs/heads/master
|
/sizer_json.py
|
#!/usr/bin/env python
import sys
import os
from sizer import sizer
import json
import requests
if __name__ == "__main__":
# Check input
if len(sys.argv) <= 4:
print >> sys.stderr, "Usage:", sys.argv[0], "<URL> <viewport> <ignore display:none> <postback_url>"
quit()
url = sys.argv[1]
viewport = sys.argv[2]
ignore = (sys.argv[3] != "0")
postback = sys.argv[4]
result = json.dumps(sizer(url, viewport, ignore, False))
if postback:
if not postback.startswith("http"):
postback = "http://" + postback
requests.post(postback, data=result)
print result
|
{"/resizeBenefits.py": ["/downloadr.py"]}
|
36,122
|
yoavweiss/Sizer-Soze
|
refs/heads/master
|
/sizer.py
|
#!/usr/bin/env python
from slugify import slugify
import sys
import os
from subprocess import Popen, PIPE
from downloadr import downloadFiles
import resizeBenefits
import settings
def col(value, length=16):
return str(value).ljust(length + 1)
def sizer(url, viewport, ignore_invisibles, toFile):
# Prepare the output directory
if not url.startswith("http"):
url = "http://" + url
slugged_url = slugify(url)
slugged_dir = os.path.join(settings.output_dir, slugged_url)
current_dir = os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(slugged_dir):
os.makedirs(slugged_dir)
image_urls = []
image_results = []
phantom = Popen([os.path.join(current_dir, "getImageDimensions.js"), url, str(viewport)],
stdout = PIPE);
container = image_urls
for line in phantom.stdout.xreadlines():
# Ignore data URIs
if line.startswith("---"):
downloadFiles(image_urls, slugged_dir)
container = image_results
continue
if not line.startswith("http"):
continue
container.append(line)
# Here the process should be dead, and all files should be downloaded
benefits = resizeBenefits.getBenefits(image_results, slugged_dir, ignore_invisibles)
if toFile:
benefits_file = open(os.path.join(slugged_dir, "result_" + str(viewport) + ".txt"), "wt")
image_data = 0
optimize_savings = 0
lossy_optimize_savings = 0
resize_savings = 0
for benefit in benefits:
if toFile:
print >>benefits_file, benefit[0],
print >>benefits_file, "Original_size:",
print >>benefits_file, benefit[1],
print >>benefits_file, "optimize_savings:",
print >>benefits_file, benefit[2],
print >>benefits_file, benefit[3],
print >>benefits_file, benefit[4],
print >>benefits_file, benefit[5]
image_data += benefit[1]
optimize_savings += benefit[2]
lossy_optimize_savings += benefit[3]
resize_savings += benefit[5]
if toFile:
benefits_file.close()
results = { 'summary': {'url': url, 'viewport': viewport,
'image_data': image_data, 'lossless': optimize_savings,
'lossy': lossy_optimize_savings, 'resize': resize_savings},
'details': benefits }
return results
if __name__ == "__main__":
# Check input
if len(sys.argv) <= 1:
print >> sys.stderr, "Usage:", sys.argv[0], "<URL> <ignore display:none>"
quit()
url = sys.argv[1]
if len(sys.argv) > 2:
ignore = bool(sys.argv[2])
else:
ignore = False
print col("url", len(url)), col("viewport"), col("image_data"), col("lossless_savings"), col("lossy_savings"), col("resize_savings")
for viewport in settings.viewports:
result = sizer(url, viewport, ignore, True)
summary = result['summary']
url = summary['url']
viewport = summary['viewport']
image_data = summary['image_data']
optimize_savings = summary['lossless']
lossy_optimize_savings = summary['lossy']
resize_savings = summary['resize']
print col(url, len(url)), col(viewport), col(image_data), col(optimize_savings), col(lossy_optimize_savings), col(resize_savings)
|
{"/resizeBenefits.py": ["/downloadr.py"]}
|
36,127
|
kjona/limesurveyrc2api
|
refs/heads/master
|
/limesurveyrc2api/tests/tests.py
|
import os
import unittest
from operator import itemgetter
from limesurveyrc2api import LimeSurveyRemoteControl2API
from configparser import ConfigParser
class TestBase(unittest.TestCase):
def setUp(self):
# Read config.ini file
current_dir = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(current_dir, 'config.ini')
confparser = ConfigParser()
confparser.read_file(open(config_path))
self.url = confparser['test']['url']
self.username = confparser['test']['username']
self.password = confparser['test']['password']
self.api = LimeSurveyRemoteControl2API(self.url)
self.session_key = None
def tearDown(self):
"""
Clean up any side effects.
Tests should assign to self.session_key so this cleanup can occur.
"""
if self.session_key is not None:
self.api.sessions.release_session_key(self.session_key)
class TestSessions(TestBase):
def test_get_session_key_success(self):
"""
Requesting a session key with valid creds should return a session key.
- A. Verify the return value for valid credentials is a 32 char string.
"""
# A
result = self.api.sessions.get_session_key(self.username, self.password)
result_value = result.get('result')
self.assertEqual(32, len(result_value))
self.assertEqual(str, type(result_value))
self.session_key = result_value
def test_get_session_key_failure(self):
"""
Requesting a session key with invalid creds should return None
- A. Verify the return value for bad credentials is None
"""
# A
result = self.api.sessions.get_session_key('bad_user', 'bad_pass')
result_value = result.get('result')
result_status = result_value.get('status')
self.assertEqual("Invalid user name or password", result_status)
def test_release_session_key_success(self):
"""
Releasing a valid session key should return "OK".
- A. Get a session key.
- B. Verify the return for a valid release request is "OK".
- C. Verify the return for a call using the released key fails.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
session_key = session.get('result')
# B
result = self.api.sessions.release_session_key(session_key)
result_value = result.get('result')
self.assertEqual("OK", result_value)
# C
call = self.api.surveys.list_surveys(result_value, self.username)
call_value = call.get('result')
call_status = call_value.get('status')
self.assertEqual("Invalid session key", call_status)
def test_release_session_key_failure(self):
"""
Releasing an invalid session key should return "OK".
- A. Verify the return for an invalid release request is "OK".
"""
# A
result = self.api.sessions.release_session_key("boguskey")
result_value = result.get('result')
self.assertEqual("OK", result_value)
class TestSurveys(TestBase):
def test_list_surveys_success(self):
"""
Requesting a list of surveys for a user should return survey properties.
- A. Get a new session key.
- B. Verify the result contains dict(s) each with a survey_id.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
result = self.api.surveys.list_surveys(self.session_key, self.username)
result_value = result.get('result')
for survey in result_value:
self.assertIsNotNone(survey.get('sid'))
def test_list_surveys_failure(self):
"""
Requesting a survey list for an invalid username should return error.
- A. Get new session key.
- B. Verify the result status is "Invalid user".
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
result = self.api.surveys.list_surveys(self.session_key, "not_a_user")
result_value = result.get('result')
status = result_value.get('status')
self.assertEqual("Invalid user", status)
def test_get_summary_success(self):
"""
Get summary of a survey
- A. Get a new session key.
- B. Verify the result contains dict(s).
- C. Get survey details for first survey
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_id = surveys.get('result')[0].get('sid')
# C
result_survey = self.api.surveys.get_summary(self.session_key,
survey_id)
survey_details = result_survey.get('result')
# example response:
# {'token_count': '26', 'token_invalid': '0', 'token_sent': '0',
# 'token_opted_out': '0', 'token_completed': '0'}
self.assertIn('token_count', survey_details)
self.assertIsInstance(survey_details['token_count'], str)
self.assertIn('token_invalid', survey_details)
self.assertIsInstance(survey_details['token_invalid'], str)
self.assertIn('token_sent', survey_details)
self.assertIsInstance(survey_details['token_sent'], str)
self.assertIn('token_opted_out', survey_details)
self.assertIsInstance(survey_details['token_opted_out'], str)
self.assertIn('token_completed', survey_details)
self.assertIsInstance(survey_details['token_completed'], str)
def test_get_summary_failure(self):
"""
Requesting a survey summery for an invalid survey ID should return
error.
- A. Get new session key.
- B. Construct invalid survey ID
- C. Verify the result status is "Invalid survey ID".
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_ids = [s.get('sid') for s in surveys.get('result')]
# construct an invalid survey ID by taking the longest ID
# (these are strings) and appending a '9'
survey_id_invalid = sorted(survey_ids, key=len)[-1] + '9'
# C
result = self.api.surveys.get_summary(self.session_key,
survey_id_invalid)
result_value = result.get('result')
status = result_value.get('status')
self.assertEqual("Invalid surveyid", status)
class TestTokens(TestBase):
def test_list_participants_and_get_properties(self):
"""
List of participant of an survey should return the tokens.
- A. Get a new session key.
- B. Get the survey id.
- C. List participants
- D. List participant properties for a single participant (we need a
valid token ID, thus included in this test)
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_id = surveys.get('result')[0].get('sid')
# C
participants = self.api.tokens.list_participants(self.session_key,
survey_id)
participants_result = participants.get('result')
self.assertIsNot(len(participants_result), 0)
self.assertIn('tid', participants_result[0])
self.assertIn('token', participants_result[0])
self.assertIn('participant_info', participants_result[0])
# By default, only 3 participant properties are returned
self.assertIn('firstname', participants_result[0]['participant_info'])
self.assertIn('lastname', participants_result[0]['participant_info'])
self.assertIn('email', participants_result[0]['participant_info'])
self.assertNotIn('language', participants_result[0]['participant_info'])
token_id = participants_result[0]['tid']
# D
participant = self.api.tokens.get_participant_properties(
self.session_key,
survey_id,
token_id
)
participant_result = participant.get('result')
self.assertIn('tid', participant_result)
self.assertEqual(participant_result['tid'], token_id)
# By default, all participant properties are returned
self.assertIn('language', participant_result)
self.assertIn('remindercount', participant_result)
def test_add_participants_success(self):
"""
Adding a participant to a survey should return their token string.
- A. Get a new session key.
- B. Get the survey id.
- C. Add participants.
- D. Verify the return for a valid request matches and has a token.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_id = surveys.get('result')[0].get('sid')
# C
participants = [
{'email': 't1@test.com', 'lastname': 'LN1', 'firstname': 'FN1'},
{'email': 't2@test.com', 'lastname': 'LN2', 'firstname': 'FN2'},
{'email': 't3@test.com', 'lastname': 'LN3', 'firstname': 'FN3'},
]
result = self.api.tokens.add_participants(
self.session_key, survey_id, participants)
# D
result_value = result.get('result')
tokens = sorted(result_value, key=itemgetter('tid'))
zipped = zip(tokens, participants)
for token, participant in zipped:
for key in participant:
self.assertEqual(participant[key], token[key])
self.assertIsNotNone(token["token"])
def test_add_participants_failure_survey(self):
"""
Add participants to an invalid survey returns an error.
- A. Get a new session key.
- B. Add participants to an invalid survey id.
- C. Verify the return for a invalid request is an error.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_ids = [s.get('sid') for s in surveys.get('result')]
# construct an invalid survey ID by taking the longest ID
# (these are strings) and appending a '9'
survey_id_invalid = sorted(survey_ids, key=len)[-1] + '9'
participants = [
{'email': 't1@test.com', 'lastname': 'LN1', 'firstname': 'FN1'},
{'email': 't2@test.com', 'lastname': 'LN2', 'firstname': 'FN2'},
{'email': 't3@test.com', 'lastname': 'LN3', 'firstname': 'FN3'},
]
result = self.api.tokens.add_participants(
self.session_key, survey_id_invalid, participants)
# C
result_value = result.get('result')
status = result_value.get('status')
self.assertEqual("Error: Invalid survey ID", status)
def test_add_participants_success_anonymous(self):
"""
Adding anonymous participants to an valid survey returns tokens.
- A. Get a new session key.
- B. Get valid survey ID.
- C. Add anonymous participants to valid survey id.
- D. Verify the return for a valid request matches and has a token.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_id = surveys.get('result')[0].get('sid')
# C
participants = [
{'email': 't1@test.com'},
{'lastname': 'LN2'},
{'firstname': 'FN3'},
]
result = self.api.tokens.add_participants(
self.session_key, survey_id, participants)
# D
result_value = result.get('result')
tokens = sorted(result_value, key=itemgetter('tid'))
zipped = zip(tokens, participants)
for token, participant in zipped:
for key in participant:
self.assertEqual(participant[key], token[key])
self.assertIsNotNone(token["token"])
def test_delete_participants_success(self):
"""
Deleting participants should return deleted token id list.
A. Get new session key.
B. Get a valid survey ID.
C. Create valid tokens.
D. Verify the delete response is the list of token ids and "Deleted".
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_id = surveys.get('result')[0].get('sid')
# C
participants = [
{'email': 't1@test.com', 'lastname': 'LN1', 'firstname': 'FN1'},
{'email': 't2@test.com', 'lastname': 'LN2', 'firstname': 'FN2'},
{'email': 't3@test.com', 'lastname': 'LN3', 'firstname': 'FN3'},
]
result = self.api.tokens.add_participants(
self.session_key, survey_id, participants)
# D
result_value = result.get('result')
token_ids = [x["tid"] for x in result_value]
deleted = self.api.tokens.delete_participants(
self.session_key, survey_id, token_ids)
deleted_tokens = deleted.get('result')
for token_id, token_result in deleted_tokens.items():
self.assertIn(token_id, token_ids)
self.assertEqual("Deleted", token_result)
def test_delete_participants_failure(self):
"""
Requesting to delete a token that doesn't exist returns an error.
A. Get new session key.
B. Get a valid survey ID.
C. Verify the result of delete for non existent token id is an error.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_id = surveys.get('result')[0].get('sid')
# C TODO: derive from list_participants() to ensure it won't be wrong
tokens = [92929292, 929292945, 2055031111]
result = self.api.tokens.delete_participants(
self.session_key, survey_id, tokens)
result_value = result.get('result')
for token_id, token_result in result_value.items():
self.assertIn(int(token_id), tokens)
self.assertEqual("Invalid token ID", token_result)
class TestQuestions(TestBase):
def test_list_questions_success(self):
"""
Request to list questions for a valid survey should return the list.
A. Get a new session key.
B. Get a valid survey ID.
C. Verify the result contains a list with the SGQA components.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B
surveys = self.api.surveys.list_surveys(self.session_key, self.username)
survey_id = surveys.get('result')[0].get('sid')
# C
questions = self.api.questions.list_questions(
self.session_key, survey_id)
question_list = questions.get('result')
self.assertIsInstance(question_list, list)
for question in question_list:
self.assertEqual(survey_id, question["sid"])
self.assertIsNotNone(question["gid"])
self.assertIsNotNone(question["qid"])
def test_list_questions_failure(self):
"""
Requesting a question list for an invalid survey id returns an error.
A. Get a new session key.
B. Verify the result for a question list request is an error.
"""
# A
session = self.api.sessions.get_session_key(
self.username, self.password)
self.session_key = session.get('result')
# B TODO: derive from list_surveys() to ensure it won't be wrong
survey_id = 9999999
result = self.api.questions.list_questions(self.session_key, survey_id)
result_value = result.get('result')
status = result_value.get('status')
self.assertEqual("Error: Invalid survey ID", status)
|
{"/limesurveyrc2api/tests/tests.py": ["/limesurveyrc2api/__init__.py"], "/limesurveyrc2api/__init__.py": ["/limesurveyrc2api/limesurveyrc2api.py"]}
|
36,128
|
kjona/limesurveyrc2api
|
refs/heads/master
|
/limesurveyrc2api/__init__.py
|
from .limesurveyrc2api import LimeSurveyRemoteControl2API, LimeSurveyError
# Lifts the class into the package namespace instead of package.module
# Otherwise you'd need from limesurveyrc2api.limesurveyrc2api import Lime...
|
{"/limesurveyrc2api/tests/tests.py": ["/limesurveyrc2api/__init__.py"], "/limesurveyrc2api/__init__.py": ["/limesurveyrc2api/limesurveyrc2api.py"]}
|
36,129
|
kjona/limesurveyrc2api
|
refs/heads/master
|
/limesurveyrc2api/limesurveyrc2api.py
|
import requests
import json
from collections import OrderedDict
class LimeSurveyError(Exception):
"""Base class for exceptions in LimeSurvey."""
pass
class LimeSurveyRemoteControl2API(object):
def __init__(self, url):
self.url = url
self.headers = {"content-type": "application/json"}
self.utils = _Utils(self)
self.sessions = _Sessions(self)
self.surveys = _Surveys(self)
self.tokens = _Tokens(self)
self.questions = _Questions(self)
class _Utils(object):
def __init__(self, lime_survey_api):
self.api = lime_survey_api
def query(self, method, params):
"""
Query the LimeSurvey API
Important! Despite being provided as key-value, the API treats all
parameters as positional. OrderedDict should be used to ensure this,
otherwise some calls may randomly fail.
Parameters
:param method: Name of API method to call.
:type method: String
:param params: Parameters to the specified API call.
:type params: OrderedDict
Return
:return: result of API call
:raise: requests.ConnectionError
:raise: LimeSurveyError if the API returns an error (either http error
or error message in body)
"""
# 1. Prepare the request data
data = OrderedDict([
('method', method),
('params', params),
('id', 1) # Query ID - corresponding response will have the same ID
])
# 2. Query the API
response = requests.post(self.api.url, headers=self.api.headers, data=json.dumps(data))
response_content = response.json()
# 3. Evaluate the response
if not response.ok or response_content.get('error'):
raise LimeSurveyError(
"Error during query to '{}':{} {}".format(
self.api.url, response.status_code, response_content))
return response_content
class _Sessions(object):
def __init__(self, lime_survey_api):
self.api = lime_survey_api
def get_session_key(self, username, password):
"""
Get a session key for all subsequent API calls.
Parameters
:param username: LimeSurvey username to authenticate with.
:type username: String
:param password: LimeSurvey password to authenticate with.
:type password: String
"""
params = OrderedDict([
("username", username),
("password", password)
])
return self.api.utils.query('get_session_key', params)
def release_session_key(self, session_key):
"""
Close an open session.
"""
params = {'sSessionKey': session_key}
return self.api.utils.query('release_session_key', params)
class _Surveys(object):
def __init__(self, lime_survey_api):
self.api = lime_survey_api
def list_surveys(self, session_key, username):
"""
List surveys accessible to the specified username.
Parameters
:param session_key: Active LSRC2 session key
:type session_key: String
:param username: LimeSurvey username to list accessible surveys for.
:type username: String
"""
params = OrderedDict([
('sSessionKey', session_key),
('iSurveyID', username)
])
return self.api.utils.query('list_surveys', params)
def get_summary(self, session_key, survey_id):
"""
Get participant properties in a survey.
Parameters
:param session_key: Active LSRC2 session key
:type session_key: String
:param survey_id: ID of survey
:type survey_id: Integer
:return: dict with keys 'token_count', 'token_invalid', 'token_sent',
'token_opted_out', and 'token_completed' with strings as values.
"""
params = OrderedDict([
('sSessionKey', session_key),
('iSurveyID', survey_id)
])
return self.api.utils.query('get_summary', params)
class _Tokens(object):
def __init__(self, lime_survey_api):
self.api = lime_survey_api
def get_participant_properties(self, session_key, survey_id, token_id):
"""
Get participant properties in a survey.
Parameters
:param session_key: Active LSRC2 session key
:type session_key: String
:param survey_id: ID of survey
:type survey_id: Integer
:param token_id: ID of the token to lookup
:type token_id: Integer
:return: Dict with all participant properties
"""
params = OrderedDict([
('sSessionKey', session_key),
('iSurveyID', survey_id),
('aTokenQueryProperties', {'tid': token_id})
])
return self.api.utils.query('get_participant_properties',
params)
def list_participants(self, session_key, survey_id, start=0, limit=1000,
ignore_token_used=False, attributes=False,
conditions=None):
"""
List participants in a survey.
Parameters
:param session_key: Active LSRC2 session key
:type session_key: String
:param survey_id: ID of survey
:type survey_id: Integer
:param start: Index of first token to retrieve
:type start: Integer
:param limit: Number of tokens to retrieve
:type limit: Integer
:param ignore_token_used: If True, tokens that have been used are not
returned
:type ignore_token_used: Integer
:param attributes: The extended attributes that we want
:type attributes: List[String]
:param conditions: (optional) conditions to limit the list,
e.g. {'email': 't1@test.com'}
:type conditions: List[Dict]
:return: List of dictionaries
"""
conditions = conditions or []
params = OrderedDict([
('sSessionKey', session_key),
('iSurveyID', survey_id),
('iStart', start),
('iLimit', limit),
('bUnused', ignore_token_used),
('aAttributes', attributes),
('aConditions', conditions)
])
return self.api.utils.query('list_participants', params)
def add_participants(self, session_key, survey_id, participant_data,
create_token_key=True):
"""
Add participants to the specified survey.
Parameters
:param session_key: Active LSRC2 session key
:type session_key: String
:param survey_id: ID of survey to delete participants from.
:type survey_id: Integer
:param participant_data: List of participant detail dictionaries.
:type participant_data: List[Dict]
"""
params = OrderedDict([
('sSessionKey', session_key),
('iSurveyID', survey_id),
('aParticipantData', participant_data),
('bCreateToken', create_token_key)
])
return self.api.utils.query('add_participants', params)
def delete_participants(self, session_key, survey_id, tokens):
"""
Delete participants (by token) from the specified survey.
Parameters
:param session_key: Active LSRC2 session key
:type session_key: String
:param survey_id: ID of survey to delete participants from.
:type survey_id: Integer
:param tokens: List of token IDs for participants to delete.
:type tokens: List[Integer]
"""
params = OrderedDict([
('sSessionKey', session_key),
('iSurveyID', survey_id),
('aTokenIDs', tokens)
])
return self.api.utils.query('delete_participants', params)
class _Questions(object):
def __init__(self, lime_survey_api):
self.api = lime_survey_api
def list_questions(self, session_key, survey_id,
group_id=None, language=None):
"""
Return a list of questions from the specified survey.
Parameters
:param session_key: Active LSRC2 session key
:type session_key: String
:param survey_id: ID of survey to list questions from.
:type survey_id: Integer
:param group_id: ID of the question group to filter on.
:type group_id: Integer
:param language: Language of survey to return for.
:type language: String
"""
params = OrderedDict([
('sSessionKey', session_key),
('iSurveyID', survey_id),
('iGroupID', group_id),
('sLanguage', language)
])
return self.api.utils.query('list_questions', params)
|
{"/limesurveyrc2api/tests/tests.py": ["/limesurveyrc2api/__init__.py"], "/limesurveyrc2api/__init__.py": ["/limesurveyrc2api/limesurveyrc2api.py"]}
|
36,131
|
kaizen123/Left-Ventricle-Segmentation
|
refs/heads/master
|
/preprocessing.py
|
import pydicom, cv2, re
import os, fnmatch, sys
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
from itertools import izip
from utils import center_crop, lr_poly_decay, get_SAX_SERIES
SAX_SERIES = get_SAX_SERIES()
SUNNYBROOK_ROOT_PATH = '../Data/'
TEST_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart1',
'OnlineDataContours')
TEST_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH, 'challenge_online/challenge_online')
VAL_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart2',
'ValidationDataContours')
VAL_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH, 'challenge_validation')
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'Sunnybrook Cardiac MR Database ContoursPart3',
'TrainingDataContours')
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
'challenge_training')
SIZE = 256
def shrink_case(case):
toks = case.split('-')
def shrink_if_number(x):
try:
cvt = int(x)
return str(cvt)
except ValueError:
return x
return '-'.join([shrink_if_number(t) for t in toks])
class Contour(object):
def __init__(self, ctr_path):
self.ctr_path = ctr_path
match = re.search(r'/([^/]*)/contours-manual/IRCCI-expert/IM-0001-(\d{4})-.*', ctr_path)
self.case = shrink_case(match.group(1))
self.img_no = int(match.group(2))
self.ctr = np.loadtxt(self.ctr_path, delimiter=' ').astype('int')
def __str__(self):
return '<Contour for case %s, image %d>' % (self.case, self.img_no)
__repr__ = __str__
def read_contour(contour, data_path):
filename = 'IM-%s-%04d.dcm' % (SAX_SERIES[contour.case], contour.img_no)
full_path = os.path.join(data_path, contour.case, filename)
f = pydicom.read_file(full_path)
img = f.pixel_array.astype('int')
mask = np.zeros_like(img, dtype='uint8')
coords = np.loadtxt(contour.ctr_path, delimiter=' ').astype('int')
cv2.fillPoly(mask, [coords], 1)
if img.ndim < 3:
img = img[..., np.newaxis]
mask = mask[..., np.newaxis]
return img, mask
def map_all_contours(contour_path, shuffle=False):
contours = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(contour_path)
for f in fnmatch.filter(files,
'IM-0001-*-icontour-manual.txt')]
if shuffle:
print('Shuffling data')
np.random.shuffle(contours)
print('Number of examples: {:d}'.format(len(contours)))
contours = map(Contour, contours)
return contours
def export_all_contours(contours, data_path):
print('\nProcessing {:d} images and labels ...\n'.format(len(contours)))
images = np.zeros((len(contours), SIZE, SIZE, 1))
masks = np.zeros((len(contours), SIZE, SIZE, 1))
for idx, contour in enumerate(contours):
img, mask = read_contour(contour, data_path)
if img.shape[0] > SIZE:
img = center_crop(img, SIZE)
mask = center_crop(mask, SIZE)
images[idx] = img
masks[idx] = mask
return images, masks
def prepareDataset(contour_path, img_path):
contours = map_all_contours(contour_path)
img, mask = export_all_contours(contours, img_path)
return img, mask, contours
def reformDataXY(img, ROI, img_size = 64, mask_size = 32):
'''
Reform the image data and ROI for model
@param:
img: the original image, shape (N, 256, 256, 1)
ROI: the bounding box of region of interest, shape (N, mask_size, mask_size)
img_size: size image used for the model, default 64
mask_size: size of mask used for the model, default 32
@return:
X: the reformed data field, shape (N, img_size, img_size, 1)
Y: the reformed ground truth, shape (N, 1, mask_size, mask_size)
'''
X = np.zeros((img.shape[0], img_size, img_size, 1))
for i in range(X.shape[0]):
X[i,:,:,0] = cv2.resize(img[i,:,:,0], (img_size, img_size), interpolation = cv2.INTER_LINEAR)
Y = np.array(ROI).reshape((len(ROI),1, mask_size, mask_size))
return X, Y
def get_ROI(contours, shape_out = 32, img_size = 256):
'''
Given the path to the mask, return ROI -- the bounding box with size shape_out
@param
countour_path: the path to the mask dir
shape_out: the size of bounding box, default 32
img_size: original size of image, default 256
@return
ROI: the bounding box computed based on ground truth
'''
ROI = []
for i in range(len(contours)):
c = contours[i].ctr
X_min, Y_min = c[:,0].min(), c[:,1].min()
X_max, Y_max = c[:,0].max(), c[:,1].max()
w = X_max - X_min
h = Y_max - Y_min
roi_single = np.zeros((img_size, img_size))
if w > h :
roi_single[int(Y_min - (w -h)/2):int(Y_max + (w -h)/2), int(X_min):int(X_max)] = 1.0
else :
roi_single[int(Y_min):int(Y_max), int(X_min - (h-w)/2):int(X_max + (h -w)/2)] = 1.0
ROI.append(cv2.resize(roi_single, (shape_out, shape_out), interpolation = cv2.INTER_NEAREST))
return ROI
|
{"/preprocessing.py": ["/utils.py"]}
|
36,132
|
kaizen123/Left-Ventricle-Segmentation
|
refs/heads/master
|
/cnn_model.py
|
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras import regularizers
from keras.models import load_model
from keras.losses import mean_squared_error
def create_baseline_model(activation=None, input_shape=(64, 64)):
    """
    Baseline convnet: one convolution, one average pooling, one fully connected layer.
    :param activation: activation for the conv layer. Default None keeps the
        conv linear, matching the historical behaviour of this model (the
        argument used to be accepted but silently ignored).
    :param input_shape: (height, width) of the single-channel input
    :return: uncompiled Keras Sequential model emitting (., 32, 32) masks
    """
    model = Sequential()
    # Fix: `activation` is now actually passed to the conv layer.
    model.add(Conv2D(100, (11, 11), activation=activation, padding='valid',
                     strides=(1, 1),
                     input_shape=(input_shape[0], input_shape[1], 1)))
    model.add(AveragePooling2D((6, 6)))
    # (64-11+1)/6 = 9 -> 100 filters * 9 * 9 = 8100 features.
    model.add(Reshape([-1, 8100]))
    model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))
    model.add(Reshape([-1, 32, 32]))
    return model
def create_model_larger(activation = 'relu', input_shape=(64, 64)):
    """
    Larger (more filters) convnet model : one convolution, one average pooling and one fully connected layer:
    :param activation: None if nothing passed, e.g : ReLu, tanh, etc.
    :return: Keras model
    """
    in_shape = (input_shape[0], input_shape[1], 1)
    stack = [
        Conv2D(200, (11, 11), activation=activation, padding='valid',
               strides=(1, 1), input_shape=in_shape),
        AveragePooling2D((6, 6)),
        Reshape([-1, 16200]),
        Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)),
        Reshape([-1, 32, 32]),
    ]
    net = Sequential()
    for layer in stack:
        net.add(layer)
    return net
def create_model_deeper(activation = 'relu', input_shape=(64, 64)):
    """
    Deeper convnet model : two convolutions, two average pooling and one fully connected layer:
    :param activation: None if nothing passed, e.g : ReLu, tanh, etc.
    :return: Keras model
    """
    net = Sequential()
    net.add(Conv2D(64, (11, 11), activation=activation, padding='valid',
                   strides=(1, 1),
                   input_shape=(input_shape[0], input_shape[1], 1)))
    net.add(AveragePooling2D((2, 2)))
    net.add(Conv2D(128, (10, 10), activation=activation, padding='valid',
                   strides=(1, 1)))
    net.add(AveragePooling2D((2, 2)))
    # Flatten the final 128 x 9 x 9 feature map.
    net.add(Reshape([-1, 128 * 9 * 9]))
    net.add(Dense(1024, activation='sigmoid',
                  kernel_regularizer=regularizers.l2(0.0001)))
    net.add(Reshape([-1, 32, 32]))
    return net
def create_maxpooling_model(activation = 'relu', input_shape = (64,64)):
    """
    Simple convnet model with max pooling: one convolution, one max pooling and one fully connected layer
    :param activation: activation for the conv layer, e.g : ReLu, tanh, etc.
        (default 'relu' preserves the previous hard-coded behaviour)
    :return: Keras model
    """
    model = Sequential()
    # Fix: the `activation` parameter was accepted but 'relu' was hard-coded
    # in the Conv2D call; it is now wired through (default unchanged).
    model.add(Conv2D(100, (11,11), activation=activation, padding='valid',
                     strides=(1, 1),
                     input_shape=(input_shape[0], input_shape[1], 1)))
    model.add(MaxPooling2D((6,6)))
    model.add(Reshape([-1, 8100]))
    model.add(Dense(1024, activation = 'sigmoid', kernel_regularizer=regularizers.l2(0.0001)))
    model.add(Reshape([-1, 32, 32]))
    return model
def print_model(model):
    """Print every layer's title-cased name with its input and output shapes."""
    print('Size for each layer :\nLayer, Input Size, Output Size')
    for layer in model.layers:
        print(layer.name.title(), layer.input_shape, layer.output_shape)
def run_cnn(data, train = False):
    """Train (or load) the max-pooling CNN and predict ROI masks on the test set.

    :param data: dict with keys 'X_train', 'Y_train', 'X_test', 'Y_test'
    :param train: when True, train from scratch, plot the loss curve and save
        the model; otherwise load 'cnn_model_saved.h5' from disk
    :return: predictions for X_test
    """
    X_train = data['X_train']
    Y_train = data['Y_train']
    X_test = data['X_test']
    Y_test = data['Y_test']
    if train:
        model = create_maxpooling_model()
        print_model(model)
        model.compile(loss='mean_squared_error',
                      optimizer='adam',
                      metrics=['accuracy'])
        h = training(model, X_train, Y_train, batch_size=16, epochs=10, data_augm=False)
        # Fix: was `metrics = 'loss'` followed by uses of the undefined name
        # `metric`, which raised NameError on every training run.
        metric = 'loss'
        plt.plot(range(len(h.history[metric])), h.history[metric])
        plt.ylabel(metric)
        plt.xlabel('epochs')
        plt.title("Learning curve")
        model.save('cnn_model_saved.h5')
        y_pred = model.predict(X_test, batch_size=16)
    else:
        try:
            model = load_model('cnn_model_saved.h5')
        except IOError as e:
            # Fix: was a Python 2 print statement (syntax error under Python 3).
            # NOTE(review): if loading fails, `model` is still unbound below.
            print("I/O Error ({0}): {1}".format(e.errno, e.strerror))
        y_pred = model.predict(X_test, batch_size=16)
    del model
    return y_pred
def run(X, Y, model, X_to_pred=None, history=False, verbose=0, activation=None, epochs=20, data_augm=False):
    """Build, train and (optionally) describe one of the CNN variants.

    :param X: training images
    :param Y: training binary ROI masks
    :param model: one of 'simple', 'larger', 'deeper', 'maxpooling'
    :param X_to_pred: images to predict on; defaults to X when None
    :param history: when True, return (history, model) instead of model
    :raises ValueError: for an unknown model name
    :return: the trained Keras model (plus the fit history when requested)
    """
    if model == 'simple':
        m = create_baseline_model(activation=activation)
    elif model == 'larger':
        m = create_model_larger(activation=activation)
    elif model == 'deeper':
        m = create_model_deeper(activation=activation)
    elif model == 'maxpooling':
        # Fix: was `create_model_maxpooling`, a NameError -- the factory
        # defined in this file is `create_maxpooling_model`.
        m = create_maxpooling_model(activation=activation)
    else:
        raise ValueError("unknown model name: {0}".format(model))
    m.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])
    if verbose > 0:
        print('Size for each layer :\nLayer, Input Size, Output Size')
        for p in m.layers:
            print(p.name.title(), p.input_shape, p.output_shape)
    h = training(m, X, Y, batch_size=16, epochs=epochs, data_augm=data_augm)
    # Fix: `if not X_to_pred` raises "truth value of an array is ambiguous"
    # for non-empty numpy arrays; compare against None instead.
    if X_to_pred is None:
        X_to_pred = X
    # NOTE(review): y_pred is computed but never returned (kept for parity
    # with the original behaviour) -- confirm whether it should be returned.
    y_pred = m.predict(X_to_pred, batch_size=16)
    if history:
        return h, m
    else:
        return m
def training(model, X, Y, batch_size=16, epochs= 10, data_augm=False):
    """
    Training CNN with the possibility to use data augmentation
    :param model: compiled Keras model
    :param X: training pictures, shape (N, H, W, 1)
    :param Y: training binary ROI mask
    :param batch_size: minibatch size for fitting
    :param epochs: number of training epochs
    :param data_augm: when True, fit on randomly augmented batches
    :return: Keras History object from fitting
    """
    if data_augm:
        # Geometric augmentation only (rotations/shifts/flips); all
        # normalization/whitening options are explicitly disabled.
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=50,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)
        # fit() computes feature-wise stats; with the flags above it should be
        # unnecessary, but it is kept to match the original behaviour.
        datagen.fit(X)
        history = model.fit_generator(datagen.flow(X, Y,
                                                   batch_size=batch_size),
                                      steps_per_epoch=X.shape[0] // batch_size,
                                      epochs=epochs)
    else:
        history = model.fit(X, Y, batch_size=batch_size, epochs=epochs)
    return history
|
{"/preprocessing.py": ["/utils.py"]}
|
36,133
|
kaizen123/Left-Ventricle-Segmentation
|
refs/heads/master
|
/StackedAeModel.py
|
import keras
from keras.models import Model,Sequential
from keras.layers import Input,Dense, Dropout, Activation, Flatten, Reshape, Conv2D, MaxPooling2D, AveragePooling2D
from keras import regularizers
from keras.losses import mean_squared_error
from keras import losses
import matplotlib.patches as patches
import numpy as np
import dicom
import cv2
import matplotlib.pyplot as plt
def model1(X_train, get_history=False, verbose=0, param_reg=0.001):
    """Train the first (4096 -> 100 -> 4096) autoencoder layer.

    :param X_train: flattened training images, 4096 features per sample
    :param get_history: when True, also return the per-epoch loss list
    :param param_reg: L2 regularization strength for both dense layers
    :return: (encoded_X, encoder) or (loss_history, encoded_X, encoder)
    """
    enc = Dense(input_dim=4096, units=100, kernel_regularizer=regularizers.l2(param_reg))
    dec = Dense(input_dim=100, units=4096, kernel_regularizer=regularizers.l2(param_reg))
    ae = Sequential()
    ae.add(enc)
    ae.add(dec)
    ae.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    hist = ae.fit(X_train, X_train, epochs=200, verbose=verbose)
    # Re-wrap the trained encoder alone to obtain the latent representation.
    probe = Sequential()
    probe.add(enc)
    probe.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    encoded_X = probe.predict(X_train, verbose=0)
    if get_history:
        return hist.history['loss'], encoded_X, enc
    return encoded_X, enc
def model2(X_train,encoded_X, encoder_0, get_history=False, verbose=0, param_reg=0.001):
    """Train the second (100 -> 100 -> 100) autoencoder on the encoded features.

    :param X_train: kept for interface compatibility (no longer used, see below)
    :param encoded_X: latent features produced by the first encoder
    :param encoder_0: kept for interface compatibility (no longer used, see below)
    :param get_history: when True, also return the per-epoch loss list
    :param param_reg: L2 regularization strength for both dense layers
    :return: encoder_1 or (loss_history, encoder_1)
    """
    autoencoder_1 = Sequential()
    encoder_1 = Dense(input_dim=100, units=100, kernel_regularizer=regularizers.l2(param_reg))
    decoder_1 = Dense(input_dim=100, units=100, kernel_regularizer=regularizers.l2(param_reg))
    autoencoder_1.add(encoder_1)
    autoencoder_1.add(decoder_1)
    autoencoder_1.compile(loss= 'mse', optimizer='adam', metrics=['accuracy'])
    h = autoencoder_1.fit(encoded_X, encoded_X, epochs=200, verbose=verbose)
    # Removed dead code: the original rebuilt a model around encoder_0 and
    # re-ran encoder_0.predict(X_train) into a local variable that was never
    # used before returning. X_train/encoder_0 stay in the signature so
    # existing callers keep working.
    if get_history:
        return h.history['loss'], encoder_1
    else:
        return encoder_1
def model3(X_train, Y_train, encoder_0, encoder_1, init='zero',
           get_history=False, verbose=0, param_reg=0.001):
    """Stack the two pre-trained encoders, add a decoding layer, fine-tune end-to-end.

    :param init: kernel initializer for the final 100 -> 4096 decoding layer
    :return: the fine-tuned model, optionally preceded by the loss history
    """
    net = Sequential()
    net.add(encoder_0)
    net.add(encoder_1)
    net.add(Dense(input_dim=100, units=4096, kernel_initializer=init,
                  kernel_regularizer=regularizers.l2(param_reg)))
    net.compile(optimizer='adam', loss="MSE", metrics=['accuracy'])
    hist = net.fit(X_train, Y_train, epochs=200, verbose=verbose)
    return (hist.history['loss'], net) if get_history else net
def SAE(X_train, Y_train, init='zero'):
    """Greedy layer-wise pre-training followed by supervised fine-tuning.

    :return: (loss_history, fine_tuned_model)
    """
    encoded, enc0 = model1(X_train)
    enc1 = model2(X_train, encoded, enc0)
    return model3(X_train, Y_train, enc0, enc1, init, get_history=True)
|
{"/preprocessing.py": ["/utils.py"]}
|
36,134
|
kaizen123/Left-Ventricle-Segmentation
|
refs/heads/master
|
/utils.py
|
#!/usr/bin/env python2.7
import numpy as np
import cv2
from keras import backend as K
import os
from sklearn.metrics import confusion_matrix
import itertools
import numpy as np
import matplotlib.pyplot as plt
def get_SAX_SERIES():
    """Parse SAX_series.txt ('key: value' per line) into a dict, skipping '#' comments."""
    mapping = {}
    with open('SAX_series.txt', 'r') as fh:
        for raw in fh:
            if raw.startswith('#'):
                continue
            key, val = raw.split(':')
            mapping[key.strip()] = val.strip()
    return mapping
def mvn(ndarray):
    '''Input ndarray is of rank 3 (height, width, depth).
    MVN performs per channel mean-variance normalization.
    '''
    eps = 1e-6  # guards against division by zero on constant channels
    mu = ndarray.mean(axis=(0, 1), keepdims=True)
    sigma = ndarray.std(axis=(0, 1), keepdims=True)
    return (ndarray - mu) / (sigma + eps)
def reshape(ndarray, to_shape):
    '''Reshapes a center cropped (or padded) array back to its original shape.

    :param ndarray: rank-3 array (height, width, depth)
    :param to_shape: target (h, w, d); depth is assumed unchanged
    :return: the center-cropped or zero-padded array
    '''
    h_in, w_in, d_in = ndarray.shape
    h_out, w_out, d_out = to_shape
    if h_in > h_out:  # center crop along h dimension
        # Fix: use floor division -- plain `/` yields a float under Python 3,
        # which is invalid as a slice index (same fix applied below).
        h_offset = (h_in - h_out) // 2
        ndarray = ndarray[h_offset:(h_offset + h_out), :, :]
    else:  # zero pad along h dimension
        pad_h = (h_out - h_in)
        rem = pad_h % 2
        pad_dim_h = (pad_h // 2, pad_h // 2 + rem)
        # npad is tuple of (n_before, n_after) for each (h,w,d) dimension
        npad = (pad_dim_h, (0, 0), (0, 0))
        ndarray = np.pad(ndarray, npad, 'constant', constant_values=0)
    if w_in > w_out:  # center crop along w dimension
        w_offset = (w_in - w_out) // 2
        ndarray = ndarray[:, w_offset:(w_offset + w_out), :]
    else:  # zero pad along w dimension
        pad_w = (w_out - w_in)
        rem = pad_w % 2
        pad_dim_w = (pad_w // 2, pad_w // 2 + rem)
        npad = ((0, 0), pad_dim_w, (0, 0))
        ndarray = np.pad(ndarray, npad, 'constant', constant_values=0)
    return ndarray  # reshaped
def center_crop(ndarray, crop_size):
    '''Input ndarray is of rank 3 (height, width, depth).
    Argument crop_size is an integer for square cropping only.
    Performs padding and center cropping to a specified size.

    :raises ValueError: if crop_size is zero
    '''
    h, w, d = ndarray.shape
    if crop_size == 0:
        raise ValueError('argument crop_size must be non-zero integer')
    if any([dim < crop_size for dim in (h, w)]):
        # zero pad along each (h, w) dimension before center cropping
        pad_h = (crop_size - h) if (h < crop_size) else 0
        pad_w = (crop_size - w) if (w < crop_size) else 0
        rem_h = pad_h % 2
        rem_w = pad_w % 2
        # Fix: floor division -- `/` produces floats under Python 3, which
        # np.pad and slicing reject (same fix for the offsets below).
        pad_dim_h = (pad_h // 2, pad_h // 2 + rem_h)
        pad_dim_w = (pad_w // 2, pad_w // 2 + rem_w)
        # npad is tuple of (n_before, n_after) for each (h,w,d) dimension
        npad = (pad_dim_h, pad_dim_w, (0, 0))
        ndarray = np.pad(ndarray, npad, 'constant', constant_values=0)
        h, w, d = ndarray.shape
    # center crop
    h_offset = (h - crop_size) // 2
    w_offset = (w - crop_size) // 2
    cropped = ndarray[h_offset:(h_offset + crop_size),
                      w_offset:(w_offset + crop_size), :]
    return cropped
def lr_poly_decay(model, base_lr, curr_iter, max_iter, power=0.5):
    """Polynomially decay the optimizer's learning rate and write it back.

    lr = base_lr * (1 - curr_iter/max_iter)**power

    :param model: compiled Keras model whose optimizer exposes `lr`
    :param base_lr: initial learning rate
    :param curr_iter: current training iteration
    :param max_iter: total number of iterations (must be non-zero)
    :param power: decay exponent (0.5 = square-root decay)
    :return: the learning rate actually set, read back from the backend
    """
    lrate = base_lr * (1.0 - (curr_iter / float(max_iter)))**power
    K.set_value(model.optimizer.lr, lrate)
    return K.eval(model.optimizer.lr)
def dice_coef(y_true, y_pred):
    """Dice similarity coefficient: 2*|A.B| / (|A| + |B|) over all elements."""
    overlap = np.sum(y_true * y_pred, axis=None)
    total = np.sum(y_true, axis=None) + np.sum(y_pred, axis=None)
    return 2.0 * overlap / total
def jaccard_coef(y_true, y_pred):
    """Jaccard index (IoU): |A.B| / (|A| + |B| - |A.B|) over all elements."""
    inter = np.sum(y_true * y_pred, axis=None)
    union = np.sum(y_true, axis=None) + np.sum(y_pred, axis=None) - inter
    return float(inter) / float(union)
def get_confusion_matrix_bbox(mask, y_pred):
    '''
    Using confusion matrix to evaluate the performance of cropping
    For each mask - pred pair, compute the bbox of pred, regard mask as ground truth, bbox as prediction,
    apply confusion matrix metrics. After that, average over all confusion matrix.
    '''
    # Binary image holding one predicted bounding box per sample.
    pred_box = np.zeros((mask.shape))
    n = mask.shape[0]
    for i in range(n):
        pred = y_pred[i, 0, :,:]
        [x_min, x_max, y_min, y_max] = get_bbox_single(pred)
        pred_box[i, x_min:x_max, y_min:y_max, 0] = 1
    # NOTE(review): both reshapes use shape[1] twice, so this assumes square
    # images -- confirm whether non-square inputs are ever passed.
    pred_box = np.reshape(pred_box, [n, pred_box.shape[1]*pred_box.shape[1]])
    mask = np.reshape(mask, [n, mask.shape[1] * mask.shape[1]])
    #cm = confusion_matrix(mask, pred_box)
    # Accumulate per-sample 2x2 confusion matrices, then average over samples.
    cm = np.zeros((2,2))
    for i in range(n):
        cm = cm + confusion_matrix(mask[i,:], pred_box[i,:])
    cm = cm / n
    return cm
def get_cropped(img, y_pred, roi_size = 32, win_size = 100):
    '''
    Cropped the original image using CNN prediction
    @param:
        img: the original image, shape (N, WIDTH, HEIGHT, 1), default size 256
        y_pred: the prediction of ROI, may be showed as scatter binary image, shape (N, 1, roi_size, roi_size)
        roi_size: the size of y_pred, default 32
        win_size: the size of window used to crop the original image, default 100
    @return
        cropped: the cropped image, same format with input img, but with smaller size of win_size
    '''
    n = img.shape[0]
    out = np.zeros((n, win_size, win_size, 1))
    for k in range(y_pred.shape[0]):
        x0, x1, y0, y1 = get_bbox_single(y_pred[k, 0, :, :], win_size=win_size)
        out[k, :, :, 0] = img[k, x0:x1, y0:y1, 0]
    return out
def get_bbox_single(pred, roi_size = 32, win_size = 100, img_size = 256):
    '''
    Compute the bounding box param of the given binary region mask
    This implementation compute the median of x, y as the middle point.

    :param pred: binary ROI prediction of shape (roi_size, roi_size)
    :param roi_size: side length of pred's grid
    :param win_size: side length of the returned square window
    :param img_size: side length of the full image (new, defaults to the
        previously hard-coded 256)
    :return: [x_min, x_max, y_min, y_max], guaranteed inside [0, img_size]
    '''
    ind = np.array(np.where(pred > 0.5))
    [x_median, y_median] = np.median(ind, axis=1)
    # Scale ROI-grid coordinates back to full-image coordinates
    # (// keeps the scale an integer under Python 3 as well).
    x_median *= img_size // roi_size
    y_median *= img_size // roi_size
    # Fix: clamp the window so x_max/y_max never exceed img_size -- the
    # original could return boxes past the image edge, which broke the
    # fixed-size assignment in get_cropped.
    x_min = int(min(max(0, x_median - win_size // 2), img_size - win_size))
    y_min = int(min(max(0, y_median - win_size // 2), img_size - win_size))
    return [x_min, x_min + win_size, y_min, y_min + win_size]
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param cm: square confusion-matrix array
    :param classes: class labels used for the axis ticks
    :param normalize: when True, row-normalize so each true-label row sums to 1
    :param title: figure title
    :param cmap: matplotlib colormap for the heatmap
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Floats after normalization, integer counts otherwise.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate every cell; use white text on dark cells for readability.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
|
{"/preprocessing.py": ["/utils.py"]}
|
36,145
|
hartescout/Malware-Lake
|
refs/heads/master
|
/main.py
|
import module_db as mldb
if __name__ == "__main__":
    """
    Create a new database object
    This object is for the Malware Bazaar Database
    Please set your API key
    """
    # Bazaar feed: recent-samples CSV with the named columns below.
    db_bazaar = mldb.Database("Bazaar",
                              "API-KEY-BAZAAR",
                              "https://bazaar.abuse.ch/export/csv/recent",
                              ["first_seen_utc","sha256_hash", "file_name", "file_type_guess", "signature", "clamav", "vtpercent"],
                              False)
    """
    Create a new database object
    This object is for the Malshare Database
    Please set your API key
    """
    # Malshare feed: plain text, one SHA256 per line (no header columns).
    db_malshare = mldb.Database("Malshare",
                                "API-KEY-MALSHARE",
                                "https://malshare.com/daily/malshare.current.sha256.txt",
                                None,
                                False)
    """
    Downloaded raw databases for Malware Bazaar and Malshare
    """
    # 467 / 0 = number of leading junk bytes to strip from each raw download.
    if(db_bazaar.createDatabase(467) == False):
        print("Failed to download database")
    if(db_malshare.createDatabase(0) == False):
        print("Failed to download database")
    """
    Generate new full database
    """
    # Merge both feeds, enrich via their APIs and append to the master CSV.
    if(mldb.generateFullDB(db_bazaar, db_malshare) == False):
        print("Failed to create database")
    """
    TODO
    Allow recent/full/monthly updates to each DB
    Add lookup table to file types
    """
|
{"/main.py": ["/module_db.py"], "/module_db.py": ["/module_api_parser.py"]}
|
36,146
|
hartescout/Malware-Lake
|
refs/heads/master
|
/module_db.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 15:59:40 2020
@author: Danus
"""
import os
import requests
import pandas
import module_api_parser as api_parser
from zipfile import ZipFile
from datetime import date
"""
A const dictionary containing extraction settings for each database type
"""
# Per-feed extraction settings: which enrichment columns to create, the name
# of the hash column in the raw CSV, and the API function used to fetch the
# extra data for each hash.
const_dict_extract_type_db = {
        "Bazaar": {"columns":["Tags", "Delivery", "Source"],
                   "hash_col": "sha256_hash",
                   "api_func": api_parser.getDataBazaar},
        "Malshare": {"columns":["first_seen_utc", "file_type_guess", "Tags"],
                     "hash_col": "sha256_hash",
                     "api_func": api_parser.getDataMalshare},
}
# Today's date (used to name the daily raw database files) and working dir.
current_date = str(date.today())
current_dir = os.getcwd()
"""
Location of the master database
"""
# Fix: the path was built with a hard-coded '\' separator inside a plain
# string, which only produced a valid path on Windows; os.path.join is
# correct on every platform.
current_main_db = os.path.join(current_dir, "Main_test.csv")
"""
Database Constructor
IN string_db_name - Name of the database
IN string_api_key - API key of the database
IN string_db_source - URL Location of the database
IN list_db_headers - A list of header fields that will be used in the database
IN isZipped - Boolean indicating if the raw database file is zipped or not
"""
class Database:
    """One remote malware-hash feed (e.g. Malware Bazaar, Malshare).

    Downloads a dated raw database file, cleans it in place, and can expand
    it into a pandas DataFrame enriched through the feed's API.
    """
    def __init__(self, string_db_name, string_api_key, string_db_source, list_db_headers, isZipped=False):
        self.string_db_name = string_db_name
        self.string_api_key = string_api_key
        self.string_db_source = string_db_source
        self.list_db_headers = list_db_headers
        self.isZipped = isZipped
        # <current_dir>/<name>/<date>_<name>: one raw file per feed per day.
        self.string_db_path_full = "{0:s}/{1:s}/{2:s}_{3:s}".format(
            current_dir,
            string_db_name,
            current_date,
            string_db_name)
        self.string_db_path_dir = "{0:s}/{1:s}/".format(
            current_dir,
            string_db_name)
        # Populated by getFullDataFrame().
        self.dataframe_db = None
    """
    This fuction unzips a compressed database file
    IN - The bytes of the zipped file
    IN - File object handle of the zipped file
    OUT - Contents of the extracted database file in bytes
    Assumes that there is only one file in the zip archive
    """
    def CleanDownloadedDatabase(self, num_trash_bytes):
        """Unzip (when needed) and strip junk bytes/quotes from the raw file.

        :param num_trash_bytes: bytes to drop from the start of the file
        :return: True on success, False on any I/O failure
        """
        extracted_contents = ""
        file_name = ""
        if(self.isZipped == True):
            try:
                with ZipFile(self.string_db_path_full, 'r') as zipObj:
                    file_name = zipObj.namelist()
                    file_name = "{0:s}/{1:s}".format(self.string_db_path_dir, file_name[0])
                    zipObj.extractall(self.string_db_path_dir)
            except Exception as e:
                print(e)
                return False
            # Replace the archive with its (assumed single) extracted member.
            os.remove(self.string_db_path_full)
            os.rename(file_name, self.string_db_path_full)
        try:
            with open(self.string_db_path_full, 'rb') as extracted_file:
                extracted_contents = extracted_file.read()
        except Exception as e:
            print(e)
            return False
        try:
            with open(self.string_db_path_full, 'wb') as extracted_file:
                extracted_contents = extracted_contents[num_trash_bytes:]
                """
                Cleans double qoutes and space character entries from the raw database
                """
                extracted_contents = extracted_contents.replace(b'\x20\x22', b'')
                extracted_contents = extracted_contents.replace(b'\x22', b'')
                extracted_file.write(extracted_contents)
        except Exception as e:
            print(e)
            return False
        return True
    """
    This function creates a raw database from the source provided to Database the object
    IN - Number of bytes to remove from the raw database outputp
    """
    def createDatabase(self, num_trash_bytes):
        """Download today's raw database (if absent) and clean it in place.

        :param num_trash_bytes: bytes to strip from the start of the download
        :return: True on success, False if cleaning failed
        """
        print("[{0:s}]Preparing Database creation!".format(self.string_db_name))
        """
        If the database folder doesnt exist, create it
        """
        if not os.path.exists(self.string_db_path_dir):
            os.mkdir(self.string_db_path_dir)
        """
        Check if a raw database already exists for the current date
        """
        if not os.path.exists(self.string_db_path_full):
            print("[{0:s}]Creating Database! URL:{1:s}".format(self.string_db_name, self.string_db_source))
            try:
                with requests.get(self.string_db_source, timeout=10) as response, open(self.string_db_path_full, 'wb' ) as out_file:
                    out_file.write(response.content)
            except Exception as e:
                print(e)
                print("[{0:s}]An error occurred during the database creation!".format(self.string_db_name))
            print("[{0:s}]Database created!".format(self.string_db_name))
        else:
            print("[{0:s}]Database already exists!".format(self.string_db_name))
        if(self.CleanDownloadedDatabase(num_trash_bytes) == False):
            print("[{0:s}]Unable to handle downloaded file!".format(self.string_db_name))
            return False
        print()
        return True
    """
    This function creates a pandas dataframe from a raw database of the Database Object
    OUT -
    """
    def readDataFrame(self):
        """Load the cleaned raw database into a pandas DataFrame."""
        df_read = pandas.read_csv(self.string_db_path_full, error_bad_lines=False, usecols=self.list_db_headers)
        """
        If no header fields exist, the default first header field name is sha256_hash
        """
        if(self.list_db_headers == None):
            df_read.columns = ["sha256_hash"]
        return df_read
    """
    This function creates a full data frame constructed out of the raw database
    and the extra information extracted using the API extraction functions
    """
    def getFullDataFrame(self):
        """Enrich the raw database via the feed's API; stores result on self.

        :return: True on success, False if the feed type is unknown
        """
        """
        Read the current raw database into a dataframe
        """
        df_read = self.readDataFrame()
        dict_db_info = const_dict_extract_type_db.get(self.string_db_name, None)
        if(dict_db_info == None):
            print("[{0:s}]No such database type!".format(self.string_db_name))
            return False
        print("[{0:s}]Beggining extraction..".format(self.string_db_name))
        """
        cols = new columns to create
        hash_col = the name of the hash field
        func = a pointer to the database API handling function
        """
        cols = dict_db_info["columns"]
        hash_col = dict_db_info["hash_col"]
        func = dict_db_info["api_func"]
        """
        Create new dataframe to hold the new extracted values
        """
        df_newdata = pandas.DataFrame(columns = cols)
        """
        For every row in the data frame
        """
        for index, row in df_read.iterrows():
            sha256_hash = row[hash_col]
            # One API call per row -- can be slow for large feeds.
            new_data = func(self.string_api_key, sha256_hash)
            df_newdata.loc[index] = new_data
        df_newdata = pandas.concat([df_read, df_newdata], axis=1, sort=False)
        self.dataframe_db = df_newdata
        return True
"""
This function merges two Database objects and creates a csv file out of them
IN db_first - A Database object
IN db_second - A database object
"""
def generateFullDB(db_first, db_second):
    """Merge two Database objects and append the result to the master CSV.

    :param db_first: a Database whose full frame will be generated
    :param db_second: a second Database, or None to use only db_first
    :return: True on success, False if either full frame fails to build
    """
    if(db_first.getFullDataFrame() == False):
        print("Failed to create the first full data frame!")
        return False
    df_db = db_first.dataframe_db
    if(db_second != None):
        if(db_second.getFullDataFrame() == False):
            print("Failed to create the second full data frame!")
            return False
        df_db = pandas.concat([db_first.dataframe_db, db_second.dataframe_db])
    # Keep the first occurrence of each hash across both feeds.
    df_db = df_db.drop_duplicates("sha256_hash")
    # NOTE(review): assumes a 'vtpercent' column exists -- only the Bazaar
    # feed declares it; confirm behaviour when merging other feed pairs.
    df_db["vtpercent"].fillna("-1", inplace=True)
    df_db.fillna("n/a", inplace=True)
    # Append mode: emit the CSV header only when the master file is new.
    bool_headers = False
    if not os.path.exists(current_main_db):
        bool_headers = True
    df_db.to_csv(current_main_db, index=False, mode="a", header=bool_headers)
    return True
|
{"/main.py": ["/module_db.py"], "/module_db.py": ["/module_api_parser.py"]}
|
36,147
|
hartescout/Malware-Lake
|
refs/heads/master
|
/module_api_parser.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 17:27:42 2020
@author: Danus
"""
import requests
from datetime import datetime
"""
Constant dictionary that defines all possible repsonses that can return from AP calls
that can return from Malware Bazaar
"""
# Maps Malware Bazaar `query_status` values to either True (success) or a
# human-readable error message; a missing key is treated as an unknown error
# by getDataBazaar.
const_dict_resposne_bazzar = {
        "ok": True,
        "illegal_hash": "Illegal hash!",
        "hash_not_found": "Hash not found!",
        "no_hash_provided": "No hash provided!",
        "http_post_expected": "Http post command expected!"
}
"""
This function returns extra information about a SHA256 hash from Malware Bazaar
IN - API Key for Malware Bazaar
IN - SHA256 Hash for sample
OUT - List object containing extra info about the hash [Tag, Delivery Method, Source]
"""
def getDataBazaar(string_api_key, string_sha256):
    """Query Malware Bazaar for extra info about a SHA256 hash.

    :param string_api_key: Malware Bazaar API key
    :param string_sha256: sample hash to look up
    :return: [tags, delivery_method, intelligence]; [False, False, False]
        on any network/API failure (each "n/a" when the field is absent)
    """
    url="https://mb-api.abuse.ch/api/v1/"
    data_query = {
        "query": "get_info",
        "hash": string_sha256
    }
    data_headers = {'API-KEY': string_api_key }
    try:
        response = requests.post(url, headers=data_headers, data=data_query)
    except Exception as e:
        print(e)
        print("[Bazzar]Failed to query %s hash!" %(string_sha256))
        return [False, False, False]
    try:
        response_json = response.json()
    except Exception as e:
        print(e)
        print("[Bazzar]Failed to prase json for hash %s!" %(string_sha256))
        return [False, False, False]
    response_status = const_dict_resposne_bazzar.get(response_json["query_status"], None)
    # Fix: test the unknown-status case first -- the original tested
    # `!= True` before `== None`, making the None branch unreachable.
    if(response_status == None):
        print("[Bazzar]Unknown error!")
        return [False, False, False]
    elif(response_status != True):
        print("[Bazzar] %s hash %s" %(response_status, string_sha256))
        return [False, False, False]
    # Reuse the JSON parsed above instead of re-parsing the response twice.
    response_json = response_json["data"][0]
    tags = response_json.get("tags", "n/a")
    delivery_method = response_json.get("delivery_method", "n/a")
    # Fix: default must be None, not "n/a" -- the old string default was then
    # indexed with [0] and .get(), raising AttributeError whenever
    # "file_information" was absent from the response.
    intelligence = response_json.get("file_information", None)
    if(intelligence != None):
        intelligence = intelligence[0]
        intelligence = intelligence.get("value", "n/a")
    if(tags == None):
        tags = "n/a"
    if(delivery_method == None):
        delivery_method = "n/a"
    if(intelligence == None):
        intelligence = "n/a"
    return [tags, delivery_method, intelligence]
"""
This function returns extra information about a SHA256 hash from Malshare
IN - API Key for Malshare
IN - SHA256 Hash for sample
OUT - List object containing extra info about the hash [First seen, File type, Tag, Source]
"""
def getDataMalshare(string_api_key, string_sha256):
    """Query Malshare's search API for extra info about a SHA256 hash.

    :param string_api_key: Malshare API key
    :param string_sha256: sample hash to look up
    :return: [first_seen_utc, file_type_guess, yara_tags];
        [False, False, False] on request or JSON failure
    """
    url="https://malshare.com/api.php?"
    data_query = {
        "api_key": string_api_key,
        "action": "search",
        "query": string_sha256
    }
    try:
        response = requests.get(url, params=data_query, timeout=10)
    except Exception as e:
        print("[Malshare]",e)
        return [False, False, False]
    try:
        response_json = response.json()
    except Exception as e:
        print(e)
        print("[Malshare]Failed to prase json for hash %s!" %(string_sha256))
        return [False, False, False]
    # NOTE(review): assumes the response always contains 'added', 'type' and
    # 'yarahits' keys -- an empty search result would raise KeyError here;
    # confirm against the Malshare API.
    utc_first_seen = datetime.utcfromtimestamp(response_json["added"]).strftime('%Y-%m-%d %H:%M:%S')
    file_type_guess = response_json["type"]
    tags = response_json["yarahits"]
    tags = tags["yara"]
    return [utc_first_seen, file_type_guess, tags]
|
{"/main.py": ["/module_db.py"], "/module_db.py": ["/module_api_parser.py"]}
|
36,148
|
sevberg/prefect_playground
|
refs/heads/main
|
/workflow/flow_generator.py
|
from os import environ, path
from prefect import Flow, unmapped, Parameter
from prefectplayground.tasks import add_matrix, generate_list
def BasicFlow() -> Flow:
    """Build the basic Prefect flow: generate a random list, then map a
    matrix-addition task over its members.

    :return: the constructed (unexecuted) prefect.Flow
    """
    with Flow(
        name="basic_flow",
    ) as flow:
        # Simple list generation task
        generate_list_n_members = Parameter("generate_list_n_members", default=100)
        generate_list_min_value = Parameter("generate_list_min_value", default=20)
        generate_list_max_value = Parameter("generate_list_max_value", default=30)
        generate_list_cycles = Parameter("generate_list_cycles", default=1)
        generate_list_seed = Parameter("generate_list_seed", default=0)
        members = generate_list(
            n_members=generate_list_n_members,
            min_value=generate_list_min_value,
            max_value=generate_list_max_value,
            cycles=generate_list_cycles,
            seed=generate_list_seed,
        )
        # Mapped task: one add_matrix run per member; seed and size are held
        # constant across the mapping via unmapped().
        add_matrix_size = Parameter("add_matrix_size", default=5000)
        add_matrix_seed = Parameter("add_matrix_seed", default=1)
        # Result handle; not referenced further inside the flow definition.
        member_means = add_matrix.map(
            cycles=members,
            seed=unmapped(add_matrix_seed),
            size=unmapped(add_matrix_size),
        )
    return flow
if __name__ == "__main__":
    # Run locally (no agent or remote executor) when invoked as a script.
    BasicFlow().run()
|
{"/workflow/flow_generator.py": ["/prefectplayground/tasks.py"]}
|
36,149
|
sevberg/prefect_playground
|
refs/heads/main
|
/workflow/k8s_dask_executor.py
|
from prefect.storage import Docker
from prefect.run_configs import KubernetesRun
from prefect.executors.dask import DaskExecutor
from os import path
import flow_generator
# Fetch flow
flow = flow_generator.BasicFlow()
# Build and push image
# module_dir = repository root (two directory levels above this file).
module_dir = path.dirname(path.dirname(path.abspath(__file__)))
def get_requirements(*extras):
    """Read requirements.txt (one requirement per line) and append *extras*.

    :param extras: additional requirement strings to append
    :return: list of non-empty requirement strings
    """
    reqs = []
    # `with` closes the file handle (the original leaked it).
    with open(path.join(module_dir, "requirements.txt")) as fh:
        for line in fh:
            # Fix: rstrip the newline instead of `line[:-1]`, which chopped
            # the last character of a final line lacking a trailing newline.
            # Blank lines are skipped -- they are not valid requirements.
            req = line.rstrip("\n")
            if req:
                reqs.append(req)
    reqs.extend(extras)
    return reqs
# Docker storage: bakes the package source and requirements into the image
# used by both the flow run and the Dask workers.
flow.storage = Docker(
    python_dependencies=get_requirements(),
    registry_url="registry.hub.docker.com/",
    image_name="sevberg/prefect_playground",
    image_tag="latest",
    files={
        path.join(
            module_dir, "requirements.txt"
        ): "/modules/prefect_playground/requirements.txt",
        path.join(module_dir, "README.md"): "/modules/prefect_playground/README.md",
        path.join(module_dir, "LICENSE"): "/modules/prefect_playground/LICENSE",
        path.join(module_dir, "setup.py"): "/modules/prefect_playground/setup.py",
        path.join(module_dir, ".version"): "/modules/prefect_playground/.version",
        path.join(
            module_dir, "prefectplayground"
        ): "/modules/prefect_playground/prefectplayground",
    },
    extra_dockerfile_commands=[
        # Editable, no-deps install: dependencies already in the image.
        "RUN pip install --no-deps -e /modules/prefect_playground"
    ],
)
# Create run config
flow.run_config = KubernetesRun(
    cpu_request=2, memory_request="2G", env={"AWS_DEFAULT_REGION": "eu-central-1"}
)
# Create Dask Executor
def make_cluster(n_workers, image):
    """Start a cluster using the same image as the flow run"""
    # Imported lazily so this module loads without dask_kubernetes installed.
    from dask_kubernetes import KubeCluster, make_pod_spec
    pod_spec = make_pod_spec(
        image=image,
        memory_limit="1900M",
        memory_request="1900M",
        cpu_limit=0.5,
        cpu_request=0.5,
    )
    return KubeCluster(pod_spec, n_workers=n_workers)
flow.executor = DaskExecutor(
    cluster_class=make_cluster,
    cluster_kwargs={"n_workers": 10, "image": flow.storage.name},
)
# Register flow
flow.register(project_name="prefect_playground", labels=["dev"])
|
{"/workflow/flow_generator.py": ["/prefectplayground/tasks.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.