index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
26,029
|
Beartime234/whos-that-pokemon-s3gallery
|
refs/heads/master
|
/src/img_transform.py
|
from PIL import Image

# RGBA color constants used by the silhouette transform.
transparent_color = (255, 255, 255, 0)  # NOTE(review): defined but never used in this file
black_color = (0, 0, 0, 255)  # opaque black painted over every visible pixel
def create_silhouette_of_img(input_image_path: str, output_image_path: str) -> None:
    """Creates a silhouette image.

    Every pixel with any opacity is painted solid black; fully transparent
    pixels are left untouched, so the output is a black silhouette on the
    original transparent background.

    Args:
        input_image_path: Path of the image to create a silhouette from.
        output_image_path: Path the silhouette image is written to.
    Returns:
        None
    """
    picture = Image.open(input_image_path)  # Open the picture
    width, height = picture.size
    # Process every pixel
    for x in range(width):
        for y in range(height):
            current_color = picture.getpixel((x, y))
            # NOTE(review): assumes the image has an alpha channel (RGBA);
            # for RGB images current_color[3] raises IndexError — TODO confirm inputs.
            if current_color[3] > 0:  # alpha == 0 (fully transparent) pixels are skipped
                # Any visible pixel is repainted black to form the silhouette.
                picture.putpixel((x, y), black_color)
    picture.save(output_image_path)
|
{"/tests/test_util.py": ["/src/util.py"], "/src/pokemon_assets.py": ["/src/img_transform.py", "/src/s3.py", "/src/util.py", "/src/dynamo.py", "/src/__init__.py"], "/tests/test_pokemon_assets.py": ["/src/pokemon_assets.py"], "/src/dynamo.py": ["/src/__init__.py"], "/src/handler.py": ["/src/pokemon_assets.py"], "/tests/test_img_transform.py": ["/src/img_transform.py"]}
|
26,030
|
Beartime234/whos-that-pokemon-s3gallery
|
refs/heads/master
|
/tests/test_pokemon_assets.py
|
import os
import shutil
from typing import Tuple
import src.pokemon_assets
from src.pokemon_assets import output_dir, saved_file_type, silhouette_image_suffix, original_image_suffix, \
original_image_s3_path, silhouette_image_s3_path
def test_pad_pokemon_id():
    """pad_pokemon_id left-pads a pokedex id to three digits."""
    assert src.pokemon_assets.pad_pokemon_id(1) == "001"
    assert src.pokemon_assets.pad_pokemon_id(23) == "023"
    assert src.pokemon_assets.pad_pokemon_id(144) == "144"
def test_get_pokemon_image_url():
    """Asset URL is the configured base URL plus the zero-padded id and .png."""
    # NOTE(review): relies on `src.config` being exposed by the src package's
    # __init__ — verify if the package imports ever change.
    assert src.pokemon_assets.get_pokemon_assets_image_url(
        1) == f"{src.config['pokemon_assets_url']}001.png"
def test_get_pokemon_orig_fileinfo():
    """Returns (local output path, bare file name) for the original image."""
    assert src.pokemon_assets.get_pokemon_orig_fileinfo("bulbasaur") == (
        f"{output_dir}{original_image_s3_path}bulbasaur{original_image_suffix}{saved_file_type}",
        f"bulbasaur{original_image_suffix}{saved_file_type}")
def test_get_pokemon_silhouette_fileinfo():
    # Smoke test only: asserts nothing, just checks the call does not raise.
    src.pokemon_assets.get_pokemon_silhouette_fileinfo("bulbasaur")
def test_download_img_from_pokemon_assets():
    """End-to-end download of one image into a scratch directory tree.

    Creates the expected output directories, performs the real download
    (network access required), then removes the whole tree.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if not os.path.exists(output_dir + original_image_s3_path):
        os.makedirs(output_dir + original_image_s3_path)
    if not os.path.exists(output_dir + silhouette_image_s3_path):
        os.makedirs(output_dir + silhouette_image_s3_path)
    src.pokemon_assets.download_img_from_pokemon_assets(1)
    shutil.rmtree(output_dir)  # clean up everything the test downloaded
# def test_download_all_pokemon_img():
# if not os.path.exists(output_dir):
# os.makedirs(output_dir)
#
# src.pokemon_assets.multi_download_all_pokemon_img()
#
# # assert len([f for f in os.listdir(output_dir)
# # if os.path.isfile(os.path.join(f"{output_dir}", f))]) == config["max_pokemon_id"] * 2
#
# os.removedirs(output_dir) # Removes directory
def test_get_pokemon_name_from_id():
    """Known pokedex ids resolve to their lowercase species names."""
    assert src.pokemon_assets.get_pokemon_name_from_id(1) == "bulbasaur"
    assert src.pokemon_assets.get_pokemon_name_from_id(700) == "sylveon"
    assert src.pokemon_assets.get_pokemon_name_from_id(550) == "basculin"
|
{"/tests/test_util.py": ["/src/util.py"], "/src/pokemon_assets.py": ["/src/img_transform.py", "/src/s3.py", "/src/util.py", "/src/dynamo.py", "/src/__init__.py"], "/tests/test_pokemon_assets.py": ["/src/pokemon_assets.py"], "/src/dynamo.py": ["/src/__init__.py"], "/src/handler.py": ["/src/pokemon_assets.py"], "/tests/test_img_transform.py": ["/src/img_transform.py"]}
|
26,031
|
Beartime234/whos-that-pokemon-s3gallery
|
refs/heads/master
|
/src/dynamo.py
|
from src import dynamo_table
import json
import decimal
import boto3
# Shared DynamoDB handles, created once at import time. Requires AWS
# credentials/region to be configured in the environment of the caller.
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(dynamo_table)
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSONEncoder that renders decimal.Decimal values as plain numbers.

    DynamoDB returns every number as a Decimal; whole values are emitted
    as ints and anything with a fractional part as a float.
    """

    def default(self, o):
        if not isinstance(o, decimal.Decimal):
            # Not a Decimal: defer to the base class (which raises TypeError).
            return super(DecimalEncoder, self).default(o)
        has_fraction = abs(o) % 1 > 0
        return float(o) if has_fraction else int(o)
def put_pokemon_data(pokedex_id: int, pokemon_name: str, pokemon_orig_image_url: str, pokemon_bw_image_url: str) -> None:
    """Write one pokemon record into the shared DynamoDB table.

    Args:
        pokedex_id: National pokedex number (presumably the table's key —
            confirm against the table schema).
        pokemon_name: Species name.
        pokemon_orig_image_url: Public URL of the original artwork.
        pokemon_bw_image_url: Public URL of the black silhouette image.
    """
    table.put_item(
        Item={
            "PokedexID": pokedex_id,
            "Name": pokemon_name,
            "OriginalImageUrl": pokemon_orig_image_url,
            "BWImageUrl": pokemon_bw_image_url
        }
    )
|
{"/tests/test_util.py": ["/src/util.py"], "/src/pokemon_assets.py": ["/src/img_transform.py", "/src/s3.py", "/src/util.py", "/src/dynamo.py", "/src/__init__.py"], "/tests/test_pokemon_assets.py": ["/src/pokemon_assets.py"], "/src/dynamo.py": ["/src/__init__.py"], "/src/handler.py": ["/src/pokemon_assets.py"], "/tests/test_img_transform.py": ["/src/img_transform.py"]}
|
26,032
|
Beartime234/whos-that-pokemon-s3gallery
|
refs/heads/master
|
/src/handler.py
|
import src.pokemon_assets
import logging
import datetime
def run(event, context):
    """Lambda-style entry point: download and store every pokemon image.

    Args:
        event: Invocation payload (unused).
        context: Lambda runtime context (unused).
    """
    current_date = datetime.datetime.now()
    logging.debug(f"Running whos_that_pokemon_s3gallery: {current_date}")
    src.pokemon_assets.multi_download_all_pokemon_img()
    # NOTE(review): this logs the start timestamp again, not the finish time.
    logging.debug(f"Successfully completed; {current_date}")
if __name__ == '__main__':
    # Allow running the handler directly with empty event/context.
    run({}, {})
|
{"/tests/test_util.py": ["/src/util.py"], "/src/pokemon_assets.py": ["/src/img_transform.py", "/src/s3.py", "/src/util.py", "/src/dynamo.py", "/src/__init__.py"], "/tests/test_pokemon_assets.py": ["/src/pokemon_assets.py"], "/src/dynamo.py": ["/src/__init__.py"], "/src/handler.py": ["/src/pokemon_assets.py"], "/tests/test_img_transform.py": ["/src/img_transform.py"]}
|
26,033
|
Beartime234/whos-that-pokemon-s3gallery
|
refs/heads/master
|
/src/__init__.py
|
import os
import yaml
# Directory containing this package; used to locate the bundled config.yml.
module_dir = os.path.dirname(__file__)
# Required deployment settings — import fails with KeyError if either is unset.
s3_bucket = os.environ["S3_BUCKET"]
dynamo_table = os.environ["DYNAMO_TABLE"]
config = {}
# Loads the config
with open(f"{module_dir}/config.yml", 'r') as stream:
    try:
        config = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        # A malformed config file aborts the whole interpreter.
        print(exc)
        raise SystemExit
|
{"/tests/test_util.py": ["/src/util.py"], "/src/pokemon_assets.py": ["/src/img_transform.py", "/src/s3.py", "/src/util.py", "/src/dynamo.py", "/src/__init__.py"], "/tests/test_pokemon_assets.py": ["/src/pokemon_assets.py"], "/src/dynamo.py": ["/src/__init__.py"], "/src/handler.py": ["/src/pokemon_assets.py"], "/tests/test_img_transform.py": ["/src/img_transform.py"]}
|
26,034
|
Beartime234/whos-that-pokemon-s3gallery
|
refs/heads/master
|
/tests/test_img_transform.py
|
import os
import src.img_transform
from tests import test_dir
# Fixture image committed alongside the tests, and the path the generated
# silhouette is written to during the test.
test_input_image_orig = f"{test_dir}/sneasel.png"
test_output_image_silhouette = f"{test_dir}/sneasel-bw.png"
def test_create_silhouette_of_img():
    """Smoke test: the silhouette file is created, then cleaned up."""
    src.img_transform.create_silhouette_of_img(test_input_image_orig, test_output_image_silhouette)
    # os.remove doubles as the assertion: it raises if no file was written.
    os.remove(test_output_image_silhouette)
|
{"/tests/test_util.py": ["/src/util.py"], "/src/pokemon_assets.py": ["/src/img_transform.py", "/src/s3.py", "/src/util.py", "/src/dynamo.py", "/src/__init__.py"], "/tests/test_pokemon_assets.py": ["/src/pokemon_assets.py"], "/src/dynamo.py": ["/src/__init__.py"], "/src/handler.py": ["/src/pokemon_assets.py"], "/tests/test_img_transform.py": ["/src/img_transform.py"]}
|
26,035
|
pierfied/nnacc
|
refs/heads/main
|
/nnacc/sampler.py
|
import torch
from tqdm.auto import tqdm
from .HMCSampler import HMCSampler
class Sampler:
    """Convenience wrapper that drives HMCSampler over a log-probability.

    Args:
        lnp: Callable returning the scalar log-probability of a parameter
            vector.
        x0: Optional starting point; drawn from a standard normal when
            omitted (requires nparams).
        m: Optional per-parameter HMC mass vector; defaults to ones.
        transform: Optional map from sampling space to parameter space.
        device: Torch device all tensors are created/moved to.
        nparams: Number of parameters. Required when x0 is None; ignored
            (inferred from x0) otherwise.
    """

    def __init__(self, lnp, x0=None, m=None, transform=None, device='cpu', nparams=None):
        self.lnp = lnp
        self.transform = transform
        self.device = device
        if x0 is None:
            # Bug fix: the previous version read self.nparams here before it
            # was ever assigned, so x0=None always raised AttributeError.
            if nparams is None:
                raise ValueError("nparams must be provided when x0 is None")
            self.nparams = nparams
            self.x0 = torch.randn(self.nparams, device=self.device)
        else:
            self.x0 = x0.to(dtype=torch.float32, device=device)
            self.nparams = len(self.x0)
        if m is None:
            self.m = torch.ones(self.nparams, device=self.device)
        else:
            self.m = m.to(dtype=torch.float32, device=device)

    def calc_hess_mass_mat(self, nsteps=1000, eps=1e-4, resamp_x0=True):
        """Gradient-ascend to a mode, then set the mass matrix from the Hessian.

        Runs nsteps of plain gradient ascent with step size eps, computes the
        negative Hessian at the end point, and rotates the problem into its
        eigenbasis: afterwards self.lnp/self.transform operate on offsets from
        the located mode.

        Args:
            nsteps: Number of gradient-ascent steps.
            eps: Gradient-ascent step size.
            resamp_x0: When True, redraw x0 from the Laplace approximation.
        """
        x = self.x0.clone().requires_grad_()
        pbar = tqdm(range(nsteps))
        for i in pbar:
            lnp = self.lnp(x)
            grad = torch.autograd.grad(lnp, x)[0]
            x = x + grad * eps
            pbar.set_description('log-prob: {}'.format(lnp))
        # Build the Hessian row by row from the differentiable gradient.
        hess = []
        lnp = self.lnp(x)
        grad = torch.autograd.grad(lnp, x, create_graph=True)[0]
        for i in range(self.nparams):
            hess.append(torch.autograd.grad(grad[i], x, retain_graph=True)[0])
        hess = torch.stack(hess)
        # SVD of the negative Hessian: u holds the principal axes, m the
        # curvatures (used as per-axis masses), s the corresponding variances.
        u, m, _ = torch.svd(-hess)
        s = 1 / m
        self.u = u
        self.m = m
        # Re-express lnp and transform relative to the located mode.
        self.orig_lnp = self.lnp
        self.xmap = x.detach().clone()
        self.lnp = lambda x: self.orig_lnp(self.xmap + self.u @ x)
        if self.transform is None:
            self.transform = lambda x: self.xmap + self.u @ x
        else:
            self.orig_transform = self.transform
            self.transform = lambda x: self.orig_transform(self.xmap + self.u @ x)
        if resamp_x0:
            self.x0 = torch.randn(self.nparams, device=self.device) * torch.sqrt(s)

    def sample(self, nburn, burn_steps, burn_eps, nsamp, samp_steps, samp_eps):
        """Burn in, then draw and return a chain of samples via HMCSampler."""
        hmc = HMCSampler(self.lnp, self.x0, self.m, self.transform, device=self.device)
        hmc.sample(nburn, burn_steps, burn_eps)
        chain = hmc.sample(nsamp, samp_steps, samp_eps)
        return chain
|
{"/nnacc/predictor.py": ["/nnacc/nn.py"], "/nnacc/__init__.py": ["/nnacc/predictor.py", "/nnacc/nn.py", "/nnacc/sampler.py"]}
|
26,036
|
pierfied/nnacc
|
refs/heads/main
|
/setup.py
|
from setuptools import setup

# Packaging metadata for the nnacc library.
# NOTE(review): version and license are empty strings — fill these in
# before publishing the package.
setup(
    name='nnacc',
    version='',
    packages=['nnacc'],
    url='https://github.com/pierfied/nnacc',
    license='',
    author='Pier Fiedorowicz',
    author_email='pierfied@email.arizona.edu',
    description='NNACC - Neural Network Accelerator for Cosmology Codes'
)
|
{"/nnacc/predictor.py": ["/nnacc/nn.py"], "/nnacc/__init__.py": ["/nnacc/predictor.py", "/nnacc/nn.py", "/nnacc/sampler.py"]}
|
26,037
|
pierfied/nnacc
|
refs/heads/main
|
/nnacc/predictor.py
|
import torch
from torch import nn
from .nn import ResBlock
from tqdm.auto import tqdm
class Predictor(nn.Module):
    """Feed-forward regression model with optional input/output transforms.

    Bundles a torch module (a residual MLP by default) with its optimizer
    and optional feature/target transforms, and provides simple train and
    predict loops.

    NOTE: the `train` method shadows nn.Module.train(mode); the internal
    loops therefore switch modes via self.model.train()/self.model.eval().
    """

    def __init__(self, in_size, out_size, model=None, optim=None, X_transform=None,
                 y_transform=None, device='cpu'):
        """Create the predictor.

        Args:
            in_size: Number of input features.
            out_size: Number of outputs.
            model: Optional custom torch module; defaults to a 3-block residual MLP.
            optim: Optional optimizer; defaults to Adam over the model parameters.
            X_transform: Optional transform applied to inputs before the model.
            y_transform: Optional transform applied to the model output.
            device: Torch device the model and every batch are moved to.
        """
        super(Predictor, self).__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.device = device
        if model is not None:
            self.model = model.to(device)
        else:
            hidden_size = 256
            self.model = nn.Sequential(
                nn.Linear(in_size, hidden_size),
                ResBlock(hidden_size, hidden_size),
                ResBlock(hidden_size, hidden_size),
                ResBlock(hidden_size, hidden_size),
                nn.ReLU(),
                nn.Linear(hidden_size, out_size)
            ).to(device)
        self.optim = optim if optim is not None else torch.optim.Adam(self.model.parameters())
        self.X_transform = X_transform if X_transform is not None else nn.Identity()
        self.y_transform = y_transform if y_transform is not None else nn.Identity()

    def train(self, dataset, num_epochs, loss_fn, val_dataset=None, val_metric_fn=None):
        """Train for num_epochs over dataset, optionally validating each epoch.

        Args:
            dataset: Iterable of (X, y_target) batches.
            num_epochs: Number of passes over the dataset.
            loss_fn: Training loss callable (y_pred, y_target) -> scalar tensor.
            val_dataset: Optional iterable of validation batches.
            val_metric_fn: Optional validation metric; defaults to loss_fn.

        Returns:
            A numpy array of per-batch training losses, plus a numpy array of
            per-epoch validation metrics when val_dataset is given.
        """
        train_losses = []
        val_metrics = []
        if val_metric_fn is None:
            val_metric_fn = loss_fn
        pbar = tqdm(range(num_epochs))
        for i in pbar:
            data_iter = iter(dataset)
            self.model.train()
            for X, y_target in data_iter:
                X = X.to(self.device)
                y_target = y_target.to(self.device)
                self.optim.zero_grad()
                y_pred = self.y_transform(self.model(self.X_transform(X)))
                loss = loss_fn(y_pred, y_target)
                loss.backward()
                self.optim.step()
                # Bug fix: store a detached copy — keeping the live loss tensor
                # retained the full autograd graph of every batch in memory.
                train_losses.append(loss.detach())
                pbar.set_description('Batch Loss: {} Epoch'.format(loss.item()))
            if val_dataset is not None:
                val_iter = iter(val_dataset)
                self.model.eval()
                val_metric = None
                val_count = 0
                # Running mean of the metric over all validation batches.
                for X, y_target in val_iter:
                    X = X.to(self.device)
                    y_target = y_target.to(self.device)
                    with torch.no_grad():
                        y_pred = self.y_transform(self.model(self.X_transform(X)))
                    if val_metric is None:
                        val_metric = val_metric_fn(y_pred, y_target)
                    else:
                        val_metric += val_metric_fn(y_pred, y_target)
                    val_count += 1
                val_metrics.append(val_metric / val_count)
        if val_dataset is not None:
            return torch.stack(train_losses).to('cpu').numpy(), \
                torch.stack(val_metrics).detach().to('cpu').numpy()
        return torch.stack(train_losses).to('cpu').numpy()

    def forward(self, X):
        """Alias for predict so the Predictor itself is callable."""
        return self.predict(X)

    def predict(self, X):
        """Run inference; accepts a single sample (1-D) or a batch (2-D).

        A 1-D input is promoted to a batch of one and the result squeezed
        back to 1-D. Bug fix: inference now runs under torch.no_grad() so
        no autograd graph is built.
        """
        self.model.eval()
        one_input = len(X.shape) == 1
        if one_input:
            X = X.view(1, -1)
        with torch.no_grad():
            y_pred = self.y_transform(self.model(self.X_transform(X)))
        if one_input:
            y_pred = y_pred.view(-1)
        return y_pred
|
{"/nnacc/predictor.py": ["/nnacc/nn.py"], "/nnacc/__init__.py": ["/nnacc/predictor.py", "/nnacc/nn.py", "/nnacc/sampler.py"]}
|
26,038
|
pierfied/nnacc
|
refs/heads/main
|
/nnacc/nn.py
|
from torch import nn
import torch.nn.functional as F
class ResBlock(nn.Module):
    """Pre-activation residual MLP block.

    Two ReLU->Linear stages whose output is scaled by 0.1 and added to a
    skip connection; the skip is an identity when the sizes already match,
    and a bias-free linear projection otherwise.
    """

    def __init__(self, in_size, out_size):
        super(ResBlock, self).__init__()
        self.layer1 = nn.Linear(in_size, out_size)
        self.layer2 = nn.Linear(out_size, out_size)
        same_shape = in_size == out_size
        self.skip_layer = nn.Identity() if same_shape else nn.Linear(in_size, out_size, bias=False)

    def forward(self, x):
        hidden = self.layer1(F.relu(x))
        hidden = self.layer2(F.relu(hidden))
        # Small residual scale (0.1) keeps the block near-identity at init.
        return hidden * 0.1 + self.skip_layer(x)
|
{"/nnacc/predictor.py": ["/nnacc/nn.py"], "/nnacc/__init__.py": ["/nnacc/predictor.py", "/nnacc/nn.py", "/nnacc/sampler.py"]}
|
26,039
|
pierfied/nnacc
|
refs/heads/main
|
/nnacc/__init__.py
|
from .predictor import *
from .HMCSampler import *
from .nn import *
from .sampler import *
|
{"/nnacc/predictor.py": ["/nnacc/nn.py"], "/nnacc/__init__.py": ["/nnacc/predictor.py", "/nnacc/nn.py", "/nnacc/sampler.py"]}
|
26,040
|
rpeace/contagion
|
refs/heads/master
|
/HeadlineGrabber.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 13:01:05 2015
@author: rob
"""
import random
from bs4 import BeautifulSoup
import urllib2
import datetime
import calendar
class HeadlineGrabber:
    """Scrapes a headline for a date from Wikipedia's current-events portal.

    Python 2 code (print statement, urllib2).
    """
    def __init__(self):
        return
    def get_headline(self, date):
        """Return the first event headline for `date` plus a fixed suffix.

        Builds the Portal:Current_events page URL for the given date, takes
        the first <li> in the events table, and strips any trailing
        parenthesised source attribution.
        """
        month = calendar.month_name[date.month]
        url = "http://en.wikipedia.org/wiki/Portal:Current_events/"+str(date.year) +"_"+ month + "_"+str(date.day)
        print url
        soup = BeautifulSoup(urllib2.urlopen(url))
        # NOTE(review): assumes the page still renders a table with class
        # "vevent" — .find returns None and this raises if the markup changes.
        headline = soup.find("table", class_="vevent").find("li").text
        headline = headline.split("(")[0]
        return headline + " Market contagion results."
|
{"/Collector.py": ["/connection.py"]}
|
26,041
|
rpeace/contagion
|
refs/heads/master
|
/dbtest.py
|
import mysql.connector

# Minimal connectivity smoke test for the Stocks database.
# SECURITY NOTE(review): credentials are hard-coded in source (and repeated
# across this repo) — move them to config/environment before sharing.
# Connection
connection = mysql.connector.connect(user='rpeace', password='3Q5CmaE7',
                                     host='99.254.1.29',
                                     database='Stocks')
# Cursor
cursor = connection.cursor()
# Execute Query
cursor.execute("SELECT * FROM Region;")
# NOTE(review): results are never fetched and the cursor is never closed
# before the connection is closed.
# Close Connection
connection.close()
|
{"/Collector.py": ["/connection.py"]}
|
26,042
|
rpeace/contagion
|
refs/heads/master
|
/connection.py
|
#!/usr/bin/python
import sys
import datetime
import _mysql
# Main
def main():
    """Ad-hoc smoke test: print a sample of every query helper's output.

    Python 2 code (01/31 day literals are octal-style and invalid in Python 3).
    """
    print("[STOCKS]")
    print(get_stocks("TSE", "T", datetime.datetime(2014, 01, 01), datetime.datetime(2014, 12, 31), "", "", ""))
    # NOTE(review): label below is missing its opening "[".
    print("REGIONS]")
    print(get_regions())
    print("[COUNTRIES]")
    print(get_countries(""))
    print("[SECTORS]")
    print(get_sectors())
    print("[Markets]")
    print(get_markets())
    print("[Symbols]")
    print(get_symbols(""))
# Get Stock Data
def get_stocks(exchange, symbol, start, end, region, country, sector):
    """Fetch per-day close prices joined with company/exchange metadata.

    Any filter passed as "" is skipped; start/end datetimes bound the
    inclusive date range. Returns the raw fetched row tuples.

    SECURITY NOTE(review): all filters are concatenated directly into the
    SQL string — injectable; switch to parameterized queries before exposing
    to untrusted input.
    """
    query = "SELECT Company.companyID, Exchange.exchangeCode, Company.Symbol, "
    query += "Date.year, Date.month, Date.day, Stock.close FROM Company "
    query += "INNER JOIN Country ON Company.countryID = Country.countryID "
    query += "INNER JOIN Region ON Region.regionID = Country.regionID "
    query += "INNER JOIN Sector ON Sector.sectorID = Company.sectorID "
    query += "INNER JOIN Exchange ON Exchange.exchangeID = Company.exchangeID "
    query += "INNER JOIN Stock ON Stock.companyID = Company.companyID "
    query += "INNER JOIN Date ON Date.dateID = Stock.dateID "
    query += " WHERE "
    # Symbol
    if symbol != "":
        query += "Company.symbol = '" + symbol + "' AND ";
    # Country
    if country != "":
        query += "Country.country = '" + country + "' AND ";
    # Region
    if region != "":
        query += " Region.region = '" + region + "' AND ";
    # Sector
    if sector != "":
        query += " Sector.sector = '" + sector + "' AND ";
    # Exchange
    if exchange != "":
        query += " Exchange.exchangeCode = '" + exchange + "' AND ";
    # Date bounds as zero-padded YYYYMMDD strings.
    d1 = str(start.year) + str(start.month).zfill(2) + str(start.day).zfill(2)
    d2 = str(end.year) + str(end.month).zfill(2) + str(end.day).zfill(2)
    query += "CONCAT(Date.year, LPAD(Date.month, 2, '0'), LPAD(Date.day, 2, '0')) BETWEEN "
    query += d1 + " AND " + d2
    # End Query
    query += ";"
    # Query Database
    try:
        # Connection (hard-coded credentials — see security note above)
        connection = _mysql.connect('99.254.1.29', 'rpeace', '3Q5CmaE7', 'Stocks')
        # Query
        connection.query(query)
        # Result
        result = connection.use_result()
        # Rows
        rows = result.fetch_row(result.num_rows())
    # Query Failed (Python 2 except syntax)
    except _mysql.Error, e:
        print "Error %d: %s" % (e.args[0], e.args[1])
        sys.exit(1)
    # Close Connection
    finally:
        if connection:
            connection.close()
    return rows
# Get Regions
def get_regions():
    """Return all region names from the Region table, sorted."""
    query = "SELECT region FROM Region;"
    # Query Database
    try:
        # Connection
        connection = _mysql.connect('99.254.1.29', 'rpeace', '3Q5CmaE7', 'Stocks')
        # Query
        connection.query(query)
        # Result
        result = connection.use_result()
        # Rows
        rows = result.fetch_row(result.num_rows())
    # Query Failed (Python 2 except syntax)
    except _mysql.Error, e:
        print "Error %d: %s" % (e.args[0], e.args[1])
        sys.exit(1)
    # Close Connection
    finally:
        if connection:
            connection.close()
    # Each row is a 1-tuple; unwrap and sort.
    return sorted([ seq[0] for seq in rows ])
# Get Countries
def get_countries(region):
    """Return country names, optionally filtered by region name, sorted.

    SECURITY NOTE(review): `region` is concatenated into the SQL string —
    injectable; parameterize.
    """
    query = "SELECT country FROM Country INNER JOIN Region ON Country.regionID = Region.regionID"
    # Region ("" means no filter)
    if region != "":
        query += " WHERE Region.region = '" + region + "'"
    # End Query
    query += ";"
    # Query Database
    try:
        # Connection
        connection = _mysql.connect('99.254.1.29', 'rpeace', '3Q5CmaE7', 'Stocks')
        # Query
        connection.query(query)
        # Result
        result = connection.use_result()
        # Rows
        rows = result.fetch_row(result.num_rows())
    # Query Failed (Python 2 except syntax)
    except _mysql.Error, e:
        print "Error %d: %s" % (e.args[0], e.args[1])
        sys.exit(1)
    # Close Connection
    finally:
        if connection:
            connection.close()
    return sorted([ seq[0] for seq in rows ])
# Get Sectors
def get_sectors():
    """Return all sector names from the Sector table, sorted."""
    query = "SELECT sector FROM Sector;"
    # Query Database
    try:
        # Connection
        connection = _mysql.connect('99.254.1.29', 'rpeace', '3Q5CmaE7', 'Stocks')
        # Query
        connection.query(query)
        # Result
        result = connection.use_result()
        # Rows
        rows = result.fetch_row(result.num_rows())
    # Query Failed (Python 2 except syntax)
    except _mysql.Error, e:
        print "Error %d: %s" % (e.args[0], e.args[1])
        sys.exit(1)
    # Close Connection
    finally:
        if connection:
            connection.close()
    return sorted([ seq[0] for seq in rows ])
# Get Markets
def get_markets():
    """Return all exchange codes from the Exchange table, sorted."""
    query = "SELECT exchangeCode FROM Exchange;"
    # Query Database
    try:
        # Connection
        connection = _mysql.connect('99.254.1.29', 'rpeace', '3Q5CmaE7', 'Stocks')
        # Query
        connection.query(query)
        # Result
        result = connection.use_result()
        # Rows
        rows = result.fetch_row(result.num_rows())
    # Query Failed (Python 2 except syntax)
    except _mysql.Error, e:
        print "Error %d: %s" % (e.args[0], e.args[1])
        sys.exit(1)
    # Close Connection
    finally:
        if connection:
            connection.close()
    return sorted([ seq[0] for seq in rows ])
# Get Symbols
def get_symbols(market):
    """Return distinct company symbols, optionally filtered by exchange, sorted.

    NOTE(review): this filters on Exchange.exchange while get_markets selects
    Exchange.exchangeCode — confirm the column name against the schema.
    SECURITY NOTE(review): `market` is concatenated into the SQL — injectable.
    """
    query = "SELECT symbol FROM Company INNER JOIN Exchange ON Company.exchangeID = Exchange.exchangeID"
    # Market ("" means no filter)
    if market != "":
        query += " WHERE Exchange.exchange = '" + market + "'"
    # End Query
    query += ";"
    # Query Database
    try:
        # Connection
        connection = _mysql.connect('99.254.1.29', 'rpeace', '3Q5CmaE7', 'Stocks')
        # Query
        connection.query(query)
        # Result
        result = connection.use_result()
        # Rows
        rows = result.fetch_row(result.num_rows())
    # Query Failed (Python 2 except syntax)
    except _mysql.Error, e:
        print "Error %d: %s" % (e.args[0], e.args[1])
        sys.exit(1)
    # Close Connection
    finally:
        if connection:
            connection.close()
    # De-duplicate (symbols can repeat across exchanges) then sort.
    return sorted(list(set([ seq[0] for seq in rows ])))
# Execute Main
if __name__ == "__main__":
    # Run the smoke-test prints only when executed directly.
    main()
|
{"/Collector.py": ["/connection.py"]}
|
26,043
|
rpeace/contagion
|
refs/heads/master
|
/Collector.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 10:15:44 2015
@author: rob
"""
import pandas.io.data as web
import pandas as pd
import datetime
from datetime import timedelta
from dateutil import parser as dateparser
import connection
class Collector:
    """Builds equal-weighted daily return series for baskets of stocks."""
    def __init__(self):
        return
    def get_stock_data(self, exchange, symbol, start, end, region, country, sector):
        """Average daily return across all companies matching the filters.

        "(All)" in any filter is translated to "", which the connection layer
        treats as no filter. Fetching starts one week before `start` so the
        first in-range return has a prior close to difference against.
        Returns a pandas Series indexed by date (the first date is dropped).
        """
        if region == "(All)":
            region = ""
        if country == "(All)":
            country = ""
        if sector == "(All)":
            sector = ""
        if exchange == "(All)":
            exchange = ""
        s = dateparser.parse(start) - timedelta(weeks=1)
        e = dateparser.parse(end)
        cdata = connection.get_stocks(exchange, symbol, s, e, region, country, sector)
        IDs = list(set([line[0] for line in cdata]))
        dates = []
        for line in cdata:
            dates.append(datetime.datetime(int(line[3]), int(line[4]), int(line[5])))
        dates = sorted(list(set(dates)))
        # Accumulator: one slot per trading day, first day dropped.
        returns = pd.Series(0.0, dates[1:])
        dataDict = {}
        for ID in IDs:
            dataDict[ID] = []
            for line in cdata:
                if line[0] == ID:
                    dataDict[ID].append(line)
            for idx, line in enumerate(dataDict[ID]):
                if idx > 0:
                    # NOTE(review): the return divides by the CURRENT close,
                    # not the previous one — confirm this is intended.
                    returns[datetime.datetime(int(line[3]), int(line[4]), int(line[5]))] += (float(dataDict[ID][idx][6]) - float(dataDict[ID][idx-1][6])) / float(dataDict[ID][idx][6])
        # Equal-weight average over companies.
        returns = returns.div(len(IDs))
        return returns
        # f['Return'] = pd.Series()
        # f.Return = (f.Close-f.Close.shift(1))/f.Close
        # return f[dateparser.parse(start):]
    def get_nyse_stock_data(self, start, end):
        """Average returns for a fixed basket of large US tickers."""
        symbols = ["AAPL", "XOM", "MSFT", "JNJ", "GE", "WFC", "PG", "JPM", "PFE"]
        return self.get_average_stock_data(symbols, start, end)
    def get_sehk_stock_data(self, start, end):
        """Average returns for a fixed basket of Hong Kong (SEHK) tickers."""
        symbols = ["0001.HK", "0002.HK", "0003.HK", "0004.HK", "0005.HK", "0006.HK", "0007.HK", "0008.HK", "0009.HK", "0010.HK"]
        return self.get_average_stock_data(symbols, start, end)
    def get_lse_stock_data(self, start, end):
        """Average returns for a fixed basket of London (LSE) tickers."""
        symbols = ["III.L", "ABF.L", "ADN.L", "ADM.L", "AGK.L", "AAL.L", "ANTO.L", "ARM.L", "AHT.L", "BAB.L"]
        return self.get_average_stock_data(symbols, start, end)
    def get_average_stock_data(self, symbols, start, end):
        """Download each symbol from Yahoo and average their return panels.

        NOTE(review): pandas.io.data and the Yahoo reader were removed from
        pandas long ago — this path needs pandas-datareader or similar.
        """
        stocks = {}
        s = dateparser.parse(start) - timedelta(weeks=1)
        e = dateparser.parse(end)
        for symbol in symbols:
            stocks[symbol] = web.DataReader(symbol, "yahoo", s, e)
            stocks[symbol]['Return'] = pd.Series()
            stocks[symbol].Return = (stocks[symbol].Close-stocks[symbol].Close.shift(1))/stocks[symbol].Close
            stocks[symbol] = stocks[symbol][dateparser.parse(start):]
        panel = pd.Panel(stocks)
        avg = panel.mean(axis=0)
        return avg
# Demo invocation. Bug fix: previously this ran unconditionally at import
# time, so merely importing Collector triggered a database/network call.
if __name__ == "__main__":
    c = Collector()
    c.get_stock_data("", "", "2003-01-01", "2003-12-31", "", "United States", "Technology")
|
{"/Collector.py": ["/connection.py"]}
|
26,044
|
rpeace/contagion
|
refs/heads/master
|
/Main.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 10:43:18 2015
@author: rob
"""
from Collector import *
import connection
from HeadlineGrabber import *
import pandas as pd
import sys
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from arch import arch_model
import statsmodels.api as sm
import scipy.stats
import seaborn as sns
class Window(QtGui.QDialog):
    """Qt dialog for exploring rolling correlation ("contagion") between two
    market segments, with optional ARMA/ARCH residual modelling.

    Python 2 / PyQt4 code (print statements below).
    """
    def __init__(self, parent=None):
        """Build the UI: matplotlib canvas, two market-selection columns,
        shared date/model controls, and the result labels."""
        super(Window, self).__init__(parent)
        # a figure instance to plot on
        self.figure = plt.figure()
        # this is the Canvas Widget that displays the `figure`
        # it takes the `figure` instance as a parameter to __init__
        self.canvas = FigureCanvas(self.figure)
        staySmall = QtGui.QSizePolicy()
        staySmall.setHorizontalPolicy(QtGui.QSizePolicy.Fixed)
        staySmall.setVerticalPolicy(QtGui.QSizePolicy.Fixed)
        goBig = QtGui.QSizePolicy()
        goBig.setHorizontalPolicy(QtGui.QSizePolicy.Ignored)
        goBig.setVerticalPolicy(QtGui.QSizePolicy.Ignored)
        self.canvas.setSizePolicy(goBig)
        # this is the Navigation widget
        # it takes the Canvas widget and a parent
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Just some button connected to `plot` method
        self.button = QtGui.QPushButton('Plot')
        self.button.clicked.connect(self.plot)
        # Market 1 selection widgets; "(All)" means no filter.
        self.sector1 = QtGui.QComboBox()
        self.sector1.addItem("(All)")
        self.region1 = QtGui.QComboBox()
        self.region1.currentIndexChanged.connect(self.update_country1)
        self.region1.addItem("(All)")
        self.country1 = QtGui.QComboBox()
        self.country1.addItem("(All)")
        self.market1 = QtGui.QComboBox()
        self.market1.addItem("(All)")
        self.symbol1 = QtGui.QLineEdit()
        # Market 2 selection widgets.
        self.sector2 = QtGui.QComboBox()
        self.sector2.addItem("(All)")
        self.region2 = QtGui.QComboBox()
        self.region2.currentIndexChanged.connect(self.update_country2)
        self.region2.addItem("(All)")
        self.country2 = QtGui.QComboBox()
        self.country2.addItem("(All)")
        self.market2 = QtGui.QComboBox()
        self.market2.addItem("(All)")
        self.symbol2 = QtGui.QLineEdit()
        # Populate both filter columns from the database.
        for sector in connection.get_sectors():
            self.sector1.addItem(sector)
            self.sector2.addItem(sector)
        for region in connection.get_regions():
            self.region1.addItem(region)
            self.region2.addItem(region)
        for country in connection.get_countries(""):
            self.country1.addItem(country)
            self.country2.addItem(country)
        for market in connection.get_markets():
            self.market1.addItem(market)
            self.market2.addItem(market)
        # Mutually exclusive model choice via a button group.
        self.models = QtGui.QButtonGroup(self)
        self.noModel = QtGui.QCheckBox('No modelling')
        self.models.addButton(self.noModel)
        self.useArma = QtGui.QCheckBox('Model with ARMA')
        self.models.addButton(self.useArma)
        self.useArch = QtGui.QCheckBox('Model with ARCH')
        self.models.addButton(self.useArch)
        self.useGarch = QtGui.QCheckBox('Model with GARCH')
        self.models.addButton(self.useGarch)
        self.startdate = QtGui.QLineEdit(self)
        self.startdate.setText("2010-01-01")
        self.enddate = QtGui.QLineEdit(self)
        self.enddate.setText("2012-01-01")
        self.s = QtGui.QLineEdit(self)
        self.s.setText("2")
        self.samples = QtGui.QLineEdit(self)
        self.samples.setText("5")
        self.average = QtGui.QLabel(" Average: ")
        self.std = QtGui.QLabel(" Std: ")
        self.zscore = QtGui.QLabel(" ")
        self.conclusion = QtGui.QLabel(" ")
        self.average.setSizePolicy(staySmall)
        self.std.setSizePolicy(staySmall)
        self.zscore.setSizePolicy(staySmall)
        self.conclusion.setSizePolicy(staySmall)
        # set the layoutcovariance
        input1layout = QtGui.QVBoxLayout()
        input1layout.addWidget(QtGui.QLabel("Sector"))
        input1layout.addWidget(self.sector1)
        input1layout.addWidget(QtGui.QLabel("Region"))
        input1layout.addWidget(self.region1)
        input1layout.addWidget(QtGui.QLabel("Country"))
        input1layout.addWidget(self.country1)
        input1layout.addWidget(QtGui.QLabel("Market"))
        input1layout.addWidget(self.market1)
        input1layout.addWidget(QtGui.QLabel("Symbol"))
        input1layout.addWidget(self.symbol1)
        input2layout = QtGui.QVBoxLayout()
        input2layout.addWidget(QtGui.QLabel("Sector"))
        input2layout.addWidget(self.sector2)
        input2layout.addWidget(QtGui.QLabel("Region"))
        input2layout.addWidget(self.region2)
        input2layout.addWidget(QtGui.QLabel("Country"))
        input2layout.addWidget(self.country2)
        input2layout.addWidget(QtGui.QLabel("Market"))
        input2layout.addWidget(self.market2)
        input2layout.addWidget(QtGui.QLabel("Symbol"))
        input2layout.addWidget(self.symbol2)
        # User input that is not specific to each market. Also a button
        input3layout = QtGui.QVBoxLayout()
        input3layout.addWidget(QtGui.QLabel("Start date"))
        input3layout.addWidget(self.startdate)
        input3layout.addWidget(QtGui.QLabel("End date"))
        input3layout.addWidget(self.enddate)
        input3layout.addWidget(QtGui.QLabel("Smoothing level (# of samples)"))
        input3layout.addWidget(self.s)
        input3layout.addWidget(QtGui.QLabel("Rolling correlation samples"))
        input3layout.addWidget(self.samples)
        # NOTE(review): self.useGarch is created above but never added to any
        # layout, so the GARCH option is invisible in the UI (its handling in
        # plot() is also commented out).
        input3layout.addWidget(self.noModel)
        input3layout.addWidget(self.useArma)
        input3layout.addWidget(self.useArch)
        input3layout.addWidget(self.button)
        menuLayout = QtGui.QGridLayout()
        menuLayout.addWidget(QtGui.QLabel("Select market data"))
        menuLayout.addLayout(input1layout,1,0)
        menuLayout.addLayout(input2layout,1,1)
        menuLayout.addLayout(input3layout, 4, 0, 1, 2)
        plotLayout = QtGui.QVBoxLayout()
        plotLayout.addWidget(self.toolbar)
        plotLayout.addWidget(self.canvas)
        plotLayout.addWidget(self.average)
        plotLayout.addWidget(self.std)
        plotLayout.addWidget(self.zscore)
        plotLayout.addWidget(self.conclusion)
        layout = QtGui.QGridLayout()
        layout.addLayout(menuLayout,1,0)
        layout.addLayout(plotLayout,1,1)
        self.setLayout(layout)
        # Top axes: the two smoothed return series; bottom: their correlation.
        self.axes1 = self.figure.add_subplot(211)
        self.axes2 = self.figure.add_subplot(212)
    def update_country1(self):
        """Refill the first country combo when region 1 changes."""
        self.country1.clear()
        self.country1.addItem("(All)")
        if self.region1.currentText() == "(All)":
            for country in connection.get_countries(""):
                self.country1.addItem(country)
        else:
            for country in connection.get_countries(self.region1.currentText()):
                self.country1.addItem(country)
        return
    def update_country2(self):
        """Refill the second country combo when region 2 changes."""
        self.country2.clear()
        self.country2.addItem("(All)")
        if self.region2.currentText() == "(All)":
            for country in connection.get_countries(""):
                self.country2.addItem(country)
        else:
            for country in connection.get_countries(self.region2.currentText()):
                self.country2.addItem(country)
        return
    def find_high_region(self, corrData):
        """Find the 30-day window with the highest mean-minus-std correlation.

        Returns (window start date, the correlation slice for that window).
        NOTE(review): topDate stays unbound (NameError) if the series spans
        fewer than ~30 days.
        """
        topAvg = -1.0
        topStd = -1.0
        startDates = []
        for date in corrData.axes[0]:
            if date < corrData.axes[0][-1]-timedelta(days=30):
                startDates.append(date)
        for date in startDates:
            avg = corrData[date:date+timedelta(days=30)].mean()
            std = corrData[date:date+timedelta(days=30)].std()
            if avg-std > topAvg-topStd:
                topAvg = avg
                topStd = std
                topDate = date
        return (topDate, corrData[topDate:topDate+timedelta(days=30)])
    def find_low_region(self, corrData):
        """Find the 60-day window with the lowest mean-minus-std correlation.

        NOTE(review): searches 60-day windows but returns a 30-day slice —
        confirm whether this asymmetry with find_high_region is intended.
        """
        topAvg = 1.0
        topStd = 1.0
        startDates = []
        for date in corrData.axes[0]:
            if date < corrData.axes[0][-1]-timedelta(days=60):
                startDates.append(date)
        for date in startDates:
            avg = corrData[date:date+timedelta(days=60)].mean()
            std = corrData[date:date+timedelta(days=60)].std()
            if avg-std < topAvg-topStd:
                topAvg = avg
                topStd = std
                topDate = date
        return (topDate, corrData[topDate:topDate+timedelta(days=30)])
    def plot(self):
        """Fetch both return series, model their rolling correlation, run the
        contagion t-test, and redraw both axes."""
        # retrieve stock data
        c = Collector()
        region1 = self.region1.currentText()
        region2 = self.region2.currentText()
        market1 = self.market1.currentText()
        market2 = self.market2.currentText()
        sector1 = self.sector1.currentText()
        sector2 = self.sector2.currentText()
        country1 = self.country1.currentText()
        country2 = self.country2.currentText()
        s1 = self.symbol1.text()
        s2 = self.symbol2.text()
        samples = int(self.s.text())
        covsamples = int(self.samples.text())
        start = self.startdate.text()
        end = self.enddate.text()
        returnSeries1 = c.get_stock_data(market1, s1, start, end, region1, country1, sector1)
        returnSeries2 = c.get_stock_data(market2, s2, start, end, region2, country2, sector2)
        # Smooth both series, then take their rolling correlation.
        pltdata = pd.DataFrame()
        pltdata["1"] = pd.rolling_mean(returnSeries1,samples)
        pltdata["2"] = pd.rolling_mean(returnSeries2,samples)
        pltdata['Corr'] = pd.rolling_corr(pltdata["1"], pltdata["2"], covsamples)
        # if self.useGarch.isChecked():
        #     am = arch_model(pltdata['Corr'][pd.notnull(pltdata['Corr'])], lags=5, mean="ARX")
        #     res = am.fit()
        #     pltdata['Corr2'] = res.resid
        if self.useArch.isChecked():
            am1 = arch_model(pltdata['Corr'][pd.notnull(pltdata['Corr'])], vol='ARCH', lags=5, mean="ARX")
            res = am1.fit(iter=5)
            pltdata['Corr2'] = res.resid
            #Trying to fit an ARMA model causes an unknown lockup. Disabled for now.
        elif self.useArma.isChecked():
            arma_res = sm.tsa.ARMA(pltdata['Corr'], (5,0))
            arma_resres = arma_res.fit()
            pltdata['Corr2'] = arma_resres.resid
        else:
            pltdata['Corr2'] = pltdata['Corr']
        # Two-sample t-test between the most- and least-correlated windows.
        high_data = self.find_high_region(pltdata['Corr2'])
        low_data = self.find_low_region(pltdata['Corr2'])
        pscore = scipy.stats.ttest_ind(high_data[1], low_data[1])
        print "T test result:", pscore
        if pscore[1] < 0.05:
            # h = HeadlineGrabber()
            # headline = h.get_headline(high_data[0].to_datetime())
            self.zscore.setText("Contagion found at "+str(high_data[0])+" with P-score " + str(pscore[1]) + " ")
            # self.conclusion.setText("Headline for article: "+headline)
        else:
            self.zscore.setText("No contagion found")
            # self.conclusion.setText(" ")
        # Constant reference lines: mean and one-std bands of the correlation.
        pltdata['mean'] = [pltdata['Corr2'].mean()]*len(pltdata['Corr2'])
        pltdata['upperstd'] = [pltdata['Corr2'].mean()+pltdata['Corr2'].std()]*len(pltdata['Corr2'])
        pltdata['lowerstd'] = [pltdata['Corr2'].mean()-pltdata['Corr2'].std()]*len(pltdata['Corr2'])
        print pltdata
        self.axes1.cla()
        self.axes2.cla()
        pltdata["1"].plot(ax=self.axes1, legend=True)
        pltdata["2"].plot(ax=self.axes1, legend=True)
        pltdata['Corr2'].plot(ax=self.axes2, legend=True)
        pltdata['mean'].plot(ax=self.axes2, legend=True)
        pltdata['upperstd'].plot(ax=self.axes2, legend=True)
        pltdata['lowerstd'].plot(ax=self.axes2, legend=True)
        self.std.setText(" Std: %0.3f" % pltdata['Corr2'].std())
        self.average.setText(" Average: %0.3f" % pltdata['Corr2'].mean())
        # refresh canvas
        self.canvas.draw()
if __name__ == '__main__':
    # Launch the Qt application and block until the window is closed.
    app = QtGui.QApplication(sys.argv)
    main = Window()
    main.show()
    sys.exit(app.exec_())
|
{"/Collector.py": ["/connection.py"]}
|
26,045
|
kkthaker/python-project
|
refs/heads/master
|
/Detection/detect2.py
|
from __future__ import print_function
#==================================================================================================================
#Importing Necessarry or Required APIS or Packages:-
#==================================================================================================================
#For Some Array Operation:-
import numpy as np
#For Some Graphical Purpose:-
import matplotlib.pyplot as plt
import matplotlib.patches as patches
#For Composing the Pictures ofr Images in Different Configuration:-
from skimage import io
#To define techniques to match specified patterns according to rules related to Linux:-
import glob
#For Conversion of Nd Array's (Tensors) to Maatrices:-
from sklearn.utils.linear_assignment_ import linear_assignment
#We have to Work with Video For that time Functions and Qualities are Required:-
import time
#This is Suitable for Current Operating System of Any Computer that's Why For Some File Manipulation it is used:-
import os.path
#To make sense of how to parse Arguements out of sys.argv:-
import argparse
#It is a just-in-time compiler for Python that works best on
#code that uses NumPy arrays and functions, and loops:-
from numba import jit
#To Create Some Constant Velocity Model For Common Vehicles:-
from filterpy.kalman import KalmanFilter
#================================================================================================
#To Computes IUO between two bboxes in the form [x1,y1,x2,y2]:-
#================================================================================================
def iou(bb_test,bb_gt):
    """Return the intersection-over-union of two [x1,y1,x2,y2] boxes."""
    inter_x1 = np.maximum(bb_test[0], bb_gt[0])
    inter_y1 = np.maximum(bb_test[1], bb_gt[1])
    inter_x2 = np.minimum(bb_test[2], bb_gt[2])
    inter_y2 = np.minimum(bb_test[3], bb_gt[3])
    # Clamp to zero so disjoint boxes yield an empty intersection.
    inter_w = np.maximum(0., inter_x2 - inter_x1)
    inter_h = np.maximum(0., inter_y2 - inter_y1)
    intersection = inter_w * inter_h
    area_test = (bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])
    area_gt = (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1])
    return(intersection / (area_test + area_gt - intersection))
#==============================================================================================
#To Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where
#x,y is the centre of the box and s is the scale/area and r is the Aspect Ratio:-
#==============================================================================================
def convert_bbox_to_z(bbox):
    """Convert a [x1,y1,x2,y2] box to the (4,1) measurement vector [x,y,s,r].

    (x, y) is the box centre, s the scale (area) and r the aspect ratio w/h.
    """
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    centre_x = bbox[0] + width / 2.
    centre_y = bbox[1] + height / 2.
    scale = width * height
    ratio = width / float(height)
    return np.array([centre_x, centre_y, scale, ratio]).reshape((4, 1))
#==============================================================================================
#To Takes a bounding box in the centre form [x,y,s,r] and returns it in the form [x1,y1,x2,y2]
#Where x1,y1 is the top left and x2,y2 is the bottom right:-
#==============================================================================================
def convert_x_to_bbox(x,score=None):
    """Convert the state vector [x,y,s,r,...] back to a corner-form box.

    Returns a (1,4) array [x1,y1,x2,y2], or (1,5) with `score` appended
    when a score is supplied.
    """
    w = np.sqrt(x[2]*x[3])
    h = x[2]/w
    # Fix: `score==None` is an un-idiomatic equality test that can trigger
    # element-wise comparison when score is a numpy value; identity check
    # against None is the correct form.
    if score is None:
        return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
    else:
        return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
#==============================================================================================
#Here,This class represents the internel state of individual tracked objects observed as bbox:-
#==============================================================================================
class KalmanBoxTracker(object):
    """Internal state of one tracked object, modelled as a Kalman filter
    over [x, y, s, r, vx, vy, vs] with bbox measurements [x, y, s, r]."""
    # Global id counter shared by all trackers (reset externally per video).
    count = 0
    #==============================================================================================
    #Initialises a tracker using initial bounding box:-
    #==============================================================================================
    def __init__(self,bbox):
        """Create a tracker seeded with an initial [x1,y1,x2,y2,...] bbox."""
        #Constant-velocity motion model: state transition F and measurement H.
        self.kf = KalmanFilter(dim_x=7, dim_z=4)
        self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0], [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
        self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
        # Higher measurement noise on scale/aspect than on position.
        self.kf.R[2:,2:] *= 10.
        self.kf.P[4:,4:] *= 1000. #To Give High Uncertainty to the Unobservable Initial Velocities:-
        self.kf.P *= 10.
        self.kf.Q[-1,-1] *= 0.01
        self.kf.Q[4:,4:] *= 0.01
        self.kf.x[:4] = convert_bbox_to_z(bbox)
        self.time_since_update = 0   # frames since the last matched detection
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []            # predicted boxes since the last update
        self.hits = 0                # total matched detections
        self.hit_streak = 0          # consecutive matched detections
        self.age = 0                 # total predict() calls
    #================================================================================================
    #To Updates the state vector with observed bbox.:-
    #================================================================================================
    def update(self,bbox):
        """Correct the filter with an observed [x1,y1,x2,y2,...] bbox."""
        self.time_since_update = 0
        self.history = []
        self.hits += 1
        self.hit_streak += 1
        self.kf.update(convert_bbox_to_z(bbox))
    #================================================================================================
    #To Advances the state vector and returns the predicted bounding box estimate.:-
    #================================================================================================
    def predict(self):
        """Advance the filter one step and return the predicted (1,4) bbox."""
        # If the predicted scale would go non-positive, zero the scale
        # velocity so the box cannot collapse to negative area.
        if((self.kf.x[6]+self.kf.x[2])<=0):
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.age += 1
        if(self.time_since_update>0):
            self.hit_streak = 0
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        return self.history[-1]
    #================================================================================================
    #To Returns the Current Bounding box Estimate:-
    #================================================================================================
    def get_state(self):
        """Return the current (1,4) bbox estimate [x1,y1,x2,y2]."""
        return convert_x_to_bbox(self.kf.x)
#================================================================================================
#For Assigns detections to tracked object(both represented as bounding boxes)and Returns 3 lists
#of matches, unmatched_detections and unmatched_trackers
#================================================================================================
def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
    """Assign detections to tracked objects (both as [x1,y1,x2,y2] boxes).

    Returns three arrays: matches of shape (N,2) holding
    (detection_index, tracker_index) pairs, the unmatched detection
    indices, and the unmatched tracker indices.
    """
    # Fix: sklearn.utils.linear_assignment_ was removed in scikit-learn
    # 0.23; scipy's linear_sum_assignment is the supported replacement.
    # It minimises cost, hence the negated IOU matrix below.  The stale
    # module-level sklearn import should eventually be deleted as well.
    from scipy.optimize import linear_sum_assignment
    if(len(trackers)==0):
        return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
    iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)
    for d,det in enumerate(detections):
        for t,trk in enumerate(trackers):
            iou_matrix[d,t] = iou(det,trk)
    det_idx, trk_idx = linear_sum_assignment(-iou_matrix)
    # Same (N,2) pair layout the old sklearn helper produced.
    matched_indices = np.stack((det_idx, trk_idx), axis=1)
    unmatched_detections = []
    for d,det in enumerate(detections):
        if(d not in matched_indices[:,0]):
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t,trk in enumerate(trackers):
        if(t not in matched_indices[:,1]):
            unmatched_trackers.append(t)
    #Filter out assignments whose overlap is below the IOU threshold:-
    matches = []
    for m in matched_indices:
        if(iou_matrix[m[0],m[1]]<iou_threshold):
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1,2))
    if(len(matches)==0):
        matches = np.empty((0,2),dtype=int)
    else:
        matches = np.concatenate(matches,axis=0)
    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
#Some Sets Key Parameters For SORT:-
class Sort(object):
    """SORT multi-object tracker: keeps a list of KalmanBoxTracker
    instances and associates each frame's detections to them."""
    def __init__(self,max_age=10,min_hits=3):
        # max_age: frames a tracker may go without a matched detection
        # before it is removed.
        # min_hits: consecutive matches required before a track is reported.
        self.max_age = max_age
        self.min_hits = min_hits
        self.trackers = []
        self.frame_count = 0
        self.counts = 0   # mirrors KalmanBoxTracker.count after each update
    #========================================================================================================
    #dets:-A Numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
    #Requires:-Here,This method must be called once for each frame even with empty detections.
    #It Returns the a similar array, where the last column is the object ID.
    #NOTE:-Here,The Number of objects returned may differ from the number of detections provided.
    #========================================================================================================
    def update(self,dets):
        """Advance all trackers one frame and match `dets` against them."""
        self.frame_count += 1
        #Predicted locations of the existing trackers, one row per tracker:-
        trks = np.zeros((len(self.trackers),5))
        to_del = []
        ret = []
        for t,trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
            if(np.any(np.isnan(pos))):
                to_del.append(t)
        # Drop rows (and trackers) whose prediction went invalid (NaN).
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        for t in reversed(to_del):
            self.trackers.pop(t)
        matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)
        #Correct each matched tracker with its assigned detection:-
        for t,trk in enumerate(self.trackers):
            if(t not in unmatched_trks):
                d = matched[np.where(matched[:,1]==t)[0],0]
                trk.update(dets[d,:][0])
        #Spawn a new tracker for every unmatched detection:-
        for i in unmatched_dets:
            trk = KalmanBoxTracker(dets[i,:])
            self.trackers.append(trk)
        # Walk trackers in reverse so pop(i) below removes the right one.
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            d = trk.get_state()[0]
            # Report only tracks that were just updated and are either past
            # the min_hits warm-up or still within the first few frames.
            if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):
                ret.append(np.concatenate((d,[trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
            i -= 1
            #Remove dead tracklets that have gone unmatched too long:-
            if(trk.time_since_update > self.max_age):
                self.trackers.pop(i)
        self.counts = KalmanBoxTracker.count
        if(len(ret)>0):
            return np.concatenate(ret)
        return np.empty((0,5))
#==================================================================================================
#For Parse Input Arguements:-
#==================================================================================================
def parse_args():
    """Parse the SORT demo command-line options (currently just --display)."""
    parser = argparse.ArgumentParser(description='SORT demo')
    parser.add_argument(
        '--display',
        dest='display',
        action='store_true',
        help='Display online tracker output (slow) [False]',
    )
    return parser.parse_args()
if __name__ == '__main__':
    #Demo entry point: run SORT over every train sequence found under data/.
    args = parse_args()
    display = args.display
    phase = 'train'
    total_time = 0.0
    total_frames = 0
    colours = np.random.rand(32,3) #Here, It is used only for display:-
    # Fix: the original loop iterated over an undefined name `sequences`,
    # which raised NameError. Discover sequences from the expected layout
    # data/<sequence>/det.txt instead.
    sequences = sorted(os.path.basename(os.path.dirname(p))
                       for p in glob.glob(os.path.join('data', '*', 'det.txt')))
    if(display):
        if not os.path.exists('mot_benchmark'):
            print('\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
            exit()
        plt.ion()
        fig = plt.figure()
    if not os.path.exists('output'):
        os.makedirs('output')
    for seq in sequences:
        mot_tracker = Sort() #To Create instance of the SORT tracker:-
        seq_dets = np.loadtxt('data/%s/det.txt'%(seq),delimiter=',') #load detections
        with open('output/%s.txt'%(seq),'w') as out_file:
            print("Processing %s."%(seq))
            for frame in range(int(seq_dets[:,0].max())):
                frame += 1 #For detection and frame numbers begin at 1:-
                dets = seq_dets[seq_dets[:,0]==frame,2:7]
                dets[:,2:4] += dets[:,0:2] #To Convert to [x1,y1,w,h] to [x1,y1,x2,y2]:-
                total_frames += 1
                if(display):
                    ax1 = fig.add_subplot(111, aspect='equal')
                    fn = 'mot_benchmark/%s/%s/img1/%06d.jpg'%(phase,seq,frame)
                    im =io.imread(fn)
                    ax1.imshow(im)
                    plt.title(seq+' Tracked Targets')
                start_time = time.time()
                trackers = mot_tracker.update(dets)
                cycle_time = time.time() - start_time
                total_time += cycle_time
                for d in trackers:
                    print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file)
                    if(display):
                        d = d.astype(np.int32)
                        ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))
                        ax1.set_adjustable('box-forced')
                if(display):
                    fig.canvas.flush_events()
                    plt.draw()
                    ax1.cla()
    # Fix: guard the FPS summary against division by zero when no frames
    # were processed (e.g. no sequences found).
    if total_time > 0:
        print("Total Tracking took: %.3f for %d frames or %.1f FPS"%(total_time,total_frames,total_frames/total_time))
    else:
        print("No frames were processed; check that data/<seq>/det.txt files exist.")
    if(display):
        print("Note: to get real runtime results run without the option: --display")
#===================================================================================================================
|
{"/Main.py": ["/Detection/detect2.py"]}
|
26,046
|
kkthaker/python-project
|
refs/heads/master
|
/Main.py
|
from __future__ import division
#=======================================================================================
#Importing Necessarry or Required APIS or Packages:-
#=======================================================================================
#To read the Video:-
import cv2
#For GUI Generation and For its Work Purpose:-
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5 import QtCore, QtGui, QtWidgets
#For Detecting the Objects:-
from Config import *
#For Image Manipulation after Saving them in different Format:-
from PIL import Image
#To Work Some Operation on DeepCopy of Something:-
import copy
#For Importing Other ".py" Files of My Project:-
from Detection.detect2 import *
from Detection.detect1 import *
#To Compute Tensor Computing (Multidimensional Array Computing):-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#For Some Array Operation:-
import numpy as np
#For Some Graphical Purpose:-
import matplotlib.pyplot as plt
import matplotlib.patches as patches
#=================================================================
#Total type of Objects Which are Part of that Video (Input) With their RGB Coordinates:-
#=================================================================
# Vehicle classes that are counted; every other detected class is ignored.
names = ["bicycle","bus","car","motorbike","truck"]
# Per-class drawing colour for boxes/labels. Frames are handled with cv2,
# so these tuples are presumably BGR-ordered - TODO confirm against the
# rectangles drawn in CounterThread.counter().
color_dict = {"bicycle": (179, 52, 255),
              "bus": (255, 191, 0),
              "car": (127, 255, 0),
              "motorbike": (0, 140, 255),
              "truck": (0, 215, 255)}
class CounterThread(QThread):
    """Worker thread that reads videos, runs YOLO detection plus SORT
    tracking inside a user-drawn counting area, and emits annotated
    frames and per-vehicle counting results back to the GUI."""
    # Annotated frame ready for display.
    sin_counterResult = pyqtSignal(np.ndarray)
    # Start/stop flag pushed from the GUI (1 = run, 0 = stop).
    sin_runningFlag = pyqtSignal(int)
    # New list of video file paths to process.
    sin_videoList = pyqtSignal(list)
    # New counting-area polygon (list of points).
    sin_countArea = pyqtSignal(list)
    # Emitted once after all queued videos finish.
    sin_done = pyqtSignal(int)
    # Batches of [videoName, track_id, objectName] for finished tracks.
    sin_counter_results = pyqtSignal(list)
    # Pause/resume flag (1 = paused, 0 = running).
    sin_pauseFlag = pyqtSignal(int)
    def __init__(self,model,class_names,device):
        """Store the YOLO model/class names/torch device and wire Qt slots."""
        super(CounterThread,self).__init__()
        self.model = model
        self.class_names = class_names
        self.device = device
        self.permission = names          # classes we are allowed to count
        self.colorDict = color_dict      # per-class box colour
        # create instance of SORT
        self.mot_tracker = Sort(max_age=10, min_hits=2)
        self.countArea = None            # polygon set later via sin_countArea
        self.running_flag = 0
        self.pause_flag = 0
        self.videoList = []
        self.last_max_id = 0
        # track_id -> {"no_update_count": frames unseen, "his": class-name votes}
        self.history = {}
        self.sin_runningFlag.connect(self.update_flag)
        self.sin_videoList.connect(self.update_videoList)
        self.sin_countArea.connect(self.update_countArea)
        self.sin_pauseFlag.connect(self.update_pauseFlag)
        self.save_dir = "results"
        if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)
    def run(self):
        """Main loop: process each queued video until stopped; only every
        third frame is analysed to keep the UI responsive."""
        for video in self.videoList:
            self.last_max_id = 0
            cap = cv2.VideoCapture(video)
            #out = cv2.VideoWriter(os.path.join(self.save_dir,video.split("/")[-1]), cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 10, (1920, 1080))
            frame_count = 0
            while cap.isOpened():
                #To print(frame_count) which is Frame Per Second:-
                if self.running_flag:
                    if not self.pause_flag:
                        ret, frame = cap.read()
                        if ret:
                            if frame_count % 3 == 0:
                                a1 = time.time()
                                frame = self.counter(self.permission, self.colorDict, frame,np.array(self.countArea), self.mot_tracker, video)
                                self.sin_counterResult.emit(frame)
                                #out.write(frame)
                                a2 = time.time()
                                print(f"fps: {1 / (a2 - a1):.2f}")
                            frame_count += 1
                        else:
                            break
                    else:
                        # Paused: poll cheaply instead of busy-waiting.
                        time.sleep(0.1)
                else:
                    break
            #For Restart Count for Each Video:-
            KalmanBoxTracker.count = 0
            cap.release()
            #out.release()
            if not self.running_flag:
                break
        if self.running_flag:
            self.sin_done.emit(1)
    def update_pauseFlag(self,flag):
        """Qt slot: pause (1) / resume (0) processing."""
        self.pause_flag = flag
    def update_flag(self,flag):
        """Qt slot: start (1) / stop (0) processing."""
        self.running_flag = flag
    def update_videoList(self, videoList):
        """Qt slot: replace the queue of video file paths."""
        print("Update videoList!")
        self.videoList = videoList
    def update_countArea(self,Area):
        """Qt slot: replace the counting-area polygon."""
        print("Update countArea!")
        self.countArea = Area
    def counter(self, permission, colorDict, frame, CountArea, mot_tracker, videoName):
        """Detect, track and annotate one frame; returns the drawn frame.

        Detections are kept only if their class is permitted, their
        confidence exceeds 0.5 and their bottom-centre lies inside the
        counting polygon. Finished tracks (unseen > 5 calls) are emitted
        via sin_counter_results with their majority-vote class name.
        """
        # Rasterise the counting polygon into a small binary mask so
        # point-in-area tests are a single lookup.
        AreaBound = [min(CountArea[:, 0]), min(CountArea[:, 1]), max(CountArea[:, 0]), max(CountArea[:, 1])]
        painting = np.zeros((AreaBound[3] - AreaBound[1], AreaBound[2] - AreaBound[0]), dtype=np.uint8)
        CountArea_mini = CountArea - AreaBound[0:2]
        cv2.fillConvexPoly(painting, CountArea_mini, (1,))
        objects = yolo_prediction(self.model,self.device,frame,self.class_names)
        objects = filter(lambda x: x[0] in permission, objects)
        objects = filter(lambda x: x[1] > 0.5,objects)
        # Keep detections whose bottom-centre point falls inside the area.
        objects = list(filter(lambda x: pointInCountArea(painting, AreaBound, [int(x[2][0]), int(x[2][1] + x[2][3] / 2)]),objects))
        #To Filter out repeat bbox:-
        objects = filiter_out_repeat(objects)
        # Convert [cx,cy,w,h] detections to [x1,y1,x2,y2,score] for SORT.
        detections = []
        for item in objects:
            detections.append([int(item[2][0] - item[2][2] / 2),
                               int(item[2][1] - item[2][3] / 2),
                               int(item[2][0] + item[2][2] / 2),
                               int(item[2][1] + item[2][3] / 2),
                               item[1]])
        track_bbs_ids = mot_tracker.update(np.array(detections))
        #==============================================================================================================
        #Draw the counting-area polygon outline in red:-
        #==============================================================================================================
        for i in range(len(CountArea)):
            cv2.line(frame, tuple(CountArea[i]), tuple(CountArea[(i + 1) % (len(CountArea))]), (0, 0, 255), 2)
        if len(track_bbs_ids) > 0:
            for bb in track_bbs_ids: #add all bbox to history
                id = int(bb[-1])
                objectName = get_objName(bb, objects)
                if id not in self.history.keys(): #add new id
                    self.history[id] = {}
                    self.history[id]["no_update_count"] = 0
                    self.history[id]["his"] = []
                    self.history[id]["his"].append(objectName)
                else:
                    self.history[id]["no_update_count"] = 0
                    self.history[id]["his"].append(objectName)
        # Draw each live track with its majority-vote class label.
        for i, item in enumerate(track_bbs_ids):
            bb = list(map(lambda x: int(x), item))
            id = bb[-1]
            x1, y1, x2, y2 = bb[:4]
            his = self.history[id]["his"]
            result = {}
            for i in set(his):
                result[i] = his.count(i)
            res = sorted(result.items(), key=lambda d: d[1], reverse=True)
            objectName = res[0][0]
            boxColor = colorDict[objectName]
            cv2.rectangle(frame, (x1, y1), (x2, y2), boxColor, thickness=2)
            cv2.putText(frame, str(id) + "_" + objectName, (x1 - 1, y1 - 3), cv2.FONT_HERSHEY_COMPLEX, 0.7,
                        boxColor,
                        thickness=2)
        counter_results = []
        videoName = videoName.split('/')[-1]
        removed_id_list = []
        for id in self.history.keys(): #extract id after tracking
            self.history[id]["no_update_count"] += 1
            # Track unseen for more than 5 counter() calls: finalise it.
            if self.history[id]["no_update_count"] > 5:
                his = self.history[id]["his"]
                result = {}
                for i in set(his):
                    result[i] = his.count(i)
                res = sorted(result.items(), key=lambda d: d[1], reverse=True)
                objectName = res[0][0]
                counter_results.append([videoName,id,objectName])
                #del id
                removed_id_list.append(id)
        for id in removed_id_list:
            _ = self.history.pop(id)
        if len(counter_results):
            self.sin_counter_results.emit(counter_results)
        return frame
    def emit_timeCode(self,time_code):
        # NOTE(review): self.sin_timeCode is never declared on this class,
        # so calling this method would raise AttributeError. Verify it is
        # unused, or add the corresponding pyqtSignal before relying on it.
        self.sin_timeCode.emit(time_code)
def getTwoDimensionListIndex(L,value,pos):
    """Return the index of the first row whose element at `pos` equals
    `value`, or -1 when no row matches."""
    for row_index, row in enumerate(L):
        if row[pos] == value:
            return row_index
    return -1
def filiter_out_repeat(objects):
    """Drop detections whose box overlaps (IoU >= 0.7) a later,
    higher-confidence detection; the best-scoring one is always kept.

    Each object is [class_name, confidence, [cx, cy, w, h]].
    """
    objects = sorted(objects, key=lambda obj: obj[1])
    count = len(objects)
    if count <= 1:
        # Nothing to deduplicate.
        return objects
    kept = []
    for i in range(count - 1):
        cx_i, cy_i, w_i, h_i = objects[i][2]
        box_i = [int(cx_i - w_i / 2), int(cy_i - h_i / 2),
                 int(cx_i + w_i / 2), int(cy_i + h_i / 2)]
        duplicated = False
        for j in range(i + 1, count):
            cx_j, cy_j, w_j, h_j = objects[j][2]
            box_j = [int(cx_j - w_j / 2), int(cy_j - h_j / 2),
                     int(cx_j + w_j / 2), int(cy_j + h_j / 2)]
            if cal_iou(box_i, box_j) >= 0.7:
                duplicated = True
                break
        if not duplicated:
            kept.append(objects[i])
    # The highest-confidence detection always survives.
    kept.append(objects[-1])
    return list(tuple(kept))
def cal_iou(box1,box2):
    """Return the intersection-over-union of two [x1,y1,x2,y2] boxes.

    Returns 0.0 when both boxes are degenerate (zero union area), instead
    of raising ZeroDivisionError as the original did.
    """
    x1 = max(box1[0],box2[0])
    y1 = max(box1[1],box2[1])
    x2 = min(box1[2],box2[2])
    y2 = min(box1[3],box2[3])
    i = max(0,(x2-x1))*max(0,(y2-y1))
    u = (box1[2]-box1[0])*(box1[3]-box1[1]) + (box2[2]-box2[0])*(box2[3]-box2[1]) - i
    if u == 0:
        # Both boxes have zero area; treat as no overlap.
        return 0.0
    iou = float(i)/float(u)
    return iou
def get_objName(item, objects):
    """Return the class name of the detection whose box overlaps `item`'s
    first four coordinates the most (by IoU)."""
    overlaps = []
    for detection in objects:
        cx, cy, bw, bh = detection[2]
        corners = [int(cx - bw / 2), int(cy - bh / 2),
                   int(cx + bw / 2), int(cy + bh / 2)]
        overlaps.append(cal_iou(item[:4], corners))
    best = overlaps.index(max(overlaps))
    return objects[best][0]
def pointInCountArea(painting, AreaBound, point):
    """Return the mask value at `point` translated into the painting's
    local frame, or 0 when the point falls outside the mask."""
    height, width = painting.shape[:2]
    local = np.array(point) - AreaBound[:2]
    px, py = local[0], local[1]
    if 0 <= px < width and 0 <= py < height:
        return painting[py, px]
    return 0
def resize(image, size):
    """Nearest-neighbour resize of a CHW tensor to (size, size)."""
    batched = image.unsqueeze(0)
    resized = F.interpolate(batched, size=size, mode="nearest")
    return resized.squeeze(0)
#================================================================================================
#Here the Below Code lines are For Reading a Video framewise (Taking it as image through opencv)
#================================================================================================
def yolo_prediction(model, device, image,class_names):
    """Run the YOLO model on one BGR frame and return its detections.

    Each detection is [class_name, confidence, [cx, cy, w, h]] with
    coordinates mapped back to the original frame resolution. Returns an
    empty list when nothing is detected.
    """
    image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
    imgs = transforms.ToTensor()(Image.fromarray(image))
    c, h, w = imgs.shape
    # Per-coordinate factors mapping 416x416 network space back to the frame.
    img_scale = [w / 416, h / 416, w / 416, h / 416]
    imgs = resize(imgs, 416)
    imgs = imgs.unsqueeze(0).to(device)
    model.eval()
    with torch.no_grad():
        outputs = model(imgs)
        outputs = non_max_suppression(outputs, conf_thres=0.5, nms_thres=0.45)
    objects = []
    try:
        outputs = outputs[0].cpu().data
        for output in outputs:
            item = []
            item.append(class_names[int(output[-1])])
            item.append(float(output[4]))
            box = [int(value * img_scale[i]) for i, value in enumerate(output[:4])]
            x1,y1,x2,y2 = box
            x = int((x2+x1)/2)
            y = int((y1+y2)/2)
            w = x2-x1
            h = y2-y1
            item.append([x,y,w,h])
            objects.append(item)
    except Exception:
        # NMS yields [None] when nothing survives thresholding, so the
        # .cpu() call above raises. Keep the best-effort "no detections"
        # behaviour, but stop using a bare `except:` that also trapped
        # SystemExit/KeyboardInterrupt.
        pass
    return objects
#=============================================================================================
#Constructs module list of layer blocks from module configuration in module_defs:-
#=============================================================================================
def create_modules(module_defs):
    """Build a nn.ModuleList of layer blocks from the parsed Darknet config.

    module_defs: list of dicts; the first entry holds the net hyperparams
    and is consumed (popped) here. Returns (hyperparams, module_list).
    """
    hyperparams = module_defs.pop(0)
    # Running list of output channel counts; index 0 is the input channels.
    output_filters = [int(hyperparams["channels"])]
    module_list = nn.ModuleList()
    for module_i, module_def in enumerate(module_defs):
        modules = nn.Sequential()
        if module_def["type"] == "convolutional":
            bn = int(module_def["batch_normalize"])
            filters = int(module_def["filters"])
            kernel_size = int(module_def["size"])
            # "Same" padding for odd kernel sizes.
            pad = (kernel_size - 1) // 2
            modules.add_module(
                f"conv_{module_i}",
                nn.Conv2d(
                    in_channels=output_filters[-1],
                    out_channels=filters,
                    kernel_size=kernel_size,
                    stride=int(module_def["stride"]),
                    padding=pad,
                    bias=not bn,
                ),
            )
            if bn:
                modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
            if module_def["activation"] == "leaky":
                modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
        elif module_def["type"] == "maxpool":
            kernel_size = int(module_def["size"])
            stride = int(module_def["stride"])
            if kernel_size == 2 and stride == 1:
                # Asymmetric pad keeps the spatial size for 2x2/stride-1 pools.
                modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
            maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
            modules.add_module(f"maxpool_{module_i}", maxpool)
        elif module_def["type"] == "upsample":
            upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
            modules.add_module(f"upsample_{module_i}", upsample)
        elif module_def["type"] == "route":
            # Route concatenates earlier layers; filters is their channel sum.
            layers = [int(x) for x in module_def["layers"].split(",")]
            filters = sum([output_filters[1:][i] for i in layers])
            modules.add_module(f"route_{module_i}", EmptyLayer())
        elif module_def["type"] == "shortcut":
            # Residual add: output channels match the referenced layer.
            filters = output_filters[1:][int(module_def["from"])]
            modules.add_module(f"shortcut_{module_i}", EmptyLayer())
        elif module_def["type"] == "yolo":
            anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
            # Extract anchors
            anchors = [int(x) for x in module_def["anchors"].split(",")]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in anchor_idxs]
            num_classes = int(module_def["classes"])
            img_size = int(hyperparams["height"])
            #To Define detection layer:-
            yolo_layer = YOLOLayer(anchors, num_classes, img_size)
            modules.add_module(f"yolo_{module_i}", yolo_layer)
        #For Register module list and number of output filters:-
        module_list.append(modules)
        output_filters.append(filters)
    return hyperparams, module_list
#===========================================================================================
#nn.Upsample is deprecated:-
#===========================================================================================
class Upsample(nn.Module):
    """Spatial upsampling layer (replacement for the deprecated nn.Upsample)."""

    def __init__(self, scale_factor, mode="nearest"):
        super(Upsample, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, x):
        return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
#===========================================================================================
#Placeholder for 'route' and 'shortcut' layers:-
#===========================================================================================
class EmptyLayer(nn.Module):
    """Placeholder module for 'route' and 'shortcut' layers; the actual
    concatenation/addition is performed in Darknet.forward."""
    def __init__(self):
        super(EmptyLayer, self).__init__()
#===================================================================================
#Detection Layer Code:-
#===================================================================================
class YOLOLayer(nn.Module):
    """YOLO detection head: decodes one feature map into boxes/objectness/
    class scores, and computes the training loss when targets are given."""
    def __init__(self, anchors, num_classes, img_dim=416):
        # anchors: list of (w, h) anchor sizes in input-image pixels.
        super(YOLOLayer, self).__init__()
        self.anchors = anchors
        self.num_anchors = len(anchors)
        self.num_classes = num_classes
        self.ignore_thres = 0.5
        self.mse_loss = nn.MSELoss()
        self.bce_loss = nn.BCELoss()
        # Confidence-loss weights for cells with / without an object.
        self.obj_scale = 1
        self.noobj_scale = 100
        self.metrics = {}
        self.img_dim = img_dim
        self.grid_size = 0 # grid size
    def compute_grid_offsets(self, grid_size, cuda=True):
        """Precompute per-cell x/y offsets and stride-scaled anchors for the
        current feature-map grid size."""
        self.grid_size = grid_size
        g = self.grid_size
        FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        # Stride: input pixels covered by one grid cell.
        self.stride = self.img_dim / self.grid_size
        # Calculate offsets for each grid
        self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g]).type(FloatTensor)
        self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g]).type(FloatTensor)
        self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
        self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))
        self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))
    def forward(self, x, targets=None, img_dim=None):
        """Decode predictions; returns (output, 0) at inference or
        (output, total_loss) when `targets` is provided."""
        #For Tensors for cuda support:-
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
        ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
        self.img_dim = img_dim
        num_samples = x.size(0)
        grid_size = x.size(2)
        # Reshape to (batch, anchors, grid, grid, 5 + num_classes).
        prediction = (
            x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size, grid_size)
            .permute(0, 1, 3, 4, 2)
            .contiguous()
        )
        #To Get Outputs:-
        x = torch.sigmoid(prediction[..., 0]) #Center x
        y = torch.sigmoid(prediction[..., 1]) #Center y
        w = prediction[..., 2] #Width
        h = prediction[..., 3] #Height
        pred_conf = torch.sigmoid(prediction[..., 4]) #Conf
        pred_cls = torch.sigmoid(prediction[..., 5:]) #Cls pred.
        #If grid size does not match current we compute new offsets:-
        if grid_size != self.grid_size:
            self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
        #Add offset and scale with anchors:-
        pred_boxes = FloatTensor(prediction[..., :4].shape)
        pred_boxes[..., 0] = x.data + self.grid_x
        pred_boxes[..., 1] = y.data + self.grid_y
        pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
        pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
        # Flatten per-cell predictions and rescale boxes to input pixels.
        output = torch.cat(
            (
                pred_boxes.view(num_samples, -1, 4) * self.stride,
                pred_conf.view(num_samples, -1, 1),
                pred_cls.view(num_samples, -1, self.num_classes),
            ),
            -1,
        )
        if targets is None:
            return output, 0
        else:
            # NOTE(review): build_targets is defined elsewhere in the
            # project; it is assumed to return masks/targets matching the
            # prediction layout - confirm against its definition.
            iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(
                pred_boxes=pred_boxes,
                pred_cls=pred_cls,
                target=targets,
                anchors=self.scaled_anchors,
                ignore_thres=self.ignore_thres,
            )
            #Loss:-Mask outputs to ignore non-existing objects (except with conf. loss)
            loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])
            loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])
            loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])
            loss_h = self.mse_loss(h[obj_mask], th[obj_mask])
            loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])
            loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])
            loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
            loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])
            total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
            #Some Metrics Stuff:-
            cls_acc = 100 * class_mask[obj_mask].mean()
            conf_obj = pred_conf[obj_mask].mean()
            conf_noobj = pred_conf[noobj_mask].mean()
            conf50 = (pred_conf > 0.5).float()
            iou50 = (iou_scores > 0.5).float()
            iou75 = (iou_scores > 0.75).float()
            detected_mask = conf50 * class_mask * tconf
            precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)
            recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)
            recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)
            self.metrics = {
                "loss": to_cpu(total_loss).item(),
                "x": to_cpu(loss_x).item(),
                "y": to_cpu(loss_y).item(),
                "w": to_cpu(loss_w).item(),
                "h": to_cpu(loss_h).item(),
                "conf": to_cpu(loss_conf).item(),
                "cls": to_cpu(loss_cls).item(),
                "cls_acc": to_cpu(cls_acc).item(),
                "recall50": to_cpu(recall50).item(),
                "recall75": to_cpu(recall75).item(),
                "precision": to_cpu(precision).item(),
                "conf_obj": to_cpu(conf_obj).item(),
                "conf_noobj": to_cpu(conf_noobj).item(),
                "grid_size": grid_size,
            }
            return output, total_loss
#===================================================================
#Yolov3 Object Detection Model Which Can detect upto 80 different Model:-
#===================================================================
class Darknet(nn.Module):
    """YOLOv3 detection network assembled from a darknet-format .cfg file."""
    def __init__(self, config_path, img_size=416):
        # Parse the .cfg into layer definitions, then build the matching nn.ModuleList.
        super(Darknet, self).__init__()
        self.module_defs = parse_model_config(config_path)
        self.hyperparams, self.module_list = create_modules(self.module_defs)
        # YOLO detection heads are identified by their "metrics" attribute.
        self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], "metrics")]
        self.img_size = img_size
        self.seen = 0  # number of images seen during training (persisted in weight files)
        self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
    def forward(self, x, targets=None):
        """Run a forward pass; return detections, or (loss, detections) when targets are given."""
        img_dim = x.shape[2]  # NOTE(review): assumes square input — confirm upstream resize
        loss = 0
        layer_outputs, yolo_outputs = [], []
        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
            if module_def["type"] in ["convolutional", "upsample", "maxpool"]:
                x = module(x)
            elif module_def["type"] == "route":
                # Concatenate the outputs of the referenced layers along the channel axis.
                x = torch.cat([layer_outputs[int(layer_i)] for layer_i in module_def["layers"].split(",")], 1)
            elif module_def["type"] == "shortcut":
                # Residual connection: add the output of an earlier layer.
                layer_i = int(module_def["from"])
                x = layer_outputs[-1] + layer_outputs[layer_i]
            elif module_def["type"] == "yolo":
                x, layer_loss = module[0](x, targets, img_dim)
                loss += layer_loss
                yolo_outputs.append(x)
            layer_outputs.append(x)
        yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
        return yolo_outputs if targets is None else (loss, yolo_outputs)
    #=========================================================================================================
    # Loading / saving of darknet binary weight files. The read order below must
    # mirror darknet's write order exactly (BN params first when present, then conv).
    #=========================================================================================================
    def load_darknet_weights(self, weights_path):
        """Parses and loads the weights stored in 'weights_path'"""
        # Open the weights file:
        with open(weights_path, "rb") as f:
            header = np.fromfile(f, dtype=np.int32, count=5)  # first five int32s are the header
            self.header_info = header  # needed to write the header back when saving weights
            self.seen = header[3]  # number of images seen during training
            weights = np.fromfile(f, dtype=np.float32)  # the rest of the file is raw float32 weights
        # Establish a cutoff when loading backbone-only weights:
        cutoff = None
        if "darknet53.conv.74" in weights_path:
            cutoff = 75
        ptr = 0  # read pointer into the flat weights array
        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
            if i == cutoff:
                break
            if module_def["type"] == "convolutional":
                conv_layer = module[0]
                if module_def["batch_normalize"]:
                    # Load BN bias, weights, running mean and running variance:
                    bn_layer = module[1]
                    num_b = bn_layer.bias.numel() # Number of biases
                    # Bias:
                    bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)
                    bn_layer.bias.data.copy_(bn_b)
                    ptr += num_b
                    # Weight:
                    bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)
                    bn_layer.weight.data.copy_(bn_w)
                    ptr += num_b
                    # Running Mean:
                    bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)
                    bn_layer.running_mean.data.copy_(bn_rm)
                    ptr += num_b
                    # Running Var:
                    bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)
                    bn_layer.running_var.data.copy_(bn_rv)
                    ptr += num_b
                else:
                    # No batch norm: the conv layer has its own bias.
                    num_b = conv_layer.bias.numel()
                    conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)
                    conv_layer.bias.data.copy_(conv_b)
                    ptr += num_b
                # Load conv. weights:
                num_w = conv_layer.weight.numel()
                conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)
                conv_layer.weight.data.copy_(conv_w)
                ptr += num_w
    def save_darknet_weights(self, path, cutoff=-1):
        """Write the model weights to *path* in darknet binary format (up to layer *cutoff*)."""
        fp = open(path, "wb")
        self.header_info[3] = self.seen
        self.header_info.tofile(fp)
        # Iterate through layers in definition order (mirrors load_darknet_weights):
        for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
            if module_def["type"] == "convolutional":
                conv_layer = module[0]
                # If batch norm, write its parameters first:
                if module_def["batch_normalize"]:
                    bn_layer = module[1]
                    bn_layer.bias.data.cpu().numpy().tofile(fp)
                    bn_layer.weight.data.cpu().numpy().tofile(fp)
                    bn_layer.running_mean.data.cpu().numpy().tofile(fp)
                    bn_layer.running_var.data.cpu().numpy().tofile(fp)
                # Otherwise write the conv bias:
                else:
                    conv_layer.bias.data.cpu().numpy().tofile(fp)
                # Conv weights:
                conv_layer.weight.data.cpu().numpy().tofile(fp)
        fp.close()
#===========================================================================
#Now Whole Code is Written For How GUI is Generated and How it Works:-
#===========================================================================
class Ui_mainWindow(object):
    """Qt Designer-style UI definition for the vehicle-counter main window."""
    def setupUi(self, mainWindow):
        """Create and lay out every widget on *mainWindow*."""
        mainWindow.setObjectName("mainWindow")
        mainWindow.resize(1203, 554)
        self.centralwidget = QtWidgets.QWidget(mainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Right-hand panel holding the per-class counting results grid.
        self.groupBox_count = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_count.setGeometry(QtCore.QRect(990, 10, 211, 341))
        self.groupBox_count.setObjectName("groupBox_count")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox_count)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.gridLayout_count = QtWidgets.QGridLayout()
        self.gridLayout_count.setContentsMargins(2, 2, 2, 2)
        self.gridLayout_count.setSpacing(6)
        self.gridLayout_count.setObjectName("gridLayout_count")
        # Column 0 = class-name labels; column 1 = live counter labels.
        self.label_truck = QtWidgets.QLabel(self.groupBox_count)
        self.label_truck.setObjectName("label_truck")
        self.gridLayout_count.addWidget(self.label_truck, 2, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_7 = QtWidgets.QLabel(self.groupBox_count)
        self.label_7.setObjectName("label_7")
        self.gridLayout_count.addWidget(self.label_7, 4, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_5 = QtWidgets.QLabel(self.groupBox_count)
        self.label_5.setObjectName("label_5")
        self.gridLayout_count.addWidget(self.label_5, 2, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_6 = QtWidgets.QLabel(self.groupBox_count)
        self.label_6.setObjectName("label_6")
        self.gridLayout_count.addWidget(self.label_6, 3, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_motorbike = QtWidgets.QLabel(self.groupBox_count)
        self.label_motorbike.setObjectName("label_motorbike")
        self.gridLayout_count.addWidget(self.label_motorbike, 3, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_bus = QtWidgets.QLabel(self.groupBox_count)
        self.label_bus.setObjectName("label_bus")
        self.gridLayout_count.addWidget(self.label_bus, 1, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_bicycle = QtWidgets.QLabel(self.groupBox_count)
        self.label_bicycle.setObjectName("label_bicycle")
        self.gridLayout_count.addWidget(self.label_bicycle, 4, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_12 = QtWidgets.QLabel(self.groupBox_count)
        self.label_12.setObjectName("label_12")
        self.gridLayout_count.addWidget(self.label_12, 5, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_3 = QtWidgets.QLabel(self.groupBox_count)
        self.label_3.setObjectName("label_3")
        self.gridLayout_count.addWidget(self.label_3, 0, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_sum = QtWidgets.QLabel(self.groupBox_count)
        self.label_sum.setObjectName("label_sum")
        self.gridLayout_count.addWidget(self.label_sum, 5, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_car = QtWidgets.QLabel(self.groupBox_count)
        self.label_car.setObjectName("label_car")
        self.gridLayout_count.addWidget(self.label_car, 0, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_4 = QtWidgets.QLabel(self.groupBox_count)
        self.label_4.setObjectName("label_4")
        self.gridLayout_count.addWidget(self.label_4, 1, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.verticalLayout_2.addLayout(self.gridLayout_count)
        # Large label on the left that displays the video frames.
        self.label_image = QtWidgets.QLabel(self.centralwidget)
        self.label_image.setGeometry(QtCore.QRect(10, 10, 960, 540))
        self.label_image.setStyleSheet("background-color: rgb(3, 187, 133);")
        self.label_image.setText("")
        self.label_image.setAlignment(QtCore.Qt.AlignCenter)
        self.label_image.setObjectName("label_image")
        # Button column (open / select area / start / pause).
        self.widget = QtWidgets.QWidget(self.centralwidget)
        self.widget.setGeometry(QtCore.QRect(1020, 360, 151, 181))
        self.widget.setObjectName("widget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.pushButton_openVideo = QtWidgets.QPushButton(self.widget)
        self.pushButton_openVideo.setObjectName("pushButton_openVideo")
        self.verticalLayout.addWidget(self.pushButton_openVideo)
        self.pushButton_selectArea = QtWidgets.QPushButton(self.widget)
        self.pushButton_selectArea.setObjectName("pushButton_selectArea")
        self.verticalLayout.addWidget(self.pushButton_selectArea)
        self.pushButton_start = QtWidgets.QPushButton(self.widget)
        self.pushButton_start.setObjectName("pushButton_start")
        self.verticalLayout.addWidget(self.pushButton_start)
        self.pushButton_pause = QtWidgets.QPushButton(self.widget)
        self.pushButton_pause.setObjectName("pushButton_pause")
        self.verticalLayout.addWidget(self.pushButton_pause)
        mainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(mainWindow)
        QtCore.QMetaObject.connectSlotsByName(mainWindow)
    def retranslateUi(self, mainWindow):
        """Set every user-visible string (single place for translation)."""
        _translate = QtCore.QCoreApplication.translate
        mainWindow.setWindowTitle(_translate("mainWindow", "Car Counter"))
        self.groupBox_count.setTitle(_translate("mainWindow", "Counting Results"))
        self.label_truck.setText(_translate("mainWindow", "0"))
        self.label_7.setText(_translate("mainWindow", "Bicycle"))
        self.label_5.setText(_translate("mainWindow", "Truck"))
        self.label_6.setText(_translate("mainWindow", "Motorbike"))
        self.label_motorbike.setText(_translate("mainWindow", "0"))
        self.label_bus.setText(_translate("mainWindow", "0"))
        self.label_bicycle.setText(_translate("mainWindow", "0"))
        self.label_12.setText(_translate("mainWindow", "Sum"))
        self.label_3.setText(_translate("mainWindow", "Car"))
        self.label_sum.setText(_translate("mainWindow", "0"))
        self.label_car.setText(_translate("mainWindow", "0"))
        self.label_4.setText(_translate("mainWindow", "Bus"))
        self.pushButton_openVideo.setText(_translate("mainWindow", "Open Video"))
        self.pushButton_selectArea.setText(_translate("mainWindow", "Please Select the Area"))
        self.pushButton_start.setText(_translate("mainWindow", "Please Start"))
        self.pushButton_pause.setText(_translate("mainWindow", "You Can Pause it"))
#===================================================================================
#Main Class Loading While We write python Main.py on Anaconda Prompt:-
#===================================================================================
class App(QMainWindow,Ui_mainWindow):
    """Main window controller: wires the generated UI to the YOLO counter thread."""
    def __init__(self):
        super(App,self).__init__()
        self.setupUi(self)
        # Fixed display size of the video label; frames are scaled to fit it.
        self.label_image_size = (self.label_image.geometry().width(),self.label_image.geometry().height())
        self.video = None
        self.exampleImage = None
        self.imgScale = None  # (h, w) ratio between source frame and display label
        self.get_points_flag = 0  # 1 while the user is clicking polygon vertices
        self.countArea = []  # counting polygon, in source-frame coordinates
        self.road_code = None
        self.time_code = None
        self.show_label = names  # class names whose counters are shown in the UI
        # Button wiring:
        self.pushButton_selectArea.clicked.connect(self.select_area)
        self.pushButton_openVideo.clicked.connect(self.open_video)
        self.pushButton_start.clicked.connect(self.start_count)
        self.pushButton_pause.clicked.connect(self.pause)
        self.label_image.mouseDoubleClickEvent = self.get_points
        self.pushButton_selectArea.setEnabled(False)
        self.pushButton_start.setEnabled(False)
        self.pushButton_pause.setEnabled(False)
        # State flags:
        self.running_flag = 0
        self.pause_flag = 0
        self.counter_thread_start_flag = 0
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        data_config = "config/coco.data"
        weights_path = "Algo/yolov3.weights"
        model_def = "config/yolov3.cfg"
        data_config = parse_data_config(data_config)
        self.yolo_class_names = load_classes(data_config["names"])
        # Initiate the detection model:
        print("Loading model ...")
        self.yolo_model = Darknet(model_def).to(self.device)
        if weights_path.endswith(".weights"):
            # Raw darknet weights:
            self.yolo_model.load_darknet_weights(weights_path)
        else:
            # PyTorch checkpoint:
            self.yolo_model.load_state_dict(torch.load(weights_path))
        # Counter thread and its signal connections:
        self.counterThread = CounterThread(self.yolo_model,self.yolo_class_names,self.device)
        self.counterThread.sin_counterResult.connect(self.show_image_label)
        self.counterThread.sin_done.connect(self.done)
        self.counterThread.sin_counter_results.connect(self.update_counter_results)
    def open_video(self):
        """Ask the user for a video file, show its first frame and reset the UI."""
        openfile_name = QFileDialog.getOpenFileName(self,'Open video','','Video files(*.avi , *.mp4)')
        self.videoList = [openfile_name[0]]
        vid = cv2.VideoCapture(self.videoList[0])
        while vid.isOpened():
            ret, frame = vid.read()
            if ret:
                self.exampleImage = frame
                self.show_image_label(frame)
                # Scale factors to map label clicks back to frame coordinates.
                self.imgScale = np.array(frame.shape[:2]) / [self.label_image_size[1], self.label_image_size[0]]
                vid.release()
                break
        self.pushButton_selectArea.setEnabled(True)
        self.pushButton_start.setText("Please Start the Video")
        self.pushButton_start.setEnabled(False)
        self.pushButton_pause.setText("You Can Pause it")
        self.pushButton_pause.setEnabled(False)
        # Clear previous counting results:
        KalmanBoxTracker.count = 0
        self.label_sum.setText("0")
        self.label_sum.repaint()
    def get_points(self, event):
        """Double-click handler: collect polygon vertices while area selection is active."""
        if self.get_points_flag:
            x = event.x()
            y = event.y()
            # Convert label coordinates back to source-frame coordinates.
            self.countArea.append([int(x*self.imgScale[1]),int(y*self.imgScale[0])])
            exampleImageWithArea = copy.deepcopy(self.exampleImage)
            # Mark each vertex and preview the filled polygon.
            for point in self.countArea:
                exampleImageWithArea[point[1]-10:point[1]+10,point[0]-10:point[0]+10] = (0,255,255)
            cv2.fillConvexPoly(exampleImageWithArea, np.array(self.countArea), (0,0,255))
            self.show_image_label(exampleImageWithArea)
            print(self.countArea)
    def select_area(self):
        """Toggle area-selection mode; on submit, outline the polygon and enable start."""
        # Changing the area mid-run needs a fresh example frame.
        if self.counter_thread_start_flag:
            ret, frame = self.videoCapture.read()
            if ret:
                self.exampleImage = frame
                self.show_image_label(frame)
        if not self.get_points_flag:
            self.pushButton_selectArea.setText("Please Submit the Area")
            self.get_points_flag = 1
            self.countArea = []
            self.pushButton_openVideo.setEnabled(False)
            self.pushButton_start.setEnabled(False)
        else:
            self.pushButton_selectArea.setText("Please Select the Area")
            self.get_points_flag = 0
            exampleImage = copy.deepcopy(self.exampleImage)
            # Outline the selected polygon:
            for i in range(len(self.countArea)):
                cv2.line(exampleImage, tuple(self.countArea[i]), tuple(self.countArea[(i + 1) % (len(self.countArea))]), (0, 0, 255), 2)
            self.show_image_label(exampleImage)
            # Re-enable the relevant buttons:
            self.pushButton_openVideo.setEnabled(True)
            self.pushButton_start.setEnabled(True)
    def show_image_label(self, img_np):
        """Render a BGR numpy frame into the image label (converted + resized)."""
        img_np = cv2.cvtColor(img_np,cv2.COLOR_BGR2RGB)
        img_np = cv2.resize(img_np, self.label_image_size)
        frame = QImage(img_np, self.label_image_size[0], self.label_image_size[1], QImage.Format_RGB888)
        pix = QPixmap.fromImage(frame)
        self.label_image.setPixmap(pix)
        self.label_image.repaint()
    def start_count(self):
        """Start counting (running_flag 0 -> 1) or stop it (1 -> 0)."""
        if self.running_flag == 0:
            # Clear counters and their display labels:
            KalmanBoxTracker.count = 0
            for item in self.show_label:
                vars(self)[f"label_{item}"].setText('0')
            # Truncate the results file:
            with open("Final/Final.txt", "w") as f:
                pass
            # Start:
            self.running_flag = 1
            self.pause_flag = 0
            self.pushButton_start.setText("You Can Stop it")
            self.pushButton_openVideo.setEnabled(False)
            self.pushButton_selectArea.setEnabled(False)
            # Push the current parameters into the counter thread:
            self.counterThread.sin_runningFlag.emit(self.running_flag)
            self.counterThread.sin_countArea.emit(self.countArea)
            self.counterThread.sin_videoList.emit(self.videoList)
            # Start the counter thread:
            self.counterThread.start()
            self.pushButton_pause.setEnabled(True)
        elif self.running_flag == 1: #push stop button
            # Stop the system:
            self.running_flag = 0
            self.counterThread.sin_runningFlag.emit(self.running_flag)
            self.pushButton_openVideo.setEnabled(True)
            self.pushButton_selectArea.setEnabled(True)
            self.pushButton_start.setText("Please Start")
    def done(self,sin):
        """Counter-thread completion callback (sin == 1 means finished)."""
        if sin == 1:
            self.pushButton_openVideo.setEnabled(True)
            self.pushButton_start.setEnabled(False)
            self.pushButton_start.setText("Start")
    def update_counter_results(self,counter_results):
        """Append new results to Final/Final.txt and bump the per-class and sum labels."""
        with open("Final/Final.txt", "a") as f:
            for i, result in enumerate(counter_results):
                # result[2] is the class name; increment its label and the total.
                label_var = vars(self)[f"label_{result[2]}"]
                label_var.setText(str(int(label_var.text())+1))
                label_var.repaint()
                label_sum_var = vars(self)[f"label_sum"]
                label_sum_var.setText(str(int(label_sum_var.text()) + 1))
                label_sum_var.repaint()
                f.writelines(' '.join(map(lambda x: str(x),result)))
                f.write(("\n"))
        # print("************************************************",len(counter_results))
    def pause(self):
        """Toggle pause; the flag is forwarded to the counter thread."""
        if self.pause_flag == 0:
            self.pause_flag = 1
            self.pushButton_pause.setText("Continue")
            self.pushButton_start.setEnabled(False)
        else:
            self.pause_flag = 0
            self.pushButton_pause.setText("Pause")
            self.pushButton_start.setEnabled(True)
        self.counterThread.sin_pauseFlag.emit(self.pause_flag)
#================================================================
#Main Function of Project Running:-
#================================================================
if __name__ == '__main__':
    # Launch the Qt event loop and show the main window.
    app = QApplication(sys.argv)
    myWin = App()
    myWin.show()
    sys.exit(app.exec_())
#======================================================================================================================
|
{"/Main.py": ["/Detection/detect2.py"]}
|
26,048
|
vakhov/timetable-of-classes
|
refs/heads/master
|
/init_lessons.py
|
"""Заполнение таблицы данными о занятиях"""
from datetime import datetime, timedelta
from app import db, Lesson
now = datetime.utcnow()
def td(days=1, hours=0):
    """Return the module-load timestamp shifted forward by *days* and *hours*."""
    offset = timedelta(days=days, hours=hours)
    return now + offset
def init_lessons():
    """Create the schema and seed the lessons table with demo data."""
    db.create_all()
    # Demo lessons spread over the next four days, two per group.
    lessons = [
        dict(subject='Физика', datetime=td(1), room='1C', address='Ленина 20а', group='ПР1',
             topic='Теория относительности'),
        dict(subject='Программирование', datetime=td(1, 2), room='2B', address='Ленина 20а', group='ПР1',
             topic='Представление алгоритмов'),
        dict(subject='Физика', datetime=td(2), room='3A', address='Ленина 20а', group='ПР2',
             topic='Траектория'),
        dict(subject='Программирование', datetime=td(2, 2), room='1C', address='Ленина 20а', group='ПР2',
             topic='Типы данных, операции и выражения'),
        dict(subject='Физика', datetime=td(3), room='11D', address='Ленина 20а', group='ПР3',
             topic='Средняя скорость'),
        dict(subject='Программирование', datetime=td(3, 2), room='6C', address='Ленина 20а', group='ПР3',
             topic='Отладка простейших задач'),
        dict(subject='Физика', datetime=td(4), room='1C', address='Ленина 20а', group='ПР4',
             topic='Сообщающиеся сосуды'),
        dict(subject='Программирование', datetime=td(4, 2), room='2C', address='Ленина 20а', group='ПР4',
             topic='Указатели и операции с адресами')
    ]
    for lesson in lessons:
        db.session.add(Lesson(**lesson))
    # Single commit after all adds.
    db.session.commit()
if __name__ == '__main__':
    # Seed the database when run as a script.
    init_lessons()
|
{"/init_lessons.py": ["/app.py"]}
|
26,049
|
vakhov/timetable-of-classes
|
refs/heads/master
|
/bot.py
|
"""Telegram Bot - Расписание занятий преподователя"""
from datetime import datetime
import telegram
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
db = SQLAlchemy(app)
TOKEN = '<TELEGRAM BOT TOKEN>'
bot = telegram.Bot(token=TOKEN)
URL = '<WEB SERVER URI>'
class Lesson(db.Model):
    """One timetable entry (lesson) of the teacher.

    Mirrors the model of the same name in app.py; both map the 'lessons' table.
    """
    __tablename__ = 'lessons'
    id = db.Column(db.Integer, primary_key=True)
    subject = db.Column(db.String(100), nullable=False)
    # Defaults to the creation time (UTC) when not supplied.
    datetime = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    room = db.Column(db.String(4), nullable=False)
    address = db.Column(db.String(100), nullable=False)
    group = db.Column(db.String(10), nullable=False)
    topic = db.Column(db.String(100), nullable=False)
    def __repr__(self):
        # Fix: was `def repr`, which is never invoked by repr(); the identical
        # model in app.py correctly defines __repr__.
        return '<Lessons %r>' % self.subject
def all_lessons():
    """Return a plain-text list of the teacher's next lessons.

    Only the first 5 lessons (ordered by date) are shown.
    """
    lessons_query = db.session.query(Lesson).order_by(Lesson.datetime).limit(5)
    html = ''
    for lesson_item in lessons_query:
        html += """
        Предмет - {lesson.subject}
        Дата и время - {lesson.datetime}
        Место проведения - {lesson.address}
        /lesson_{lesson.id}
        """.format(lesson=lesson_item)
    return html
def lesson(lesson_id):
    """Return the detail text for one lesson (404s when the id is unknown)."""
    lesson_query = db.session.query(Lesson).get_or_404(lesson_id)
    html = """
    Предмет - {lesson.subject}
    Дата и время - {lesson.datetime}
    Место проведения - {lesson.address}
    Аудитория - {lesson.room}
    Группа студентов - {lesson.group}
    Тема занятия - {lesson.topic}
    Чтобы увидеть все занятия нажмите /all
    """.format(lesson=lesson_query)
    return html
@app.route('/hook', methods=['POST', 'GET'])
def webhook_handler():
    """Telegram webhook endpoint: dispatch incoming commands and reply in chat."""
    if request.method == 'POST':
        try:
            response = ''
            update = telegram.Update.de_json(request.get_json(force=True), bot)
            chat_id = update.message.chat.id
            text = update.message.text
            if text in ['/start', '/help']:
                response = """
                Привет, я помощник в расписании!
                Чтобы увидеть все занятия нажмите /all
                """
            elif text == '/all':
                response = all_lessons()
            elif '/lesson_' in text:
                # Command format: /lesson_<id>
                command, lesson_id = text.split('_')
                response = lesson(lesson_id)
            bot.send_message(chat_id=chat_id, text=response)
        # NOTE(review): this broad except silently swallows every error (bad
        # payloads, DB failures, Telegram errors) — should at least log `err`.
        except Exception as err:
            pass
    # Always 200/ok so Telegram does not retry the update.
    return 'ok'
@app.route('/set_webhook', methods=['GET', 'POST'])
def set_webhook():
    """Register this server's /hook endpoint as the Telegram webhook."""
    hooked = bot.setWebhook('https://{url}/hook'.format(url=URL))
    return 'webhook is set' if hooked is True else 'webhook is not set'
if __name__ == '__main__':
    # Run the Flask development server.
    app.run()
|
{"/init_lessons.py": ["/app.py"]}
|
26,050
|
vakhov/timetable-of-classes
|
refs/heads/master
|
/app.py
|
"""Расписание занятий преподователя"""
from datetime import datetime
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
db = SQLAlchemy(app)
class Lesson(db.Model):
    """One timetable entry (lesson) of the teacher, stored in the 'lessons' table."""
    __tablename__ = 'lessons'
    id = db.Column(db.Integer, primary_key=True)
    subject = db.Column(db.String(100), nullable=False)
    # Defaults to the creation time (UTC) when not supplied.
    datetime = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    room = db.Column(db.String(4), nullable=False)
    address = db.Column(db.String(100), nullable=False)
    group = db.Column(db.String(10), nullable=False)
    topic = db.Column(db.String(100), nullable=False)
    def __repr__(self):
        return '<Lessons %r>' % self.subject
@app.route('/lesson/all')
def all_lessons():
    """Render an HTML list of the teacher's next lessons.

    Only the first 5 lessons (ordered by date) are shown.
    """
    lessons_query = db.session.query(Lesson).order_by(Lesson.datetime).limit(5)
    html = ''
    for lesson_item in lessons_query:
        html += """
        <p>
            Предмет - {lesson.subject}<br/>
            Дата и время - {lesson.datetime}<br/>
            Место проведения - {lesson.address}<br/>
            <a href="/lesson/{lesson.id}">Подробнее</a>
        </p>
        """.format(lesson=lesson_item)
    return html
@app.route('/lesson/<int:lesson_id>')
def lesson(lesson_id):
    """Render the HTML detail view for one lesson (404s when the id is unknown)."""
    lesson_query = db.session.query(Lesson).get_or_404(lesson_id)
    html = """
    <p>
        Предмет - {lesson.subject}<br/>
        Дата и время - {lesson.datetime}<br/>
        Место проведения - {lesson.address}<br/>
        Аудитория - {lesson.room}<br/>
        Группа студентов - {lesson.group}<br/>
        Тема занятия - {lesson.topic}
    </p>
    """.format(lesson=lesson_query)
    return html
if __name__ == '__main__':
    # Run the Flask development server.
    app.run()
|
{"/init_lessons.py": ["/app.py"]}
|
26,063
|
yarickprih/django-word-frequency-analizer
|
refs/heads/master
|
/word_analizer/services.py
|
import math
from typing import Any, Dict, List
import nltk
from nltk import RegexpTokenizer, pos_tag
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.stem import SnowballStemmer, WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
def remove_punctuation_and_tockenize(text: str) -> List[str]:
    """Lower-case *text*, strip punctuation and split it into word tokens.

    Note: returns a list of tokens (the previous ``-> str`` annotation was wrong).
    The typo in the name ("tockenize") is kept — other functions here call it.
    """
    text = text.lower()
    tokenizer = RegexpTokenizer(r"\w+")
    return tokenizer.tokenize(text)
def remove_stop_words(text: str) -> List[str]:
    """Tokenize *text* and drop English stop words ('i', 'me', 'my', 'we', ...)."""
    return [w for w in remove_punctuation_and_tockenize(text) if not w in stop_words]
def create_freqdist(text: str):
    """Return (word, frequency) items for the stop-word-filtered tokens of *text*."""
    return FreqDist(remove_stop_words(text)).items()
def max_freq(text):
    """Return the highest word frequency occurring in *text*."""
    return max(freq for _, freq in create_freqdist(text))
def calc_most_freq(text):
    """Find most frequent words (relative frequency >= 67%) in the text.

    Returns a dict mapping each qualifying word to its absolute frequency.
    """
    # Hoist max_freq out of the comprehension: each call re-tokenizes the whole
    # text and rebuilds the FreqDist, which made this quadratic in word count.
    peak = max_freq(text)
    return {
        word: freq
        for word, freq in create_freqdist(text)
        if freq / peak >= 0.67
    }
def avg_freq(text):
    """Find averagely frequent words (relative frequency in [34%, 66%]) in the text.

    Returns a dict mapping each qualifying word to its absolute frequency.
    """
    # Hoist max_freq out of the comprehension (it re-tokenizes the text on
    # every call), and use a chained comparison instead of two ratio tests.
    peak = max_freq(text)
    return {
        word: freq
        for word, freq in create_freqdist(text)
        if 0.34 <= freq / peak <= 0.66
    }
def calc_least_freq(text):
    """Find least frequent words (relative frequency <= 33%) in the text.

    Returns a dict mapping each qualifying word to its absolute frequency.
    """
    # Hoist max_freq out of the comprehension: it re-tokenizes the whole text
    # per word otherwise.
    peak = max_freq(text)
    return {
        word: freq
        for word, freq in create_freqdist(text)
        if freq / peak <= 0.33
    }
|
{"/word_analizer/forms.py": ["/word_analizer/models.py", "/word_analizer/services.py"], "/word_analizer/views.py": ["/word_analizer/forms.py", "/word_analizer/models.py"], "/word_analizer/urls.py": ["/word_analizer/views.py"]}
|
26,064
|
yarickprih/django-word-frequency-analizer
|
refs/heads/master
|
/word_analizer/models.py
|
from django.db import models
from django.urls import reverse_lazy
from word_analizer import services
# Create your models here.
class Text(models.Model):
    """Model definition for raw text and it's word frequencies.

    The three JSON fields are derived from ``text`` on every save().
    """
    text = models.TextField(
        verbose_name="Text",
        blank=False,
        null=False,
        default="",
        help_text="Enter text in English to let us analize it.",
    )
    # word -> frequency maps, computed by word_analizer.services on save().
    most_frequent = models.JSONField(
        blank=True, null=True, verbose_name="Most Frequent Words"
    )
    average_frequency = models.JSONField(
        blank=True, null=True, verbose_name="Average Frequency Words"
    )
    least_frequent = models.JSONField(
        blank=True, null=True, verbose_name="Least Frequent Words"
    )
    class Meta:
        """Meta definition."""
        verbose_name = "Text"
        verbose_name_plural = "Texts"
    def __str__(self):
        # Short preview of the stored text.
        return self.text[:15]
    def get_absolute_url(self):
        return reverse_lazy("text_detail_view", args=[self.pk])
    def save(self, *args, **kwargs):
        # Recompute the frequency buckets before persisting.
        self.most_frequent = services.calc_most_freq(self.text)
        self.average_frequency = services.avg_freq(self.text)
        self.least_frequent = services.calc_least_freq(self.text)
        super().save(*args, **kwargs)
|
{"/word_analizer/forms.py": ["/word_analizer/models.py", "/word_analizer/services.py"], "/word_analizer/views.py": ["/word_analizer/forms.py", "/word_analizer/models.py"], "/word_analizer/urls.py": ["/word_analizer/views.py"]}
|
26,065
|
yarickprih/django-word-frequency-analizer
|
refs/heads/master
|
/word_analizer/forms.py
|
from django import forms
from .models import Text
import word_analizer.services as services
class TextForm(forms.ModelForm):
    """ModelForm for submitting raw text to be analyzed (only the 'text' field)."""
    text = forms.CharField(
        widget=forms.Textarea(attrs={"rows": "5", "class": "form-control"})
    )
    class Meta:
        model = Text
        fields = ("text",)
|
{"/word_analizer/forms.py": ["/word_analizer/models.py", "/word_analizer/services.py"], "/word_analizer/views.py": ["/word_analizer/forms.py", "/word_analizer/models.py"], "/word_analizer/urls.py": ["/word_analizer/views.py"]}
|
26,066
|
yarickprih/django-word-frequency-analizer
|
refs/heads/master
|
/word_analizer/views.py
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, ListView
from .forms import TextForm
from .models import Text
class TextListView(ListView):
    """List View of analized texts."""
    model = Text
    template_name = "word_analizer/text_list.html"
class TextDetailView(DetailView):
    """Detail View of specific analized text."""
    model = Text
    template_name = "word_analizer/text_detail.html"
class TextCreateView(CreateView):
    """Create View for raw text.

    On success, redirects via Text.get_absolute_url (no success_url needed).
    """
    model = Text
    form_class = TextForm
    template_name = "word_analizer/text_create.html"
|
{"/word_analizer/forms.py": ["/word_analizer/models.py", "/word_analizer/services.py"], "/word_analizer/views.py": ["/word_analizer/forms.py", "/word_analizer/models.py"], "/word_analizer/urls.py": ["/word_analizer/views.py"]}
|
26,067
|
yarickprih/django-word-frequency-analizer
|
refs/heads/master
|
/word_analizer/urls.py
|
from django.urls import path
from .views import TextDetailView, TextListView, TextCreateView
urlpatterns = [
    # Order matters: the literal "texts/" route must precede the catch-all "<pk>/".
    path("", TextCreateView.as_view(), name="text_create_view"),
    path("texts/", TextListView.as_view(), name="text_list_view"),
    path("<pk>/", TextDetailView.as_view(), name="text_detail_view"),
]
|
{"/word_analizer/forms.py": ["/word_analizer/models.py", "/word_analizer/services.py"], "/word_analizer/views.py": ["/word_analizer/forms.py", "/word_analizer/models.py"], "/word_analizer/urls.py": ["/word_analizer/views.py"]}
|
26,080
|
marcellamartns/agenda
|
refs/heads/master
|
/conexao.py
|
# -*- coding: utf-8 -*-
from usuario import Usuario
from contato import Contato
from pymongo import MongoClient
from bson.objectid import ObjectId
class Conexao(object):
    """Thin MongoDB data-access layer over the 'usuario' collection.

    Contacts are stored embedded in each user document under 'contatos'.
    """
    def __init__(self, banco):
        # Connects to a local MongoDB and selects the given database.
        conexao_banco = MongoClient('mongodb://localhost:27017/')
        nome_banco = conexao_banco[banco]
        self._colecao = nome_banco["usuario"]
    def inserir_usuario(self, usuario):
        """Insert a new user document."""
        self._colecao.insert_one(usuario.dicionario_inserir())
    def inserir_contato(self, id_usuario, contato):
        """Push a contact onto the user's embedded 'contatos' array."""
        qry = {"_id": ObjectId(id_usuario)}
        fld = {"$push": {"contatos": contato.dicionario_inserir_contato()}}
        a = self._colecao.update_one(qry, fld)
        # print(a.matched_count)
    def atualizar_contato(self, id_usuario, contato):
        """Update one embedded contact's fields via the positional ($) operator."""
        qry = {"_id": ObjectId(id_usuario), "contatos._id": ObjectId(contato.id_)}
        fld = {"$set": {
            "contatos.$.nome_contato": contato.nome_contato,
            "contatos.$.telefone": contato.telefone,
            "contatos.$.email": contato.email,
            "contatos.$.complemento": contato.complemento
        }
        }
        a = self._colecao.update_one(qry, fld)
        # print(a.matched_count)
    def busca_usuario(self, usuario, senha):
        """Return the user document matching name and password, or None."""
        # NOTE(review): passwords appear to be stored/compared in plain text.
        qry = {"nome_usuario": usuario, "senha": senha}
        return self._colecao.find_one(qry)
    def busca_nome_usuario(self, nome_usuario):
        """Return the user document with the given user name, or None."""
        return self._colecao.find_one({"nome_usuario": nome_usuario})
    def busca_contatos(self, idusuario):
        """Return all of the user's embedded contacts as Contato objects."""
        usuario = self._colecao.find_one({"_id": ObjectId(idusuario)})
        contatos_lista = []
        for contatos in usuario["contatos"]:
            contato = Contato(contatos["_id"], contatos["nome_contato"],
                              contatos["telefone"], contatos["email"],
                              contatos["complemento"])
            # print(contato)
            contatos_lista.append(contato)
        return contatos_lista
    def busca_contato(self, id_usuario, id_contato):
        """Return one contact of the user as a Contato (projection via $elemMatch)."""
        qry = {"_id": ObjectId(id_usuario)}
        fld = {
            "contatos": {
                "$elemMatch": {"_id": ObjectId(id_contato)}
            }
        }
        usuario = self._colecao.find_one(qry, fld)
        # print(usuario)
        # $elemMatch projects at most one element, so the first iteration returns it.
        for contatos in usuario["contatos"]:
            return Contato(contatos["_id"], contatos["nome_contato"],
                           contatos["telefone"], contatos["email"],
                           contatos["complemento"])
    def deleta_contato(self, id_usuario, id_contato):
        """Remove one contact from the user's embedded 'contatos' array."""
        qry = {"_id": ObjectId(id_usuario)}
        fld = {"$pull": {"contatos": {"_id": ObjectId(id_contato)}}}
        self._colecao.update_one(qry, fld)
|
{"/conexao.py": ["/usuario.py", "/contato.py"], "/agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"], "/main_agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"]}
|
26,081
|
marcellamartns/agenda
|
refs/heads/master
|
/agenda.py
|
# -*- coding: utf-8 -*-
from usuario import Usuario
from contato import Contato
from conexao import Conexao
# Ad-hoc manual test script for the Conexao data-access layer; the commented
# lines below were used to exercise the individual operations one at a time.
contato = Contato(nome_contato="Joana", telefone="9988721341",
                  email="joana@123.com", complemento="trabalho")
usuario = Usuario(nome_usuario="marcella", senha="123", contatos=contato)
conexao = Conexao("agenda")
# id_usuario=usuario.id_
# conexao.inserir_usuario(usuario)
# print("Usuario ok")
# print(usuario.id_)
# conexao.inserir_contato("5c067bbf9dc6d619bd718da1", contato)
# print("Inserir contato ok")
# conexao.buca_usuario("marcela", "123")
# conexao.atualizar_contato(usuario.id_, contato)
# s = conexao.busca_contatos("5c0811a49dc6d64c51d17e1b")
# for contato in s:
#     print(contato.nome_contato)
# a= conexao.busca_contato("5c0811a49dc6d64c51d17e1b", "5c08180a9dc6d653b70bfc8e")
# print(a)
conexao.deleta_contato("5c0811a49dc6d64c51d17e1b", "5c08180a9dc6d653b70bfc8e")
|
{"/conexao.py": ["/usuario.py", "/contato.py"], "/agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"], "/main_agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"]}
|
26,082
|
marcellamartns/agenda
|
refs/heads/master
|
/usuario.py
|
# -*- coding: utf-8 -*-
from bson.objectid import ObjectId
class Usuario(object):
    """Agenda user with credentials and an embedded contact list."""
    def __init__(self, id_=None, nome_usuario=None, senha=None, contatos=None):
        # A fresh ObjectId is generated when none is supplied (new user).
        self._id = id_ if id_ else ObjectId()
        self._nome_usuario = nome_usuario
        self._senha = senha
        self._contatos = contatos
    @property
    def id_(self):
        return self._id
    @property
    def nome_usuario(self):
        return self._nome_usuario
    def dicionario_inserir(self):
        """Return the MongoDB document for inserting this user.

        Note: 'contatos' always starts empty; contacts are pushed separately.
        """
        return {
            "_id": self._id,
            "nome_usuario": self._nome_usuario,
            "senha": self._senha,
            "contatos": []
        }
|
{"/conexao.py": ["/usuario.py", "/contato.py"], "/agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"], "/main_agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"]}
|
26,083
|
marcellamartns/agenda
|
refs/heads/master
|
/main_agenda.py
|
# -*- coding: utf-8 -*-
from usuario import Usuario
from contato import Contato
from conexao import Conexao
import json
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Landing page: requires the session cookie, otherwise redirects to login."""
    def get(self):
        if not self.get_cookie("cookieagenda"):
            self.redirect("/autenticar")
        else:
            self.render("principal.html")
class Cadastro(tornado.web.RequestHandler):
    """User registration form; rejects duplicate user names."""
    def get(self):
        self.render("cadastro.html")
    def post(self):
        conexao = Conexao("agenda")
        usuario = self.get_argument("nomeusuario")
        senha = self.get_argument("senhausuario")
        usuarios = conexao.busca_nome_usuario(usuario)
        print("AQUI")
        if usuarios:
            # Name already taken: show the form again.
            print("usuario existe")
            self.render("cadastro.html")
        else:
            print("usuario ok")
            novo_usuario = Usuario(nome_usuario=usuario, senha=senha)
            conexao.inserir_usuario(novo_usuario)
            self.redirect("/autenticar")
class Agenda(tornado.web.RequestHandler):
    """Lists the logged-in user's contacts (user id comes from the secure cookie)."""
    def get(self):
        conexao = Conexao("agenda")
        id_usuario = self.get_secure_cookie("cookieagenda").decode("utf-8")
        contatos = conexao.busca_contatos(id_usuario)
        self.render("agenda.html", contatos=contatos)
    def post(self):
        self.render("agenda.html", contatos="")
class ContatoHandler(tornado.web.RequestHandler):
    """Show (GET), update (PUT) or delete (DELETE) a single contact."""
    def get(self, contato_id):
        conexao = Conexao("agenda")
        id_usuario = self.get_secure_cookie("cookieagenda").decode("utf-8")
        contato = conexao.busca_contato(id_usuario, contato_id)
        self.render("atualiza_contato.html", contato=contato)
    def put(self, contato_id):
        # Body is JSON with keys: nome, telefone, email, complemento.
        conexao = Conexao("agenda")
        id_usuario = self.get_secure_cookie("cookieagenda").decode("utf-8")
        json_data = json.loads(self.request.body.decode("utf-8"))
        print(json_data)
        contato = Contato(contato_id, json_data["nome"], json_data["telefone"],
                          json_data["email"], json_data["complemento"])
        conexao.atualizar_contato(id_usuario, contato)
        self.render("atualiza_contato.html", contato=contato)
    def delete(self, contato_id):
        conexao = Conexao("agenda")
        id_usuario = self.get_secure_cookie("cookieagenda").decode("utf-8")
        conexao.deleta_contato(id_usuario, contato_id)
        self.write("ok")
class AdicionarContatos(tornado.web.RequestHandler):
    """Form for adding a new contact to the logged-in user's agenda."""

    def get(self):
        self.render("add_contato.html")

    def post(self):
        db = Conexao("agenda")
        # Build the new contact straight from the submitted form fields.
        novo = Contato(
            nome_contato=self.get_argument("nomecontato"),
            telefone=self.get_argument("telefone"),
            email=self.get_argument("email"),
            complemento=self.get_argument("complemento"),
        )
        usuario_id = self.get_secure_cookie("cookieagenda").decode("utf-8")
        db.inserir_contato(usuario_id, novo)
        self.redirect("/")
class Autenticar(tornado.web.RequestHandler):
    # Login page: on success a signed session cookie stores the user's id.
    def get(self):
        self.render("autenticar.html", teste="bibi")
    def post(self):
        conexao = Conexao("agenda")
        nome = self.get_argument("usuario")
        senha = self.get_argument("senha")
        usuario = conexao.busca_usuario(nome, senha)
        if usuario:
            # Store the Mongo _id in a signed cookie for later requests.
            self.set_secure_cookie("cookieagenda", str(usuario["_id"]))
            self.redirect("/")
        else:
            # Wrong credentials: re-render the login page with an error flag.
            self.render("autenticar.html", teste="invalido")
class Sair(tornado.web.RequestHandler):
    # Logout: drop the session cookie and send the user back to the login page.
    def get(self):
        self.clear_cookie("cookieagenda")
        self.redirect("/autenticar")
def make_app():
    """Build the Tornado application: URL routes, cookie secret, static path."""
    # NOTE(review): the cookie secret is hard-coded in source; consider
    # loading it from the environment instead.
    return tornado.web.Application([
        (r"/cadastro", Cadastro),
        (r"/autenticar", Autenticar),
        (r"/sair", Sair),
        (r"/contato/([a-z0-9]+)", ContatoHandler),  # contact id captured from the path
        (r"/", MainHandler),
        (r"/agenda", Agenda),
        (r"/contatos", AdicionarContatos),
    ],
        cookie_secret="jhlhçgguilyojhlfhlfyupfyoupfyufy",
        static_path="static"
    )
if __name__ == "__main__":
    # Start the HTTP server on port 8888 and run the IOLoop forever.
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
|
{"/conexao.py": ["/usuario.py", "/contato.py"], "/agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"], "/main_agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"]}
|
26,084
|
marcellamartns/agenda
|
refs/heads/master
|
/contato.py
|
# -*- coding: utf-8 -*-
from bson.objectid import ObjectId
class Contato(object):
    """Domain object for a single agenda contact."""

    def __init__(self, id_=None, nome_contato=None, telefone=None, email=None,
                 complemento=None):
        # Reuse an existing document id when supplied, otherwise create one.
        self._id = ObjectId() if not id_ else id_
        self._nome_contato = nome_contato
        self._telefone = telefone
        self._email = email
        self._complemento = complemento

    @property
    def id_(self):
        """Mongo document id of this contact."""
        return self._id

    @property
    def nome_contato(self):
        return self._nome_contato

    @property
    def telefone(self):
        return self._telefone

    @property
    def email(self):
        return self._email

    @property
    def complemento(self):
        return self._complemento

    def dicionario_inserir_contato(self):
        """Dict representation used when inserting this contact into MongoDB."""
        campos = (
            ("_id", self.id_),
            ("nome_contato", self._nome_contato),
            ("telefone", self._telefone),
            ("email", self._email),
            ("complemento", self._complemento),
        )
        return dict(campos)
|
{"/conexao.py": ["/usuario.py", "/contato.py"], "/agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"], "/main_agenda.py": ["/usuario.py", "/contato.py", "/conexao.py"]}
|
26,104
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/data/load_data.py
|
import pandas as pd
import os
import json
import shutil
import numpy as np
from gan_thesis.data.datagen import *
from definitions import DATA_DIR, ROOT_DIR
from dataset_spec import *
# from params import mvn_test1_highfeature, mvn_test2_highfeature
class Dataset:
    """Bundle of a dataset's train/test/full frames, metadata and model samples."""

    def __init__(self, train, test, data, info, samples):
        self.train = train      # training split (DataFrame)
        self.test = test        # held-out split (DataFrame)
        self.data = data        # full dataset (DataFrame)
        self.info = info        # metadata dict (columns, n_test, ...)
        self.samples = samples  # model name -> sampled DataFrame

    def get_columns(self):
        """Return (discrete_columns, continuous_columns) from the info dict."""
        return (self.info.get('discrete_columns'),
                self.info.get('continuous_columns'))
def load_data(dataset, data_params=None):
    """Load (or generate and cache) a dataset by identifier.

    :param dataset: string 'dataset-identifier', e.g. 'mvn-test1'; the part
        before the first '-' selects the generator in ``load_wrapper``.
    :param data_params: parameter dict, required for synthetic datasets.
    :return: a :class:`Dataset` with train/test/full data, info and any
        previously saved model samples.
    """
    alist = dataset.split(sep='-', maxsplit=1)
    dataset = alist[0]
    pathname = os.path.join(DATA_DIR, *alist)
    filelist = ['train.csv', 'test.csv', 'data.csv', 'info.json']
    filelist = map(lambda x: os.path.join(pathname, x), filelist)
    # Regenerate when any of the four cached files is missing.
    if not all([os.path.isfile(f) for f in filelist]):
        # BUG FIX: exist_ok=True — the directory may already exist with an
        # incomplete file set (e.g. an interrupted earlier run); plain
        # makedirs would raise FileExistsError in that case.
        os.makedirs(pathname, exist_ok=True)
        load_wrapper[dataset](pathname, data_params)
    train = pd.read_csv(os.path.join(pathname, 'train.csv'))
    test = pd.read_csv(os.path.join(pathname, 'test.csv'))
    df = pd.read_csv(os.path.join(pathname, 'data.csv'))
    # Pick up any previously saved model samples living next to the data.
    samples_dir = {}
    for model in ['ctgan', 'tgan', 'wgan']:
        fname = os.path.join(pathname, model, '{0}_{1}_samples.csv'.format(dataset, model))
        if os.path.isfile(fname):
            samples_dir[model] = pd.read_csv(fname)
    with open(os.path.join(pathname, 'info.json'), "r") as read_file:
        info = json.load(read_file)
    return Dataset(train, test, df, info, samples_dir)
def load_adult(dirname, *args):
    """Load the local UCI Adult csv, shuffle, split and persist it under *dirname*."""
    n_test = 10000  # Same as CTGAN paper
    # Fixed column metadata for the Adult dataset.
    info = {
        "columns": ['age',
                    'workclass',
                    'fnlwgt',
                    'education',
                    'marital.status',
                    'occupation',
                    'relationship',
                    'race',
                    'sex',
                    'capital.gain',
                    'capital.loss',
                    'hours.per.week',
                    'native.country',
                    'income'],
        "discrete_columns": ['workclass',
                             'education',
                             'marital.status',
                             'occupation',
                             'relationship',
                             'race',
                             'sex',
                             'native.country',
                             'income'],
        "continuous_columns": ['age',
                               'fnlwgt',
                               'capital.gain',
                               'capital.loss',
                               'hours.per.week'],
        "n_test": n_test,
        "identifier": 'adult'
    }
    cc = info.get('columns')
    df = pd.read_csv(os.path.join(ROOT_DIR, 'adult.csv'), usecols=cc, header=0)
    # df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
    #                  names=cc, header=0)
    df = df.sample(frac=1).reset_index(drop=True)  # shuffle before splitting
    train, test = train_test_split(df=df, n_test=n_test)
    df.to_csv(os.path.join(dirname, 'data.csv'), index=False)
    train.to_csv(os.path.join(dirname, 'train.csv'), index=False)
    test.to_csv(os.path.join(dirname, 'test.csv'), index=False)
    with open(os.path.join(dirname, 'info.json'), "w") as write_file:
        json.dump(info, write_file)
def load_news(dirname, *args):
    """Load the local OnlineNews csv, shuffle, split and persist it under *dirname*."""
    # (Removed three leftover debug prints of column names.)
    n_test = 8000  # Same as CTGAN paper
    info = {
        "n_test": n_test,
        "identifier": 'news'
    }
    df = pd.read_csv(os.path.join(ROOT_DIR, 'news.csv'), sep=',', header=0)
    # Identifier / non-predictive columns are dropped before modelling.
    df = df.drop('url', axis=1)
    df = df.drop(' timedelta', axis=1)  # note the leading space in the raw header
    cc = df.columns.to_list()
    info['columns'] = cc
    # Column index ranges follow the known layout of news.csv.
    info['discrete_columns'] = cc[11:17] + cc[29:37]
    info['continuous_columns'] = cc[:11] + cc[17:29] + cc[37:]
    df = df.sample(frac=1).reset_index(drop=True)
    train, test = train_test_split(df=df, n_test=n_test)
    df.to_csv(os.path.join(dirname, 'data.csv'), index=False)
    train.to_csv(os.path.join(dirname, 'train.csv'), index=False)
    test.to_csv(os.path.join(dirname, 'test.csv'), index=False)
    with open(os.path.join(dirname, 'info.json'), "w") as write_file:
        json.dump(info, write_file)
def load_bank(dirname, *args):
    """Load the local bank-marketing csv, shuffle, split and persist it under *dirname*."""
    n_test = 10000  # Same as CTGAN paper
    info = {
        "n_test": n_test,
        "identifier": 'bank'
    }
    df = pd.read_csv(os.path.join(ROOT_DIR, 'bank.csv'), sep=';', header=0)
    cc = df.columns.to_list()
    info['columns'] = cc
    # Column index ranges follow the known layout of bank.csv:
    # categorical: cols 1-9, 14 and 20; continuous: the rest.
    info['discrete_columns'] = cc[1:10] + [cc[14]] + [cc[20]]
    info['continuous_columns'] = [cc[0]] + cc[10:14] + cc[15:20]
    print(info['discrete_columns'])
    print(info['continuous_columns'])
    df = df.sample(frac=1).reset_index(drop=True)  # shuffle before splitting
    train, test = train_test_split(df=df, n_test=n_test)
    df.to_csv(os.path.join(dirname, 'data.csv'), index=False)
    train.to_csv(os.path.join(dirname, 'train.csv'), index=False)
    test.to_csv(os.path.join(dirname, 'test.csv'), index=False)
    with open(os.path.join(dirname, 'info.json'), "w") as write_file:
        json.dump(info, write_file)
def load_telecom(dirname, *args):
    """Load the local telecom-churn csv, shuffle, split and persist it under *dirname*."""
    n_test = 2000
    info = {
        "n_test": n_test,
        "identifier": 'telecom'
    }
    df = pd.read_csv(os.path.join(ROOT_DIR, 'telecom.csv'), sep=',', header=0)
    # Drop the row identifier; it carries no predictive signal.
    df = df.drop('customerID', axis=1)
    cc = df.columns.to_list()
    info['columns'] = cc
    # Column index ranges follow the known layout of telecom.csv.
    info['discrete_columns'] = cc[0:4] + cc[5:17] + [cc[19]]
    info['continuous_columns'] = [cc[4]] + cc[17:19]
    print(info['discrete_columns'])
    print(info['continuous_columns'])
    df = df.sample(frac=1).reset_index(drop=True)  # shuffle before splitting
    train, test = train_test_split(df=df, n_test=n_test)
    df.to_csv(os.path.join(dirname, 'data.csv'), index=False)
    train.to_csv(os.path.join(dirname, 'train.csv'), index=False)
    test.to_csv(os.path.join(dirname, 'test.csv'), index=False)
    with open(os.path.join(dirname, 'info.json'), "w") as write_file:
        json.dump(info, write_file)
def load_credit(dirname, *args):
    """Load the local credit-card fraud csv, shuffle, split and persist it under *dirname*."""
    n_test = 29000  # Same as CTGAN paper
    # Continuous features: Time, V1..V28, Amount; single discrete target: Class.
    cc = ['Time'] + ['V{0}'.format(i) for i in range(1, 29)] + ['Amount']
    dc = ['Class']
    info = {
        "columns": cc + dc,
        "discrete_columns": dc,
        "continuous_columns": cc,
        "n_test": n_test,
        "identifier": 'credit'
    }
    df = pd.read_csv(os.path.join(ROOT_DIR, 'creditcard.csv'), usecols=cc + dc, header=0)
    df = df.sample(frac=1).reset_index(drop=True)  # shuffle before splitting
    train, test = train_test_split(df=df, n_test=n_test)
    df.to_csv(os.path.join(dirname, 'data.csv'), index=False)
    train.to_csv(os.path.join(dirname, 'train.csv'), index=False)
    test.to_csv(os.path.join(dirname, 'test.csv'), index=False)
    with open(os.path.join(dirname, 'info.json'), "w") as write_file:
        json.dump(info, write_file)
def load_mvn_mixture(pathname, data_params):
    """Generate a Gaussian-mixture dataset from *data_params* and save it under *pathname*."""
    n_samples = data_params['n_samples']
    proportions = data_params['proportions']
    means = data_params['means']
    corrs = data_params['corrs']
    var = data_params['vars']
    # Use the caller's seed when given; otherwise draw one (recorded in info).
    if data_params.get('seed') is None:
        seed = np.random.randint(10000)
    else:
        seed = data_params.get('seed')
    df, info = mixture_gauss(n_samples, proportions, means, var, corrs, seed)
    info['seed'] = seed
    info['continuous_columns'] = df.columns.to_list()
    info['discrete_columns'] = []
    save_data(df, info, pathname)
def load_mvn(pathname, data_params):
    """Generate a multivariate-normal dataset and persist it under *pathname*."""
    n_samples = data_params['n_samples']
    mean = data_params.get('mean')
    corr = data_params.get('corr')
    var = data_params.get('var')
    # Draw a random seed unless the caller pinned one for reproducibility;
    # the seed actually used is recorded in the info dict either way.
    seed = data_params.get('seed')
    if seed is None:
        seed = np.random.randint(10000)
    df, info = multivariate_df(n_samples, mean, var, corr, seed)
    info['seed'] = seed
    info['continuous_columns'] = df.columns.to_list()
    info['discrete_columns'] = []
    save_data(df, info, pathname)
def load_ln_mixture(pathname, data_params):
    """Generate a log-normal mixture dataset from *data_params* and save it under *pathname*."""
    n_samples = data_params['n_samples']
    proportions = data_params['proportions']
    means = data_params['means']
    corrs = data_params['corrs']
    var = data_params['vars']
    # Use the caller's seed when given; otherwise draw one (recorded in info).
    if data_params.get('seed') is None:
        seed = np.random.randint(10000)
    else:
        seed = data_params.get('seed')
    df, info = mixture_log_normal(n_samples, proportions, means, var, corrs, seed)
    info['seed'] = seed
    info['continuous_columns'] = df.columns.to_list()
    info['discrete_columns'] = []
    save_data(df, info, pathname)
def load_multinomial(pathname, data_params):
    """Generate a purely categorical (multinomial) dataset and save it under *pathname*."""
    n_samples = data_params.get('n_samples')
    probabilities = data_params.get('probabilities')
    # Use the caller's seed when given; otherwise draw one (recorded in info).
    if data_params.get('seed') is None:
        seed = np.random.randint(10000)
    else:
        seed = data_params.get('seed')
    df, info = multinomial(n_samples, probabilities, seed=seed)
    info['seed'] = seed
    info['discrete_columns'] = df.columns.to_list()
    info['continuous_columns'] = []
    save_data(df, info, pathname)
def load_cond_multinomial(pathname, data_params):
    """Generate categorical data with conditional dependencies and save it under *pathname*."""
    n_samples = data_params.get('n_samples')
    ind_probs = data_params.get('ind_probs')
    cond_probs = data_params.get('cond_probs')
    # Use the caller's seed when given; otherwise draw one (recorded in info).
    if data_params.get('seed') is None:
        seed = np.random.randint(10000)
    else:
        seed = data_params.get('seed')
    df, info = multinomial_cond(n_samples, ind_probs, cond_probs, seed)
    info['seed'] = seed
    info['discrete_columns'] = df.columns.to_list()
    info['continuous_columns'] = []
    save_data(df, info, pathname)
def load_gauss_cond(pathname, data_params):
    """Generate Gaussian mixtures conditioned on categorical features and save them under *pathname*."""
    n_samples = data_params.get('n_samples')
    ind_probs = data_params.get('ind_probs')
    cond_probs = data_params.get('cond_probs')
    # Without conditional probabilities the categorical part is independent.
    if cond_probs is None:
        mode = 'ind_cat'
    else:
        mode = 'cond_cat'
    means = data_params.get('means')
    corrs = data_params.get('corrs')
    var = data_params.get('vars')
    # Use the caller's seed when given; otherwise draw one (recorded in info).
    if data_params.get('seed') is None:
        seed = np.random.randint(10000)
    else:
        seed = data_params.get('seed')
    if mode == 'ind_cat':
        cond_df, cond_info = multinomial(n_samples, ind_probs, seed=seed)
    else:
        cond_df, cond_info = multinomial_cond(n_samples, ind_probs, cond_probs, seed)
    df, info = cat_mixture_gauss(cond_df, cond_info, means, var, corrs, seed)
    info['seed'] = seed
    # Continuous columns = those added on top of the categorical frame.
    info['continuous_columns'] = [f for f in df.columns.to_list() if f not in cond_df.columns.to_list()]
    info['discrete_columns'] = cond_df.columns.to_list()
    save_data(df, info, pathname)
def load_gauss_cond_ext(pathname, data_params):
    # NOTE(review): incomplete stub — the values below are read but never
    # used, and no data is generated or saved; presumably meant to mirror
    # load_gauss_cond with an extra 'true_ind_probs' level. It is also not
    # registered in load_wrapper. TODO: finish or remove.
    n_samples = data_params.get('n_samples')
    tind_probs = data_params.get('true_ind_probs')
    ind_probs = data_params.get('ind_probs')
    cond_probs = data_params.get('cond_probs')
def load_ln(pathname, data_params):
    """Generate a log-normal dataset from *data_params* and save it under *pathname*."""
    n_samples = data_params['n_samples']
    mean = data_params['mean']
    var = data_params['var']
    corr = data_params['corr']
    # Use the caller's seed when given; otherwise draw one (recorded in info).
    if data_params.get('seed') is None:
        seed = np.random.randint(10000)
    else:
        seed = data_params.get('seed')
    df, info = log_normal_df(n_samples, mean, var, corr, seed)
    info['seed'] = seed
    info['continuous_columns'] = df.columns.to_list()
    info['discrete_columns'] = []
    save_data(df, info, pathname)
def save_data(df, info, dirname):
    """Shuffle *df*, split off 10% as test, and write the csvs plus info.json to *dirname*."""
    shuffled = df.sample(frac=1).reset_index(drop=True)
    n_test = int(np.floor(0.1 * len(shuffled)))
    train, test = train_test_split(df=shuffled, n_test=n_test)
    for frame, fname in ((shuffled, 'data.csv'), (train, 'train.csv'), (test, 'test.csv')):
        frame.to_csv(os.path.join(dirname, fname), index=False)
    with open(os.path.join(dirname, 'info.json'), "w") as write_file:
        json.dump(info, write_file)
def train_test_split(df, n_test):
    """Randomly split *df* into (train, test) with exactly *n_test* test rows.

    :param df: source DataFrame.
    :param n_test: number of rows sampled into the test split.
    :raises ValueError: if n_test is not smaller than len(df).
    """
    # Use an explicit exception instead of `assert`, which is stripped when
    # Python runs with -O and would silently allow an invalid split.
    if n_test >= len(df):
        raise ValueError("n_test larger than n_tot")
    test = df.sample(n=n_test)
    train = df.drop(test.index)
    return train, test
def save_samples(df, dataset, model, force=True):
    """Write sampled data to DATA_DIR/<dataset>/<identifier>/<model>/<dataset>_<model>_samples.csv.

    :param force: if False, keep an already-existing samples file untouched.
    """
    alist = dataset.split(sep='-', maxsplit=1)
    dataset = alist[0]
    fname = os.path.join(DATA_DIR, *alist, model, '{0}_{1}_samples.csv'.format(dataset, model))
    if os.path.isfile(fname) and not force:
        return
    # Create the model sub-directory on first save.
    base_path = os.path.dirname(fname)
    if not os.path.exists(base_path):
        os.makedirs(base_path)
    df.to_csv(fname, index=False)
# Dispatch table: dataset name (the prefix before '-') -> generator function
# used by load_data when the cached files are missing.
load_wrapper = {
    'adult': load_adult,
    'news': load_news,
    'telecom': load_telecom,
    'bank': load_bank,
    'credit': load_credit,
    'mvn': load_mvn,
    'mvn_mixture': load_mvn_mixture,
    'ln': load_ln,
    'ln_mixture': load_ln_mixture,
    'cat': load_multinomial,
    'cond_cat': load_cond_multinomial,
    'cat_mix_gauss': load_gauss_cond
}
def main():
    # Regenerate the three conditional Gaussian-mixture test datasets.
    # NOTE(review): gauss_mix_cond_test* presumably come from the star
    # import of dataset_spec — confirm they are defined there.
    load_data('cat_mix_gauss-test1', gauss_mix_cond_test1)
    load_data('cat_mix_gauss-test2', gauss_mix_cond_test2)
    load_data('cat_mix_gauss-test3', gauss_mix_cond_test3)
if __name__ == '__main__':
    main()
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,105
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/general/testbed.py
|
import shutil
from gan_thesis.evaluation.pMSE import *
from gan_thesis.evaluation.association import plot_all_association
from gan_thesis.evaluation.machine_learning import *
from gan_thesis.evaluation.plot_marginals import *
from gan_thesis.data.load_data import load_data
import os
import pandas as pd
#import gan_thesis.models.wgan.synthesizer as gan
import gan_thesis.models.wgan.synthesizer as gan
# import gan_thesis.models.wgan.synthesizer as gan
from definitions import RESULT_DIR
def main():
    """Train a WGAN on the adult dataset for 300 epochs with full evaluation.

    (Removed ~40 lines of commented-out experiment loops that obscured the
    live configuration; see version control for the history.)
    """
    data = 'adult'
    # NOTE(review): this aliases and mutates gan.DEF_PARAMS in place, so the
    # module-level defaults change for any later caller — confirm intended.
    params = gan.DEF_PARAMS
    params['training_set'] = data
    params['EPOCHS'] = 300
    params['eval'] = 'all'
    params['gen_num_layers'] = 2
    params['crit_num_layers'] = 2
    gan.main(params, optim=False)
def pmse_loop():
    """Compute the pMSE ratio of every model's samples on every benchmark
    dataset and write the result table to RESULT_DIR/pmse.csv.
    """
    # Single source of truth for the datasets (the old code duplicated the
    # list in the DataFrame index) and removal of a dead
    # `output = pd.DataFrame(...)` that was immediately overwritten.
    datasets = ['cat_mix_gauss-test1', 'cond_cat-test1', 'cat-test1',
                'mvn-test1', 'mvn-test2', 'mvn-test3',
                'mvn_mixture-test1', 'mvn_mixture-test2',
                'ln-test1', 'ln-test2']
    models = ['wgan', 'tgan', 'ctgan']
    rows = []
    for data in datasets:
        dataset = load_data(data)
        row = []
        for model in models:
            print('dataset: ' + data + ', model: ' + model)
            samples = dataset.samples.get(model)
            # One-hot encode so real and synthetic share a numeric feature space.
            samples_oh = pd.get_dummies(samples)
            train_oh = pd.get_dummies(dataset.train)
            # BUG FIX: pMSE_ratio requires the discrete-column list as its
            # third parameter; it was previously called with only two
            # arguments (TypeError). The frames are already one-hot encoded
            # here, so no further columns need dummy-encoding inside it.
            pmse = pMSE_ratio(real_df=train_oh, synth_df=samples_oh,
                              discrete_columns=[])
            row.append(pmse['logreg'])
        rows.append(row)
    output = pd.DataFrame(data=rows, columns=models, index=datasets)
    output.to_csv(os.path.join(RESULT_DIR, 'pmse.csv'))
def eval_loop():
    """Run the evaluation suite (MLE plots, marginals, association matrices)
    for every benchmark dataset.
    """
    for data in ['cat-test1',
                 'cat_mix_gauss-test1', 'cat_mix_gauss-test2', 'cat_mix_gauss-test3',
                 'cond_cat-test1', 'cond_cat-test2',
                 'ln-test2', 'ln-test3',
                 'mvn-test2', 'mvn-test3',
                 'mvn_mixture-test1', 'mvn_mixture-test2']:
        print('Starting MLE evaluation on samples...')
        # BUG FIX: the loaded dataset was bound to `dset` but used as
        # `dataset`, which raised NameError on the first iteration.
        dataset = load_data(data)
        plot_all_predictions_by_dimension(dataset, data)
        print('Plotting marginals of real and sample data...')
        plot_all_marginals(dataset, data)
        print('Plotting association matrices...')
        plot_all_association(dataset, data)
def ass_loop():
    # One-off helper: regenerate the association plots for selected datasets.
    for data in ['ln-test2']:
        dataset = load_data(data)
        plot_all_association(dataset, data)
if __name__ == '__main__':
    main()
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,106
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/wgan/synthesizer.py
|
from gan_thesis.evaluation.machine_learning import plot_predictions_by_dimension
from gan_thesis.evaluation.plot_marginals import plot_marginals
from gan_thesis.evaluation.association import plot_association
from gan_thesis.evaluation.pMSE import *
from gan_thesis.data.load_data import *
from gan_thesis.models.general.utils import save_model, load_model, save_json
#from gan_thesis.models.general.optimization import optimize
from gan_thesis.models.wgan.wgan import *
import datetime
import os
import pandas as pd
from definitions import RESULT_DIR
#from hyperopt import hp
EPOCHS = 1000

# Default training configuration for the WGAN-GP synthesizer; testbed.py
# mutates a reference to this dict before calling main().
DEF_PARAMS = {
    'eval': 'all',            # 'all' enables the post-training evaluation
    # NN Hyperparameters
    'EPOCHS': EPOCHS,
    'embedding_dim': 128,
    'gen_num_layers': 2,
    'gen_layer_sizes': 256,
    'crit_num_layers': 2,
    'crit_layer_sizes': 256,
    'mode': 'wgan-gp',
    'gp_const': 10,           # passed to WGAN; presumably the gradient-penalty weight — confirm
    'n_critic': 5,            # presumably critic updates per generator step — confirm in wgan.py
    'batch_size': 500,
    'hard': False,            # forwarded to WGAN.train — semantics defined in wgan.py
    'temp_anneal': False      # forwarded to WGAN.train — semantics defined in wgan.py
}
# HYPEROPT SPACE
# space = {
# 'embedding_dim': 2 ** hp.quniform('embedding_dim', 4, 9, 1),
# 'gen_num_layers': hp.quniform('gen_num_layers', 1, 5, 1),
# 'gen_layer_sizes': 2 ** hp.quniform('gen_layer_sizes', 4, 9, 1),
# 'crit_num_layers': hp.quniform('crit_num_layers', 1, 5, 1),
# 'crit_layer_sizes': 2 ** hp.quniform('crit_layer_sizes', 4, 9, 1),
# 'l2scale': hp.loguniform('l2scale', np.log10(10 ** -6), np.log10(0.2)),
# 'batch_size': 50 * hp.quniform('batch_size', 1, 50, 1)
# }
def build_and_train(params):
    """Construct a WGAN from *params* and train it on params['dataset'].

    Training is chunked into at most `max_iter` epochs per train() call
    (the in-file comment says the WGAN overflows at ~950 epochs), with the
    remainder trained in a final call.
    :return: the fitted WGAN instance.
    """
    # Expand scalar layer sizes into per-layer lists.
    gen_layers = [int(params['gen_layer_sizes'])] * int(params['gen_num_layers'])
    crit_layers = [int(params['crit_layer_sizes'])] * int(params['crit_num_layers'])
    d = params.get('dataset')
    params['gen_dim'] = gen_layers
    params['crit_dim'] = crit_layers
    params['output_dim'] = d.info.get('dim')
    epchs = params['EPOCHS']
    my_wgan = WGAN(params)
    print('Fitting a wgan model for {0} epochs...'.format(epchs))
    max_iter = 7  ##Wgan overflows at ~950 epochs
    # One timestamp shared by all chunks so logs land in the same run folder.
    curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    for i in range(epchs//max_iter):
        my_wgan.train(d.train, max_iter,
                      d.info.get('discrete_columns'),
                      d.info.get('continuous_columns'),
                      batch_size=params['batch_size'],
                      hard=params['hard'],
                      temp_anneal=params['temp_anneal'],
                      input_time=curr_time)
    # Train the remaining epochs (epchs modulo max_iter).
    my_wgan.train(d.train, epchs%max_iter,
                  d.info.get('discrete_columns'),
                  d.info.get('continuous_columns'),
                  batch_size=params['batch_size'],
                  hard=params['hard'],
                  temp_anneal=params['temp_anneal'],
                  input_time=curr_time)
    print('Successfully fitted a wgan model')
    return my_wgan
def sampler(my_wgan, params):
    """Draw 100k rows from the fitted WGAN, cast to the training dtypes."""
    dataset = params.get('dataset')
    drawn = my_wgan.sample_df(100000)
    # Align dtypes with the real training data (e.g. ints sampled as floats).
    return drawn.astype(dataset.train.dtypes)
def optim_loss(samples, params):
    """pMSE loss of *samples* against the real training data (hyperopt objective)."""
    d = params.get('dataset')
    optim_df = add_indicator(real_df=d.train, synth_df=samples)
    # one-hot-encode discrete features
    one_hot_df = pd.get_dummies(optim_df, columns=d.info.get('discrete_columns'))
    print(one_hot_df.head())
    # NOTE(review): pMSE in evaluation/pMSE.py takes (real_df, synth_df,
    # model, ...); this single-argument call looks out of date — confirm
    # before re-enabling the optimization path.
    loss = pMSE(one_hot_df)
    print(loss)
    return loss
def main(params=None, optim=False):
    """Train (or load/optimize) a WGAN on params['training_set'], sample
    100k rows, save them, and optionally run the evaluation plots.

    :param params: configuration dict (see DEF_PARAMS); a default mvn-test2
        configuration is used when None.
    :param optim: if True, hyper-parameter optimize before training.
    """
    if params is None:
        params = {
            # Regular parameters
            'training_set': 'mvn-test2',
            'eval': 'all',
            # NN Hyperparameters
            'EPOCHS': EPOCHS,
            'embedding_dim': 128,
            'gen_num_layers': 2,
            'gen_layer_sizes': 256,
            'crit_num_layers': 2,
            'crit_layer_sizes': 256,
            'mode': 'wgan-gp',
            'gp_const': 10,
            'n_critic': 5,
            'batch_size': 500,
            'hard': False,
            'temp_anneal': False
        }
    if optim:
        # NOTE(review): `space` (and `optimize` below) come from the hyperopt
        # code that is commented out at the top of this file, so optim=True
        # currently raises NameError — confirm before enabling.
        params.update(space)  # Overwrite NN hyperparameters with stochastic variant from top of file
    print('Starting wgan-gp main script with following parameters:')
    for key in params:
        print(key, params[key])
    params['model'] = 'wgan'
    # Load dataset
    print(params.get('training_set'))
    dataset = load_data(params.get('training_set'))
    params['dataset'] = dataset
    print('Successfully loaded dataset {0}'.format(params.get('training_set')))
    alist = params.get('training_set').split(sep='-', maxsplit=1)
    basepath = os.path.join(RESULT_DIR, *alist, params.get('model'))
    filepath = os.path.join(basepath, '{0}_{1}_ass_diff.json'.format(alist[0], params.get('model')))
    # Resolve the log directory relative to the result folder.
    if params.get('log_directory') != None:
        params['log_directory'] = os.path.join(basepath, params['log_directory'])
    else:
        params['log_directory'] = basepath
    if optim:
        # Optimize or load wgan model
        filename = os.path.join(RESULT_DIR, params.get('training_set'), params.get('model') + '_optimized')
        if os.path.isfile(filename):
            my_wgan = load_model(filename)
            print('Successfully loaded old optimized wgan model from {0}'.format(filename))
        else:
            best, trials = optimize(params, filename+'.json')
            my_wgan = build_and_train(best)
            save_model(my_wgan, filename, force=True)
            print('Saved the optimized wgan model at {0}'.format(filename))
    else:
        # Train or load wgan model
        filename = os.path.join(RESULT_DIR, params.get('training_set'), params.get('model'))
        my_wgan = build_and_train(params=params)
        # try:
        #     save_model(my_wgan, filename, force = True)
        #     print('Saved the wgan model at {0}'.format(filename))
        # except Exception as e:
        #     print('Model was not saved due to an error: {0}'.format(e))
        #     #os.remove(filename)
        #save_model(my_wgan, filename, force=True)
        #print('Saved the wgan model at {0}'.format(filename))
    # Sample from model
    print('Sampling from the wgan model...')
    samples = sampler(my_wgan, params)
    save_samples(samples, params['training_set'], model='wgan')
    print('Saved the wgan samples')
    # Evaluate fitted model
    if params['eval'] == 'all':
        print('Starting MLE evaluation on samples...')
        discrete_columns, continuous_columns = dataset.get_columns()
        # plot_predictions_by_dimension(real=dataset.train, samples=samples, data_test=dataset.test,
        #                               discrete_columns=discrete_columns, continuous_columns=continuous_columns,
        #                               dataset=params.get('training_set'), model='wgan')
        #print('Plotting marginals of real and sample data...')
        plot_marginals(dataset.train, samples, params.get('training_set'), 'wgan')
        #print('Plotting association matrices...')
        #diff = plot_association(dataset, samples, params.get('training_set'), params.get('model'))
        #print(diff)
        #save_json(diff, filepath)
if __name__ == "__main__":
    main()
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,107
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/evaluation/pMSE.py
|
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from gan_thesis.evaluation.machine_learning import *
from gan_thesis.data.load_data import load_data
N_ESTIM = 10  # tree count for the ensemble discriminators (when enabled)

# Discriminator models used to estimate the propensity score.
# BUG FIX: the first MODELS dict (logreg + random forest + AdaBoost) was
# instantiated and then immediately shadowed by the second assignment —
# dead work and confusing to readers. The alternative configuration is
# kept here as a comment instead.
# MODELS = {'logreg': LogisticRegression(penalty='none', max_iter=100),
#           'rforest': RandomForestClassifier(n_estimators=N_ESTIM),
#           'gboost': AdaBoostClassifier(n_estimators=N_ESTIM)}
MODELS = {'logreg': LogisticRegression(max_iter=100)}
def pMSE_ratio(real_df, synth_df, discrete_columns):
    """Ratio of the observed pMSE to its null value for each model in MODELS.

    A ratio near 1 means the discriminator cannot tell real rows from
    synthetic ones.
    """
    # Extract all discrete features, first line ensures that get all features from both real and synth
    features = real_df.columns.to_list() + list(set(synth_df.columns.to_list()) - set(real_df.columns.to_list()))
    discrete_used_feature_indices = [feature for feature in features if feature in discrete_columns]
    one_hot_real = pd.get_dummies(real_df, columns=discrete_used_feature_indices)
    one_hot_synth = pd.get_dummies(synth_df, columns=discrete_used_feature_indices)
    ratio = {}
    for model in MODELS:
        pmse, k = pMSE(one_hot_real, one_hot_synth, MODELS[model])
        if model == 'logreg':
            # Analytical null pMSE for logistic regression:
            # (k-1) * (1-c)^2 * c / N, with c the synthetic share of N rows.
            N = len(real_df.index) + len(synth_df.index)
            c = len(synth_df.index)/N
            null = (k-1) * (1-c)**2 * (c/N)
        else:
            # Other models: use the simulated null estimate cached on disk.
            null = null_pmse(model)
        ratio[model] = pmse/null
        print('pmse: ', pmse)
        print('null: ', null)
        print('ratio: ', ratio)
    return ratio
def pMSE(real_df, synth_df, model, shuffle=False, polynomials=True):
    """Propensity-score mean-squared-error of *model* distinguishing real
    from synthetic rows.

    :param model: a classifier exposing fit / predict_proba / coef_.
    :param shuffle: randomly permute the real/synthetic indicator (used when
        estimating the null distribution).
    :param polynomials: include degree-2 polynomial features before fitting.
    :return: (pmse, k) where k is the number of fitted coefficients.
    """
    # ind_var is the last column of df: 0 = real, 1 = synthetic.
    df = add_indicator(real_df, synth_df, shuffle)
    predictors = df.iloc[:, :-1]
    target = df.iloc[:, -1]
    if polynomials:
        poly = PolynomialFeatures(degree=2)
        # BUG FIX: the transformed matrix was previously discarded
        # (`poly.fit_transform(predictors)` on its own line), so the
        # `polynomials` flag had no effect on the fitted model.
        predictors = poly.fit_transform(predictors)
    model.fit(predictors, target)
    prediction = model.predict_proba(predictors)
    # c is the synthetic share; pMSE measures deviation of the predicted
    # propensity from that constant.
    c = len(synth_df.index) / len(df.index)
    pmse = sum((prediction[:, 1] - c) ** 2) / len(df.index)
    return pmse, model.coef_.size
def null_pmse_est(real_df, synth_df, n_iter):
    """Estimate the null pMSE of each model in MODELS by repeatedly shuffling
    the real/synthetic indicator, and cache the estimates to a csv next to
    this file.

    :return: dict mapping model name -> estimated null pMSE.
    """
    null = {}
    for model in MODELS:
        total = 0.0
        for i in range(n_iter):
            if i % 10 == 0: print('iteration {0}'.format(str(i)))
            # BUG FIX: pMSE returns a (pmse, k) tuple; the value must be
            # unpacked before accumulating — the old `pmse += pMSE(...)`
            # raised TypeError (int + tuple).
            pmse_value, _ = pMSE(real_df, synth_df, MODELS[model], shuffle=True)
            total += pmse_value
        null[model] = total / n_iter
    print(null)
    df = pd.DataFrame(data=null, index=range(1))
    save_path = os.path.join(os.path.dirname(__file__), 'null_pmse_est_{0}_{1}.csv'.format(str(N_ESTIM), str(n_iter)))
    df.to_csv(save_path, index=False)
    # BUG FIX: return the per-model estimates rather than the loop-local
    # average of whichever model happened to be last.
    return null
def null_pmse(model):
    # Load the cached null-pMSE estimate for *model*.
    # NOTE(review): this reads the unparameterized 'null_pmse_est.csv',
    # while null_pmse_est writes 'null_pmse_est_<N>_<iters>.csv' — confirm
    # the cache file is renamed/created manually.
    load_path = os.path.join(os.path.dirname(__file__), 'null_pmse_est.csv')
    df = pd.read_csv(load_path)
    print(df[model][0])
    return df[model][0]
def add_indicator(real_df, synth_df, shuffle=False):
    """Stack *real_df* (ind=0) on top of *synth_df* (ind=1) with a fresh index.

    :param shuffle: If True, randomly permute the indicator column only
        (used when estimating the null pMSE). The inputs are not modified.
    """
    real = real_df.copy()
    synth = synth_df.copy()
    real['ind'] = 0
    synth['ind'] = 1
    combined = pd.concat((real, synth), axis=0).reset_index(drop=True)
    if shuffle:
        combined['ind'] = combined['ind'].sample(frac=1).reset_index(drop=True)
    return combined
if __name__ == '__main__':
    # Estimate and cache the null pMSE from an existing sampled dataset.
    # This should be a dataset in which samples exists
    dataset = load_data('mvn-test2')
    real = dataset.train
    samples = dataset.samples.get('ctgan')
    null_pmse_est(real, samples, n_iter=100)
    # N_ESTIM = 100
    # null_pmse_est(real, samples, n_iter=1000)
    # Our trials show that these give similar null_pmse estimates
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,108
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/definitions.py
|
import os
TEST_IDENTIFIER = ''  # optional sub-folder suffix for isolating experiment runs

# Project-root anchored paths used throughout the package.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(ROOT_DIR, 'datasets', TEST_IDENTIFIER)
RESULT_DIR = os.path.join(ROOT_DIR, 'results', TEST_IDENTIFIER)
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,109
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/evaluation/plot_marginals.py
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from scipy.stats import kde
import os
from definitions import RESULT_DIR
from gan_thesis.data.load_data import *
def plot_marginals(real, synthetic, dataset, model, force=True):
    """Plot per-column marginal distributions of `real` vs `synthetic` data.

    Saves two PNGs under RESULT_DIR/<dataset parts>/<model>: one grid of
    density plots for the continuous columns ("..._c_marginals.png") and one
    grid of count plots for the categorical columns ("..._d_marginals.png").

    Args:
        real: DataFrame of real observations.
        synthetic: DataFrame of generated samples (same columns as `real`).
        dataset: dataset identifier, e.g. 'mvn-test2' (split on the first '-'
            to build the output path).
        model: model name used in output file names.
        force: if True, overwrite an existing output file.
    """
    cols = synthetic.columns
    # Integer positions of the numeric (continuous) columns; the rest are
    # treated as categorical.
    i_cont = real.columns.get_indexer(real.select_dtypes(np.number).columns)
    i_cat = [i for i in range(len(cols)) if i not in i_cont]
    j = 0
    cols = 3  # NOTE: rebinds `cols` from column index to grid width
    # NOTE(review): np.ceil returns a float; newer matplotlib requires an int
    # subplot grid — confirm the pinned matplotlib version accepts this.
    rows = np.ceil(len(i_cont) / cols)
    plt.figure(figsize=(15, 10))
    for i in i_cont:
        j += 1
        plt.subplot(rows, cols, j)
        sns.distplot(synthetic.iloc[:, i], label='Synthetic')
        sns.distplot(real.iloc[:, i], label="Real")
        plt.legend()
    alist = dataset.split(sep='-', maxsplit=1)
    dataset = alist[0]
    basepath = os.path.join(RESULT_DIR, *alist, model)
    filepath = os.path.join(basepath, '{0}_{1}_c_marginals.png'.format(dataset, model))
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    if os.path.isfile(filepath) and force:
        os.remove(filepath)
    plt.savefig(filepath)
    # Stack real and synthetic rows with an indicator column so seaborn can
    # draw side-by-side count plots per category.
    temp = real.copy()
    temp2 = synthetic.copy()
    listofzeros = ['Real'] * len(real)
    listofones = ['Synthetic'] * len(synthetic)
    temp['Synthetic'] = listofzeros
    temp2['Synthetic'] = listofones
    frames = [temp, temp2]
    result = pd.concat(frames)
    j = 0
    cols = 3
    rows = np.ceil(len(i_cat) / cols)
    plt.figure(figsize=(15, 10))
    for i in i_cat:
        j += 1
        plt.subplot(rows, cols, j)
        sns.countplot(x=real.columns.tolist()[i], data=result, hue='Synthetic')
        plt.legend()
    filepath = os.path.join(basepath, '{0}_{1}_d_marginals.png'.format(dataset, model))
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    if os.path.isfile(filepath) and force:
        os.remove(filepath)
    plt.savefig(filepath)
def contour_grid(df, title, nbins=50, contour=True):
    """Draw a lower-triangular grid of pairwise 2-D density plots for `df`.

    One subplot is drawn for every column pair below the diagonal, then the
    figure is shown. `title` is accepted for interface compatibility but is
    currently unused.
    """
    names = df.columns
    n = len(names)
    plt.figure(figsize=(15, 10))
    for row in range(n):
        # Only pairs strictly below the diagonal (col < row).
        for col in range(row):
            plt.subplot(n, n, 1 + n * row + col)
            countour_2d_plt(df[names[row]], df[names[col]], nbins, contour)
    plt.show()
def kde_calc(x, y, nbins=20):
    """Evaluate a Gaussian KDE of the joint (x, y) sample on a regular grid.

    The grid spans [x.min(), x.max()] x [y.min(), y.max()] with `nbins`
    points per axis.  Returns (xi, yi, zi) where xi and yi are the
    (nbins, nbins) grid coordinates and zi is the flattened density.
    """
    density = kde.gaussian_kde((x, y))
    xi, yi = np.mgrid[x.min():x.max():nbins * 1j, y.min():y.max():nbins * 1j]
    grid_points = np.vstack([xi.flatten(), yi.flatten()])
    zi = density(grid_points)
    return xi, yi, zi
def countour_2d_plt(x, y, nbins=50, contour=True):
    """Render a smoothed 2-D density heat map of (x, y) on the current axes.

    Optionally overlays black contour lines.  Returns the (xi, yi, zi)
    tuple produced by kde_calc so callers can reuse the grid.
    """
    xi, yi, zi = kde_calc(x, y, nbins)
    z_grid = zi.reshape(xi.shape)
    plt.pcolormesh(xi, yi, z_grid, shading='gouraud', cmap='coolwarm')
    if contour:
        plt.contour(xi, yi, z_grid, colors='black')
    return (xi, yi, zi)
def plot_all_marginals(dataset, data, force=True, pass_tgan=True):
    """Plot real-vs-model marginals for every model's samples at once.

    Produces three PNGs under RESULT_DIR/<data parts>/:
      * "..._combined_c_marginals.png": each continuous column with real and
        all models overlaid in one subplot.
      * "..._separated_c_marginals.png": one row of subplots per model,
        real vs that model only.
      * "..._all_d_marginals.png": relative-frequency bar plots for each
        categorical column, grouped by model.

    Args:
        dataset: loaded dataset object exposing `.train` and `.samples`
            (a dict keyed by model name — 'wgan', 'ctgan', optionally 'tgan').
        data: dataset identifier string, split on the first '-' for the path.
        force: overwrite an existing discrete-marginals file.
        pass_tgan: include TGAN samples if True.
    """
    real = dataset.train
    cols = real.columns
    alist = data.split(sep='-', maxsplit=1)
    base_path = os.path.join(RESULT_DIR, *alist)
    if not os.path.exists(base_path):
        os.makedirs(base_path)
    samples_wgan = dataset.samples.get('wgan')
    samples_ctgan = dataset.samples.get('ctgan')
    if pass_tgan:
        samples_tgan = dataset.samples.get('tgan')
        samples = [samples_wgan, samples_ctgan, samples_tgan]
        models = ['WGAN', 'CTGAN', 'TGAN']
    else:
        samples = [samples_wgan, samples_ctgan]
        models = ['WGAN', 'CTGAN']
    # Integer positions of numeric columns; everything else is categorical.
    i_cont = real.columns.get_indexer(real.select_dtypes(np.number).columns)
    if data == 'telecom':
        # Dataset-specific quirk: drop the first numeric column for telecom.
        i_cont = np.delete(i_cont, 0)
    i_cat = [i for i in range(len(cols)) if i not in i_cont]
    # Plot a picture of all continuous columns in a (,3) grid with all models combined
    j = 0
    cols = 3  # NOTE: rebinds `cols` from column index to grid width
    if data == 'news':
        cols = 5
    rows = np.ceil(len(i_cont) / cols)
    plt.figure(figsize=(12, 3*rows))
    for i in i_cont:
        j += 1
        plt.subplot(rows, cols, j)
        sns.distplot(real.iloc[:, i], label="Real")
        for k in range(len(samples)):
            sns.distplot(samples[k].iloc[:, i], label=models[k])
        # Show a single shared legend on the second subplot only.
        if j == 2:
            plt.legend(loc='lower center', bbox_to_anchor=(0.5, 1),
                       ncol=4, fancybox=True, shadow=False)
        else:
            plt.legend('', frameon=False)
    file_path = os.path.join(base_path, '{0}_combined_c_marginals.png'.format(data))
    plt.savefig(file_path)
    # Plot a picture of all continuous columns in a (,3) grid with all models separated
    j = 0
    cols = 3
    rows = np.ceil((len(i_cont) / cols)*len(models))
    plt.figure(figsize=(4*cols, 3*rows))
    current_palette = sns.color_palette()
    for k in range(len(models)):
        leg = True  # legend only on the first subplot of each model's row block
        for i in i_cont:
            j += 1
            plt.subplot(rows, cols, j)
            sns.distplot(real.iloc[:, i], label="Real", color=current_palette[0])
            sns.distplot(samples[k].iloc[:, i], label=models[k], color=current_palette[k+1])
            plt.legend('', frameon=False)
            if leg:
                plt.legend(loc='upper right', bbox_to_anchor=(1, 1),
                           ncol=4, fancybox=True, shadow=False)
                leg = False
    file_path = os.path.join(base_path, '{0}_separated_c_marginals.png'.format(data))
    plt.savefig(file_path)
    # Stack real + all model samples with a 'Synthetic' indicator column so
    # the categorical plots can be grouped by source.
    temp = real.copy()
    wgan = samples_wgan.copy()
    ctgan = samples_ctgan.copy()
    identifier = ['Real'] * len(temp)
    temp['Synthetic'] = identifier
    identifier = ['WGAN'] * len(wgan)
    wgan['Synthetic'] = identifier
    identifier = ['CTGAN'] * len(ctgan)
    ctgan['Synthetic'] = identifier
    if pass_tgan:
        tgan = samples_tgan.copy()
        identifier = ['TGAN'] * len(tgan)
        tgan['Synthetic'] = identifier
        frames = [temp, wgan, tgan, ctgan]
        result = pd.concat(frames)
    else:
        frames = [temp, wgan, ctgan]
        result = pd.concat(frames)
    j = 0
    cols = 3
    rows = int(np.ceil(len(i_cat) / cols))
    if rows == 0:
        rows = 1
    plt.figure(figsize=(12, 3*rows))
    for i in i_cat:
        j += 1
        # Build a small frame of per-label relative frequencies per source.
        temp = result[result['Synthetic'] == 'Real'].iloc[:, i]
        vals = temp.value_counts(normalize=True)
        id = ['Real'] * len(vals)
        vals_id = list(zip(vals.index, vals, id))
        rel_counts = pd.DataFrame(vals_id, columns=['Feature', 'Frequency', 'Model'])
        for model in models:
            temp = result[result['Synthetic'] == model].iloc[:, i]
            vals = temp.value_counts(normalize=True)
            id = [model]*len(vals)
            vals_id = list(zip(vals.index, vals, id))
            rel_counts = rel_counts.append(pd.DataFrame(vals_id, columns=['Feature', 'Frequency', 'Model']), ignore_index=True)
        plt.subplot(rows, cols, j)
        sns.barplot(x='Feature', y='Frequency', hue='Model', data=rel_counts)
        plt.xlabel(result.columns.to_list()[i])
        # (result
        #  .groupby(x)[y]
        #  .value_counts(normalize=True)
        #  .mul(100)
        #  .rename('percent')
        #  .reset_index()
        #  .pipe((sns.catplot, 'data'), x=x, y='percent', hue=y, kind='bar', ax=axes[j]))
        # sns.countplot(x=real.columns.tolist()[i], data=result, hue='Synthetic', ax=axes[j])
        if j == 2:
            plt.legend(loc='lower center', bbox_to_anchor=(0.5, 1),
                       ncol=4, fancybox=True, shadow=False)
        else:
            plt.legend('', frameon=False)
    plt.tight_layout()
    filepath = os.path.join(base_path, '{0}_all_d_marginals.png'.format(data))
    if not os.path.exists(base_path):
        os.makedirs(base_path)
    if os.path.isfile(filepath) and force:
        os.remove(filepath)
    plt.savefig(filepath)
    print(['saved figure at',filepath])
def plot_cond_marginals():
    """Plot conditional categorical marginals for the 'cond_cat-test1' dataset.

    For each label of the independent feature, plots the distribution of the
    conditional feature among WGAN samples.

    NOTE(review): the ctgan/tgan/real conditionals are computed but never
    plotted, and the suptitle says 'ctgan' while the bars show WGAN data —
    looks like leftover exploratory code; confirm intent before relying on
    the figure title.
    """
    dataset = load_data('cond_cat-test1')
    wgan_samples = dataset.samples.get('wgan')
    ctgan_samples = dataset.samples.get('ctgan')
    tgan_samples = dataset.samples.get('tgan')
    cc_real = dataset.data
    labels = ['ind_feat_0_label_0', 'ind_feat_0_label_1', 'ind_feat_0_label_2']
    cond_labels = ['cond_feat_00_label_0', 'cond_feat_00_label_1', 'cond_feat_00_label_2']
    # Slice the conditional feature by each independent-feature label.
    wgan_cond = [wgan_samples['ind_feat_0'] == labels[i] for i in range(3)]
    wgan = [wgan_samples['cond_feat_00'][wgan_cond[i]] for i in range(3)]
    ctgan_cond = [ctgan_samples['ind_feat_0'] == labels[i] for i in range(3)]
    ctgan = [ctgan_samples['cond_feat_00'][ctgan_cond[i]] for i in range(3)]
    tgan_cond = [tgan_samples['ind_feat_0'] == labels[i] for i in range(3)]
    tgan = [tgan_samples['cond_feat_00'][tgan_cond[i]] for i in range(3)]
    real_cond = [cc_real['ind_feat_0'] == labels[i] for i in range(3)]
    real = [cc_real['cond_feat_00'][real_cond[i]] for i in range(3)]
    plt.figure(figsize=(20, 5))
    plt.subplot(131)
    sns.countplot(wgan[0], order=cond_labels)
    plt.subplot(132)
    sns.countplot(wgan[1], order=cond_labels)
    plt.subplot(133)
    sns.countplot(wgan[2], order=cond_labels)
    plt.suptitle('ctgan')
    plt.show()
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,110
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/wgan/data.py
|
import tensorflow as tf
import pandas as pd
from sklearn import preprocessing
def df_to_dataset(dataframe_in, shuffle=True, batch_size=32):
    """Convert a DataFrame into a batched tf.data.Dataset of row tensors.

    Args:
        dataframe_in: source DataFrame; a copy is taken so the caller's
            frame is left untouched.
        shuffle: if True, shuffle the rows before batching.  Bug fix: the
            original accepted this flag but silently ignored it.
        batch_size: number of rows per emitted batch.

    Returns:
        A tf.data.Dataset yielding batches of shape (batch_size, n_columns).
    """
    dataframe = dataframe_in.copy()
    ds = tf.data.Dataset.from_tensor_slices(dataframe.values)
    if shuffle:
        # Buffer the whole frame so the shuffle is uniform over all rows.
        ds = ds.shuffle(buffer_size=len(dataframe))
    ds = ds.batch(batch_size)
    return ds
def train_test(dataframe_in, fraction):
    """Randomly split a DataFrame into (train, test) with `fraction` held out.

    Both returned frames have a fresh 0..n-1 index; the input is not modified.
    """
    frame = dataframe_in.copy()
    holdout = frame.sample(frac=fraction)
    remainder = frame.drop(holdout.index).reset_index(drop=True)
    holdout = holdout.reset_index(drop=True)
    return remainder, holdout
def dataset_to_df(dataset, col_names, batch=False):
    """Collect a tensor dataset back into a DataFrame with the given columns.

    If `batch` is True, `dataset` is iterated and every element's .numpy()
    payload is appended; otherwise `dataset.numpy()` is converted directly.
    """
    parts = [pd.DataFrame(columns=col_names)]
    if batch:
        for chunk in dataset:
            parts.append(pd.DataFrame(chunk.numpy(), columns=col_names))
    else:
        parts.append(pd.DataFrame(dataset.numpy(), columns=col_names))
    return pd.concat(parts, ignore_index=True)
def load_credit_data():
    """Download (and cache) the ISLR Credit dataset; return it without its row-id column."""
    cached_path = tf.keras.utils.get_file(
        "Credit.csv", "http://faculty.marshall.usc.edu/gareth-james/ISL/Credit.csv")
    frame = pd.read_csv(cached_path)
    # The first column is just the CSV's row number.
    return frame.drop(frame.columns[0], axis=1)
def load_adult_data():
    """Load the adult census CSV from a hard-coded local path.

    NOTE(review): absolute Windows path — only resolves on the author's
    machine; consider moving it under DATA_DIR.
    """
    # data = data.drop(data.columns[0], axis = 1)
    return pd.read_csv('C:/Users/tsjob/Documents/Python/Examensarbete/gan_thesis/datasets/adult.csv')
def data_reorder(dataframe, cat_cols):
    """Return `dataframe` with the categorical columns moved to the end.

    Continuous (non-`cat_cols`) columns keep their relative order, followed
    by `cat_cols` in the given order.
    """
    # put the continuous columns before the categorical
    continuous_part = dataframe.drop(cat_cols, axis=1)
    categorical_part = dataframe[cat_cols]
    return pd.concat([continuous_part, categorical_part], axis=1)
class dataScaler:
    """Combined scaler: standardizes continuous columns and one-hot encodes
    categorical columns, remembering enough state to invert both transforms.

    Usage: call ``transform(df, cont_cols, cat_cols)`` once with ``fit=True``,
    then ``inverse_transfrom`` (sic — name kept for callers) to map generated
    data back to the original column space and order.
    """

    def __init__(self):
        self.std_scaler = preprocessing.StandardScaler()
        self.oht_scaler = preprocessing.OneHotEncoder()
        # Track which transforms were applied so inversion only undoes those.
        self.std_scaled = False
        self.oht_scaled = False

    def transform(self, df_in, cont_cols, cat_cols, fit=True):
        """Scale `cont_cols` and one-hot encode `cat_cols`; returns a new frame."""
        df = df_in.copy()
        self.original_order = df.columns
        if len(cont_cols) != 0:
            df = self.std_scale(df, cont_cols, fit)
        if len(cat_cols) != 0:
            df = self.oht_transform(df, cat_cols, fit)
        return df

    def inverse_transfrom(self, df_in):
        """Invert whichever transforms were applied; restores original column order."""
        df = df_in.copy()
        if self.std_scaled:
            df = self.inv_std_scale(df)
        if self.oht_scaled:
            df = self.inv_oht_transform(df)
        return df[self.original_order]

    def oht_transform(self, df_in, cat_cols, fit=True):
        """One-hot encode `cat_cols`, appending the indicator columns at the end."""
        df = df_in.copy()
        self.oht_scaled = True
        if fit:
            # Bug fix: the encoder used to be re-fitted on every call,
            # silently ignoring `fit`; now (matching std_scale) the fitted
            # categories are reused when fit=False.
            self.cat_cols = cat_cols
            self.oht_scaler.fit(df[self.cat_cols])
        oht_enc = self.oht_scaler.transform(df[self.cat_cols])
        oht_pd = pd.DataFrame(oht_enc.toarray(), columns=self.oht_scaler.get_feature_names())
        df = df.drop(columns=self.cat_cols)
        return pd.concat([df, oht_pd], axis=1)

    def inv_oht_transform(self, df_in):
        """Collapse the one-hot indicator columns back to the original categoricals."""
        df = df_in.copy()
        temp_cols = self.oht_scaler.get_feature_names()
        inv_enc = self.oht_scaler.inverse_transform(df[temp_cols])
        inv_pd = pd.DataFrame(inv_enc, columns=self.cat_cols)
        df = df.drop(columns=temp_cols)
        return pd.concat([df, inv_pd], axis=1)

    def std_scale(self, df_in, cont_cols, fit=True):
        """Standardize `cont_cols` in place (zero mean, unit variance)."""
        df = df_in.copy()
        self.std_scaled = True
        if fit:
            self.cont_cols = cont_cols
            self.std_scaler.fit(df[self.cont_cols])
        df[self.cont_cols] = self.std_scaler.transform(df[self.cont_cols])
        return df

    def inv_std_scale(self, df_in):
        """Undo the standardization of the continuous columns."""
        df = df_in.copy()
        df[self.cont_cols] = self.std_scaler.inverse_transform(df[self.cont_cols])
        return df
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,111
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/general/utils.py
|
import os
import sys
import pickle
import json
import csv
def load_model(path):
    """Unpickle and return a previously saved model.

    Prints a message and returns None when `path` does not exist.
    """
    if not os.path.isfile(path):
        print('No model is saved at the specified path.')
        return
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def save_model(model, path, force=False):
    """Pickle `model` to `path`, creating parent directories as needed.

    Args:
        model: any picklable object.
        path: destination file path (relative or absolute).
        force: overwrite an existing file when True; otherwise print a
            warning and return without saving.
    """
    if os.path.exists(path) and not force:
        print('The indicated path already exists. Use `force=True` to overwrite.')
        return
    base_path = os.path.dirname(path)
    # Bug fix: a bare filename gives base_path == '' and os.makedirs('')
    # raises FileNotFoundError — only create directories when there are any.
    if base_path and not os.path.exists(base_path):
        os.makedirs(base_path)
    with open(path, 'wb') as f:
        pickle.dump(model, f)
    print('Model saved successfully.')
def save_json(result, path):
    """Serialize `result` as JSON at `path`, creating parent directories as needed."""
    base_path = os.path.dirname(path)
    # Bug fix: for a bare filename base_path is '' and os.makedirs('') raises.
    if base_path and not os.path.exists(base_path):
        os.makedirs(base_path)
    with open(path, 'w') as f:
        json.dump(result, f)
def save_csv(result, path):
    """Write `result` (an iterable of rows) as CSV at `path`.

    Creates parent directories as needed.

    Bug fix: the original called the nonexistent ``csv.dump``; the csv
    module only provides writer objects, so this now uses csv.writer.
    """
    base_path = os.path.dirname(path)
    # Guard against base_path == '' for bare filenames (makedirs('') raises).
    if base_path and not os.path.exists(base_path):
        os.makedirs(base_path)
    # newline='' per the csv module docs so the writer controls line endings.
    with open(path, 'w', newline='') as f:
        csv.writer(f).writerows(result)
class HiddenPrints:
    """Context manager that silences stdout for the duration of the block.

    Inside ``with HiddenPrints():`` all prints go to os.devnull; the
    original stream is restored (and the devnull handle closed) on exit.
    """

    def __enter__(self):
        self._saved_stdout = sys.stdout
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._saved_stdout
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,112
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/dataset_spec.py
|
import numpy as np
from gan_thesis.data.datagen import r_corr, rand_prop
from itertools import chain
unif = np.random.uniform # Shorthand
rint = np.random.randint
seed = 123
np.random.seed(seed)
n_samples = 10000
mvn_test1 = {
# 3 INDEPENDENT features
# 1 standard normal, 1 high mean, 1 high var
#
'n_samples': n_samples,
'mean': [0, 3, 0],
'var': [1, 1, 5],
'corr': np.eye(3).tolist(),
'seed': seed
}
mvn_test2 = {
# medium positive
# medium negative
# correlations
'n_samples': n_samples,
'mean': [0, 0, 0],
'var': [1, 1, 1],
'corr': [[1, 0.3, -0.3],
[0.3, 1, 0.8],
[-0.3, 0.8, 1]],
'seed': seed
}
mvn_test3 = {
'n_samples': n_samples,
'mean': unif(-3, 3, 9).tolist(),
'var': unif(0, 1, 9).tolist(),
'corr': r_corr(9).tolist(),
'seed': seed
}
ln_test1 = mvn_test1.copy()
ln_test2 = mvn_test2.copy()
ln_test3 = mvn_test3.copy()
# feature oberoende med
# Multi modality
# 2 modes
# 2 modes minority
# 2 modes minority close
# 5 modes
#rand_prop = unif(0, 1, 5)
#rand_prop = (rand_prop/sum(rand_prop))
mvn_mix_test1 = {
'n_samples': n_samples,
'proportions':
[0.05, 0.10, 0.35, 0.5],
'means': [[
0, 0, 0, 4
], [
0, 4, 1.5, 0
], [
0, 0, 0, 0
], [
4, 0, 0, 0
]
],
'corrs': [
np.eye(4).tolist() for i in range(4)
],
'vars': [
np.ones((4, 1)).tolist() for i in range(4)
],
'seed': seed
}
# Komplex form m
# means mellan [0,1] Var mellan 0.5, 1.5
# proportion 2-4 slump
# 3 underlying modes
# # 3 features
# corr mellan -0.2 - 0.2
#
#rand_prop = unif(0, 1, 3)
#rand_prop = (rand_prop/sum(rand_prop))
size = 3
mvn_mix_test2 = {
'n_samples': n_samples,
'proportions': rand_prop(3),
'means': [
unif(0, 8, size).tolist() for i in range(3)
],
'corrs': [
r_corr(size).tolist() for i in range(3)
],
'vars': [
unif(0, 1, size).tolist() for i in range(3)
],
'seed': seed
}
# ln COPY AV MVN
cat_test1 = {
'n_samples': n_samples,
'probabilities': [
[0.5, 0.5],
[0.1, 0.2, 0.7],
rand_prop(5).tolist()
],
'seed': seed
}
cond_cat_test1 = {
'n_samples': n_samples,
'ind_probs': [
[1/2, 3/8, 1/8]
],
'cond_probs': [
[
[
[0.8, 0.1, 0.1]
],
[
[0.1, 0.8, 0.1]],
[
[0.1, 0.1, 0.8]
]
]
],
'seed': seed
}
n_ind = 3
n_cond = [3,3,3]
n_cat = n_ind+sum(n_cond)
n_lab_ind = rint(3, 6, n_ind)
n_lab_cond = [rint(3, 6, n_cond[i]) for i in range(n_ind)]
n_labs = n_lab_ind.tolist()+list(chain(*n_lab_cond))
cond_cat_test2 = {
'n_samples' : n_samples,
'ind_probs' : [
rand_prop(size).tolist() for size in n_lab_ind
],
'cond_probs' : [[
[
rand_prop(size).tolist() for size in n_lab_cond[i]
] for j in range(n_lab_ind[i]) ] for i in range((len(n_lab_ind)))
],
'seed': seed
}
gauss_mix_cond_test1 = {
'n_samples': n_samples,
'ind_probs': [[1/3, 1/3, 1/3]],
'means': [
[
[0, 0, 0],
[2, 2, 2],
[4, 4, 4]
]
],
'vars': [
[
unif(0.5, 1.5, 3).tolist() for i in range(3)
]
],
'corrs': [
[
r_corr(3).tolist() for i in range(3)
]
],
'seed': seed
}
# Medium dataset
# 6 categorical features 2-6 labels
# first 2 defining 2 each
# each categorical defining distribution of 2 gaussian mixtures
# All gaussians means between 0 and 5
# variances between 0.5,1.5
n_ind = 2
n_cond = [2,2]
n_cat = n_ind+sum(n_cond)
n_lab_ind = rint(2, 6, n_ind)
n_lab_cond = [rint(2, 6, n_cond[i]) for i in range(n_ind)]
n_labs = n_lab_ind.tolist()+list(chain(*n_lab_cond))
n_cont_per_cat = [2 for i in (n_labs)]
gauss_mix_cond_test2 = {
'n_samples' : n_samples,
'ind_probs' : [
rand_prop(size).tolist() for size in n_lab_ind
],
'cond_probs' : [[
[
rand_prop(size).tolist() for size in n_lab_cond[i]
] for j in range(n_lab_ind[i]) ] for i in range((len(n_lab_ind)))
],
'means' : [
[
unif(0,5, n_cont_per_cat[i]).tolist() for j in range(n_labs[i])
] for i in range(n_cat)
],
'vars' : [
[
unif(0.5,1.2, n_cont_per_cat[i]).tolist() for j in range(n_labs[i])
] for i in range(n_cat)
],
'corrs' : [
[
r_corr(n_cont_per_cat[i]).tolist() for j in range(n_labs[i])
] for i in range(n_cat)
],
'seed': seed
}
## Large dataset 50 features
# 3 independent categorical variables with 2 dependents each
# 2-6 labels per categorical variable
# 5 gaussians per categorical variable
#same variance structure as in medium dataset
n_ind = 3
n_cond = [3 for i in range(n_ind)]
n_cat = n_ind+sum(n_cond)
n_lab_ind = rint(2, 6, n_ind)
n_lab_cond = [rint(2, 6, n_cond[i]) for i in range(n_ind)]
n_labs = n_lab_ind.tolist()+list(chain(*n_lab_cond))
n_cont_per_cat = [5 for i in (n_labs)]
gauss_mix_cond_test3 = {
'n_samples' : n_samples,
'ind_probs' : [
rand_prop(size).tolist() for size in n_lab_ind
],
'cond_probs' : [[
[
rand_prop(size).tolist() for size in n_lab_cond[i]
] for j in range(n_lab_ind[i]) ] for i in range((len(n_lab_ind)))
],
'means' : [
[
unif(0,5, n_cont_per_cat[i]).tolist() for j in range(n_labs[i])
] for i in range(n_cat)
],
'vars' : [
[
unif(0.5,1.2, n_cont_per_cat[i]).tolist() for j in range(n_labs[i])
] for i in range(n_cat)
],
'corrs' : [
[
r_corr(n_cont_per_cat[i]).tolist() for j in range(n_labs[i])
] for i in range(n_cat)
],
'seed': seed
}
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,113
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/wgan/main.py
|
from gan_thesis.models.wgan.data import load_credit_data
from gan_thesis.models.wgan.wgan import *
def main():
    """Smoke-test run: fit a WGAN-GP on the ISLR Credit dataset.

    Hyperparameter keys:
        output_dim: output dimension, including the one-hot encoding of the
            categorical variables.
        latent_dim: dimension of the generator's random-noise input.
        gen_dim / crit_dim: tuples of hidden-layer sizes for the generator
            and critic.
        mode: 'wgan' or 'wgan-gp', selecting the loss function.
        gp_const: gradient-penalty constant (only used when mode == 'wgan-gp').
        n_critic: critic learning iterations per generator iteration.
        Checkpoints: yet to be added...
    """
    hyper = {
        'output_dim': 16,
        'latent_dim': 128,
        'gen_dim': (256, 256),
        'crit_dim': (256, 256),
        'mode': 'wgan-gp',
        'gp_const': 10,
        'n_critic': 16,
    }
    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    gan = WGAN(hyper)
    frame = load_credit_data()
    categorical = ['Gender', 'Student', 'Married', 'Ethnicity']
    integer_cols = ['Cards', 'Age', 'Education']
    continuous = ['Income', 'Limit', 'Rating', 'Balance']
    # Scale both the truly continuous and the integer-valued columns.
    gan.train(frame, 100, categorical, continuous + integer_cols)
# Script entry point: run the WGAN smoke test when executed directly.
if __name__ == "__main__":
    main()
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,114
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/evaluation/machine_learning.py
|
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.metrics import accuracy_score, f1_score, r2_score
from sklearn.neural_network import MLPClassifier, MLPRegressor
from definitions import RESULT_DIR
def prediction_score(train_x, train_y, test_x, test_y, metric, model, target):
    """Fit the requested estimator and score its predictions on the test split.

    Args:
        train_x, train_y: training features and target.
        test_x, test_y: held-out features and target.
        metric: 'f1' (micro-averaged), 'accuracy', or 'r2'.
        model: 'random_forest', 'adaboost', 'regression', or 'mlp'.
        target: 'continuous' (regressors) or 'discrete' (classifiers).

    Returns:
        The score, clipped below at 0.

    Raises:
        Exception: when `metric` is not one of the three supported names.
    """
    estimators = {
        "continuous": {
            "random_forest": RandomForestRegressor(n_estimators=10),
            "adaboost": AdaBoostRegressor(n_estimators=10),
            "regression": LinearRegression(),
            "mlp": MLPRegressor()
        },
        "discrete": {
            "random_forest": RandomForestClassifier(n_estimators=10),
            "adaboost": AdaBoostClassifier(n_estimators=10),
            "regression": LogisticRegression(),
            "mlp": MLPClassifier()
        },
    }
    estimator = estimators[target][model]
    estimator.fit(train_x, train_y)
    predicted = estimator.predict(test_x)
    if metric == "f1":
        score = f1_score(test_y, predicted, average='micro')
    elif metric == "accuracy":
        score = accuracy_score(test_y, predicted)
    elif metric == "r2":
        score = r2_score(test_y, predicted)
    else:
        raise Exception("Metric not recognized.")
    # Negative scores (possible for r2) are floored at zero.
    return np.max([score, 0])
def predictions_by_dimension(train, test, discrete_columns, continuous_columns):
    """Score how well each column can be predicted from all the others.

    For every feature in turn, fits each method on `train` with that feature
    as the target (remaining discrete features one-hot encoded) and scores
    on `test` — micro-F1 for discrete targets, R^2 for continuous ones.

    Returns:
        DataFrame indexed by feature name with one column per method.
        Features in neither `discrete_columns` nor `continuous_columns`
        keep NaN scores.
    """
    features = train.columns.to_list()
    methods = ["random_forest", "adaboost", "regression"]  # mlp also available
    prediction_scores = pd.DataFrame(index=features, columns=methods)
    for feature_index in range(len(features)):
        # drop target feature
        temp_train = train.drop(train.columns[feature_index], axis=1)
        temp_test = test.drop(test.columns[feature_index], axis=1)
        # one-hot-encode non-target features
        discrete_used_feature_indices = [feature for feature in features if
                                         (feature in discrete_columns) &
                                         (feature != features[feature_index])]
        one_hot_train = pd.get_dummies(temp_train, columns=discrete_used_feature_indices)
        one_hot_test = pd.get_dummies(temp_test, columns=discrete_used_feature_indices)
        # make sure train and test have equal one-hot-encoding space
        if len(one_hot_train.columns) != len(one_hot_test.columns):
            for i in one_hot_train.columns:
                if i not in one_hot_test.columns: one_hot_test[i] = 0
            for i in one_hot_test.columns:
                if i not in one_hot_train.columns: one_hot_train[i] = 0
        # use the same column order for the test set as for train
        one_hot_test = one_hot_test.reindex(one_hot_train.columns, axis=1)
        if features[feature_index] in discrete_columns:
            temp_scores = []
            for method in methods:
                temp_scores.append(prediction_score(
                    one_hot_train, train.iloc[:, feature_index],
                    one_hot_test, test.iloc[:, feature_index],
                    metric="f1", model=method, target='discrete'
                ))
            print(temp_scores)
            prediction_scores.loc[features[feature_index], :] = temp_scores
        elif features[feature_index] in continuous_columns:
            temp_scores = []
            for method in methods:
                temp_scores.append(prediction_score(
                    one_hot_train, train.iloc[:, feature_index],
                    one_hot_test, test.iloc[:, feature_index],
                    metric="r2", model=method, target='continuous'
                ))
            print(temp_scores)
            prediction_scores.loc[features[feature_index], :] = temp_scores
    return prediction_scores
def abline(slope, intercept, ax):
    """Draw the dashed line y = intercept + slope * x across `ax`'s x-limits."""
    xs = np.array(ax.get_xlim())
    ax.plot(xs, intercept + slope * xs, '--')
def plot_predictions_by_dimension(real, samples, data_test, discrete_columns, continuous_columns,
                                  dataset, model, force=True):
    """Plot machine-learning efficiency: per-feature predictability of samples vs real.

    Scores every feature with predictions_by_dimension twice — once training
    on `samples`, once on `real`, both evaluated on `data_test` — then
    scatter-plots the per-feature mean scores (real on x, samples on y) with
    a y=x reference line.  Saves the figure and both score tables (CSV)
    under RESULT_DIR/<dataset parts>/<model>.

    Returns:
        (real_scores, sample_scores) DataFrames as produced by
        predictions_by_dimension.
    """
    score_y_by_dimension = predictions_by_dimension(samples, data_test, discrete_columns, continuous_columns)
    score_x_by_dimension = predictions_by_dimension(real, data_test, discrete_columns, continuous_columns)
    # Average over the three methods to get one score per feature.
    mean_x_by_dimension = score_x_by_dimension.mean(axis=1)
    mean_y_by_dimension = score_y_by_dimension.mean(axis=1)
    col_type = ['Categorical' if (col in discrete_columns) else 'Continuous' for col in mean_x_by_dimension.index]
    results = pd.DataFrame({'x': mean_x_by_dimension, 'y': mean_y_by_dimension, 'col_type': col_type})
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    sns.scatterplot(x='x', y='y', ax=ax, hue='col_type', data=results)
    ax.set_title("Machine Learning Efficiency")
    ax.set_ylabel("Sample features")
    ax.set_xlabel("Real features")
    # y = x reference: points on the line mean samples predict as well as real data.
    abline(1, 0, ax)
    alist = dataset.split(sep='-', maxsplit=1)
    dataset = alist[0]
    basepath = os.path.join(RESULT_DIR, *alist, model)
    filepath = os.path.join(basepath, '{0}_{1}_ml_efficiency.png'.format(dataset, model))
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    if os.path.isfile(filepath) and force:
        os.remove(filepath)
    plt.savefig(filepath)
    score_x_by_dimension.to_csv(os.path.join(basepath, '{0}_{1}_ml_real.csv'.format(dataset, model)), index=True)
    score_y_by_dimension.to_csv(os.path.join(basepath, '{0}_{1}_ml_samples.csv'.format(dataset, model)), index=True)
    return score_x_by_dimension, score_y_by_dimension
def plot_all_predictions_by_dimension(dataset, data):
    """Plot ML-efficiency scatter plots for every model's samples side by side.

    For each model (currently wgan and ctgan — tgan commented out), computes
    per-feature predictability on the dataset's test split for both samples
    and real training data, writes the score tables to CSV, and draws one
    scatter subplot per model with a y=x reference line.  Saves the combined
    figure under RESULT_DIR/<data parts>/.
    """
    real = dataset.train
    data_test = dataset.test
    discrete_columns, continuous_columns = dataset.get_columns()
    samples_wgan = dataset.samples.get('wgan')
    # samples_tgan = dataset.samples.get('tgan')
    samples_ctgan = dataset.samples.get('ctgan')
    # samples = [samples_wgan, samples_ctgan, samples_tgan]
    samples = [samples_wgan, samples_ctgan]
    # models = ['wgan', 'ctgan', 'tgan']
    models = ['wgan', 'ctgan']
    # fig, axn = plt.subplots(1, 3, figsize=(20, 6))
    fig, axn = plt.subplots(1, 2, figsize=(20, 6))
    alist = data.split(sep='-', maxsplit=1)
    basepath = os.path.join(RESULT_DIR, *alist)
    for model in models:
        if not os.path.exists(os.path.join(basepath, model)):
            os.makedirs(os.path.join(basepath, model))
    for i in range(len(models)):
        score_y_by_dimension = predictions_by_dimension(samples[i], data_test, discrete_columns, continuous_columns)
        # NOTE: the real-data scores are recomputed per model (same result each time).
        score_x_by_dimension = predictions_by_dimension(real, data_test, discrete_columns, continuous_columns)
        mean_x_by_dimension = score_x_by_dimension.mean(axis=1)
        mean_y_by_dimension = score_y_by_dimension.mean(axis=1)
        score_x_by_dimension.to_csv(os.path.join(basepath, models[i], '{0}_{1}_ml_real.csv'.format(data, models[i])), index=True)
        score_y_by_dimension.to_csv(os.path.join(basepath, models[i], '{0}_{1}_ml_samples.csv'.format(data, models[i])), index=True)
        col_type = ['Categorical' if (col in discrete_columns) else 'Continuous' for col in mean_x_by_dimension.index]
        results = pd.DataFrame({'x': mean_x_by_dimension, 'y': mean_y_by_dimension, 'col_type': col_type})
        ax = axn[i]
        sns.scatterplot(x='x', y='y', ax=ax, hue='col_type', data=results)
        ax.set_title(models[i])
        ax.set_xlim([0, 1])
        ax.set_ylim([0, 1])
        ax.set_ylabel("Sample features")
        ax.set_xlabel("Real features")
        abline(1, 0, ax)
    alist = data.split(sep='-', maxsplit=1)
    # dataset = alist[0]
    basepath = os.path.join(RESULT_DIR, *alist)
    filepath = os.path.join(basepath, '{0}_all_ml_efficiency.png'.format(data))
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    plt.savefig(filepath)
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,115
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/tgan/synthesizer.py
|
from tgan.model import TGANModel
from gan_thesis.evaluation.machine_learning import plot_predictions_by_dimension
from gan_thesis.evaluation.plot_marginals import plot_marginals
from gan_thesis.evaluation.association import plot_association
from gan_thesis.evaluation.pMSE import *
from gan_thesis.data.load_data import *
from gan_thesis.models.general.utils import save_json
from gan_thesis.models.general.optimization import optimize
import os
import pandas as pd
from definitions import RESULT_DIR
from hyperopt import hp
import tensorflow as tf
EPOCHS = 100
# HYPEROPT SPACE
space = {
'embedding_dim': hp.quniform('embedding_dim', 16, 512, 2),
'gen_num_layers': hp.quniform('gen_num_layers', 100, 600, 100),
'gen_layer_sizes': hp.quniform('gen_layer_sizes', 100, 600, 100),
'crit_num_layers': hp.quniform('crit_num_layers', 1, 5, 1),
'crit_layer_sizes': hp.quniform('crit_layer_sizes', 100, 600, 100),
'learning_rate': hp.loguniform('l2scale', np.log10(10 ** -6), np.log10(0.2)),
'batch_size': hp.quniform('batch_size', 50, 500, 50)
}
# DEFAULT PARAMS
DEF_PARAMS = {
# Regular parameters
'eval': 'all',
# NN Hyperparameters
'embedding_dim': 200,
'gen_num_layers': 100,
'gen_layer_sizes': 100,
'crit_num_layers': 1,
'crit_layer_sizes': 100,
'l2scale': 10 ** -6,
'l2norm': 10 ** -5,
'learning_rate': 10 ** -3,
'batch_size': 200
}
def build_and_train(params):
    """Build a TGANModel from a hyperparameter dict and fit it on the dataset's train split.

    Expects `params` to contain a 'dataset' object (with `.train` and
    `.info['continuous_columns']`) plus the NN hyperparameters
    (embedding_dim, gen/crit layer counts and sizes, learning_rate,
    batch_size) — values may be floats from hyperopt, hence the int() casts.

    Returns:
        The fitted TGANModel.
    """
    # Clear any graph left over from a previous hyperopt trial.
    tf.reset_default_graph()
    # NOTE(review): gen_layers/crit_layers are built and printed but never
    # passed to TGANModel (it takes the counts/sizes directly below).
    gen_layers = [int(params['gen_layer_sizes'])] * int(params['gen_num_layers'])
    print(gen_layers)
    crit_layers = [int(params['crit_layer_sizes'])] * int(params['crit_num_layers'])
    print(crit_layers)
    d = params.get('dataset')
    continuous_columns = d.info.get('continuous_columns')
    print('Batch Size:' + str(params.get('batch_size')))
    # Random output directory name so concurrent trials don't collide.
    savestr = str(np.random.randint(1, 999999))
    my_tgan = TGANModel(continuous_columns=continuous_columns, batch_size=int(params.get('batch_size')),
                        z_dim=int(params.get('embedding_dim')), learning_rate=params.get('learning_rate'),
                        num_gen_rnn=int(params.get('gen_num_layers')), num_gen_feature=int(params.get('gen_layer_sizes')),
                        num_dis_layers=int(params.get('crit_num_layers')), num_dis_hidden=int(params.get('crit_layer_sizes')),
                        max_epoch=EPOCHS, steps_per_epoch=50,
                        restore_session=False, output=savestr)
    print('Fitting a TGAN model for {0} epochs...'.format(EPOCHS))
    # fit() may mutate its input frame, so train on a copy.
    train_copy = d.train.copy()
    my_tgan.fit(train_copy)
    print('Successfully fitted a TGAN model')
    return my_tgan
def sampler(my_tgan, params):
    """Draw as many synthetic rows as the training set has, with matching columns and dtypes."""
    train = params.get('dataset').train
    synthetic = my_tgan.sample(len(train))
    synthetic.columns = train.columns.to_list()
    print(train.head())
    return synthetic.astype(train.dtypes)
def optim_loss(samples, params):
    """Hyperopt objective: pMSE between real data and `samples` (lower is better).

    Stacks the real training data and the samples with an indicator column,
    one-hot encodes the dataset's discrete features, and scores the combined
    frame with pMSE.
    """
    d = params.get('dataset')
    combined = add_indicator(real_df=d.train, synth_df=samples)
    # one-hot-encode discrete features
    encoded = pd.get_dummies(combined, columns=d.info.get('discrete_columns'))
    print(encoded.head())
    loss = pMSE(encoded)
    print(loss)
    return loss
def main(params=None, optim=True):
    """End-to-end TGAN run: load data, train/optimize, sample, evaluate.

    Args:
        params: run configuration dict; defaults are filled in when None.
        optim: when True, hyperopt-optimize the model; otherwise train once
            with the given/default hyperparameters.
    """
    if params is None:
        params = {
            # Regular parameters
            'training_set': 'ln',
            'eval': 'all',
            # NN Hyperparameters
            'embedding_dim': 128,
            'gen_num_layers': 2,
            'gen_layer_sizes': 256,
            'crit_num_layers': 2,
            'crit_layer_sizes': 256,
            'learning_rate': 10**-6,
            'batch_size': 500,
            'training_iter': 1
        }
    if optim:
        params.update(space)  # Overwrite NN hyperparameters with stochastic variant from top of file
    print('Starting TGAN main script with following parameters:')
    for key in params:
        print(key, params[key])
    params['model'] = 'tgan'
    # Load dataset
    dataset = load_data(params.get('training_set'))
    params['dataset'] = dataset
    print('Successfully loaded dataset {0}'.format(params.get('training_set')))
    if params['model'] in dataset.samples:
        # If we are here, we have already generated samples for this test setup (identifier/dataset/model)
        samples = dataset.samples.get(params['model'])
    else:
        # Train model and Generate samples
        if optim:
            # Optimize or load TGAN model
            filename = os.path.join(RESULT_DIR, params.get('training_set'), params.get('model') + '_optimized')
            if os.path.isfile(filename):
                my_tgan = TGANModel.load(filename)
                print('Successfully loaded old optimized TGAN model from {0}'.format(filename))
            else:
                best, trials = optimize(params, filename+'.json')
                best['dataset'] = dataset
                my_tgan = build_and_train(best)
                my_tgan.save(filename)
                print('Saved the optimized TGAN model at {0}'.format(filename))
        else:
            # Train or load TGAN model
            filename = os.path.join(RESULT_DIR, params.get('training_set'), params.get('model') + '_default')
            if os.path.isfile(filename):
                # NOTE(review): loading is commented out, so my_tgan is unbound
                # in this branch and sampling below will raise — confirm intended.
                # my_tgan = TGANModel.load(filename)
                print('Successfully loaded old TGAN model from {0}'.format(filename))
            else:
                my_tgan = build_and_train(params=params)
                # my_tgan.save(filename)
                print('Saved the TGAN model at {0}'.format(filename))
        # Sample from model
        print('Sampling from the TGAN model...')
        samples = sampler(my_tgan, params)
        save_samples(samples, params['training_set'], model=params.get('model'), force=True)
        print('Saved the TGAN samples')
    # Evaluate fitted model
    if params['eval'] == 'all':
        print('Starting MLE evaluation on samples...')
        discrete_columns, continuous_columns = dataset.get_columns()
        plot_predictions_by_dimension(real=dataset.train, samples=samples, data_test=dataset.test,
                                      discrete_columns=discrete_columns, continuous_columns=continuous_columns,
                                      dataset=params.get('training_set'), model=params.get('model'))
        print('Plotting marginals of real and sample data...')
        plot_marginals(dataset.train, samples, params.get('training_set'), params.get('model'))
        print('Plotting association matrices...')
        diff = plot_association(dataset, samples, params.get('training_set'), params.get('model'))
        print(diff)
        # `dataset` is rebound from the Dataset object to its name string here.
        alist = params.get('training_set').split(sep='-', maxsplit=1)
        dataset = alist[0]
        basepath = os.path.join(RESULT_DIR, *alist, params.get('model'))
        # NOTE(review): JSON diff is written under a .png filename — confirm.
        filepath = os.path.join(basepath, '{0}_{1}_c_marginals.png'.format(dataset, params.get('model')))
        save_json(diff, filepath)
if __name__ == "__main__":
    main(params=None, optim=True)
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,116
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/params.py
|
import numpy as np
from scipy.stats import random_correlation
# Spec for a 3-feature multivariate normal with INDEPENDENT features:
# one standard normal, one with a shifted mean, one with inflated variance.
mvn_test1 = {
    'n_samples' : 10000,
    'mean' : [0 ,3, 0],
    'var' : [1, 1, 5],
    'corr' : np.eye(3).tolist()  # identity => no cross-correlation
}
# Spec for a 3-feature standard normal with medium positive and
# medium negative pairwise correlations.
mvn_test2 = {
    'n_samples' : 10000,
    'mean' : [0, 0, 0],
    'var' : [1, 1, 1],
    'corr' : [[1, 0.3, -0.3],
              [0.3, 1, 0.8],
              [-0.3, 0.8, 1]]
}
# 10-feature independent standard-normal spec.
# Fixed: mean/var were (1, 10)-shaped arrays while corr was a 3x3 matrix —
# dimensionally inconsistent and unusable by multivariate_df, which expects
# flat per-feature lists and a matching square correlation matrix.
# NOTE(review): this dict is shadowed by the function of the same name
# defined immediately below in this file — confirm which one is intended.
mvn_test1_highfeature = {
    'n_samples' : 10000,
    'mean' : np.zeros(10).tolist(),
    'var' : np.ones(10).tolist(),
    'corr' : np.eye(10).tolist()
}
def mvn_test1_highfeature():
    """Spec for 9 independent standard-normal features with a random
    correlation structure (via corr_matrix)."""
    dim = 9
    return {
        'n_samples': 20000,
        'mean': np.zeros(dim).tolist(),
        'var': np.ones(dim).tolist(),
        'corr': corr_matrix(dim).tolist()
    }
def mvn_test2_highfeature():
    """Spec for 9 features with random means in [-3, 3], variances in [1, 3]
    and a random correlation matrix."""
    dim = 9
    # Draw order (mean, var, corr) is part of the contract: it fixes the
    # RNG consumption sequence for seeded reproducibility.
    random_means = np.random.uniform(-3, 3, dim)
    random_vars = np.random.uniform(1, 3, dim)
    return {
        'n_samples': 20000,
        'mean': random_means.tolist(),
        'var': random_vars.tolist(),
        'corr': corr_matrix(dim).tolist()
    }
def corr_matrix(n):
    """Random n x n correlation matrix.

    Draws n positive eigenvalues, rescales them to sum to n (a requirement of
    scipy's random_correlation), and samples a matching correlation matrix.
    """
    raw = np.random.uniform(size=n)
    eigenvalues = raw * n / raw.sum()
    return random_correlation.rvs(eigenvalues)
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,117
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/wgan/wgan_mod.py
|
import os
import pickle
import time
from functools import partial
import numpy as np
import datetime
import pandas as pd
from tensorflow.keras import layers
from tensorflow.keras.metrics import Mean
from gan_thesis.models.wgan.utils import *
from gan_thesis.models.wgan.data import *
class WGAN:
    def __init__(self):
        """Main WGAN Model (WGAN / WGAN-GP for tabular data).

        Construction is lazy: the networks and optimizers are built by
        ``initialize`` on the first call to ``train``.

        Hyperparameter dictionary keys (see ``initialize`` for defaults):
            embedding_dim:
                Integer dimension of random noise sampled for the generator.
            gen_dim:
                Int or tuple with the hidden layer dimension(s) for the generator.
            crit_dim:
                Int or tuple with hidden layer dimension(s) for the critic.
            mode:
                'wgan' or 'wgan-gp', deciding which loss/constraint to use.
            gp_const:
                Gradient penalty weight. Only read if mode == 'wgan-gp'.
            n_critic:
                Number of critic iterations per generator iteration.
            log_directory:
                Directory of tensorboard logs (False disables logging).
        Checkpoints: yet to be added...
        """
        self.epoch_trained = 0
        self.initialized = False
    def initialize(self, df, cont_cols, cat_cols, input_params):
        """Build networks/optimizers; *input_params* override these defaults."""
        params = {
            'embedding_dim' : 128, # dimension of random input samples
            'mode' : 'wgan-gp', # 'wgan' or 'wgan-gp'
            'n_critic' : 5, # number of critic iterations per generator iteration
            'gp_const' : 10, # weight on gradient penalty
            'gen_dim' : (256,256), # tuple of hidden dimensions of generator
            'crit_dim' : (256, 256), # tuple of hidden dimensions of critic
            'beta1' : 0.5, # Adam
            'beta2': 0.9, # Adam
            'lr' : 10**-4, # Adam
            'hard' : False, # Straight through gumbel-softmax
            'temperature' : 0.2, # Gumbel softmax temperature
            'temp_anneal' : False, # temperature annealing
            'input_time' : False, # Tensorboard logging purposes
            'log_directory' : False, # tensorboard logs
            'n_pac' : 1
        }
        for key in input_params:
            params[key] = input_params[key]
        self.latent_dim = params['embedding_dim']
        self.mode = params['mode']
        self.lr = params['lr']
        self.beta1 = params['beta1']
        self.beta2 = params['beta2']
        self.n_critic = params['n_critic']
        self.temperature = params['temperature']
        self.temp_anneal = params['temp_anneal']
        self.hard = params['hard']
        if self.mode == 'wgan-gp':
            self.gp_const = params['gp_const']
        self.input_time = params['input_time']
        self.log_dir = params['log_directory']
        self.n_pac = params['n_pac']
        # NOTE(review): len(df[cat]) is the number of ROWS, not the number of
        # categories of the column — looks like nunique() was intended; verify.
        self.cat_dims = tuple([len(df[cat]) for cat in cat_cols])
        self.orignal_order_cols = list(df.columns)
        gen_dim = params['gen_dim']
        crit_dim = params['crit_dim']
        self.generator = self.make_generator(gen_dim)
        # NOTE(review): n_pac * crit_dim on a tuple REPLICATES the layer list
        # (e.g. 2 * (256, 256) -> 4 hidden layers) — confirm that is intended.
        self.critic = self.make_critic(self.n_pac*crit_dim)
        self.gen_opt, self.crit_opt = self.get_opts()
    def make_generator(self, gen_dim):
        # Functional-API MLP: Dense -> BatchNorm -> ReLU per hidden layer;
        # linear output head (activations are applied later in apply_activate).
        inputs = keras.Input(shape=(self.latent_dim,))
        if type(gen_dim) == int:
            temp_layer = layers.Dense(gen_dim,
                                      kernel_initializer='normal')(inputs)
            temp_layer = layers.BatchNormalization()(temp_layer)
            temp_layer = layers.ReLU()(temp_layer)
        else:
            temp_layer = layers.Dense(gen_dim[0],
                                      kernel_initializer='normal')(inputs)
            temp_layer = layers.BatchNormalization()(temp_layer)
            temp_layer = layers.ReLU()(temp_layer)
            for shape in gen_dim[1:]:
                temp_layer = layers.Dense(shape,
                                          kernel_initializer='normal')(temp_layer)
                temp_layer = layers.BatchNormalization()(temp_layer)
                temp_layer = layers.ReLU()(temp_layer)
        # Output width: continuous features + one-hot width of categoricals.
        outputs = layers.Dense(self.n_cont+self.n_cat_oht)(temp_layer)
        # cont_output = layers.Dense(self.n_cont, activation='tanh')(temp_layer)
        # cat_output = layers.Dense(self.n_cat_oht)(temp_layer)
        # outputs = layers.Concatenate(axis=1)([cont_output, cat_output])
        model = keras.Model(inputs=inputs, outputs=outputs)
        return model
    def make_critic(self, crit_dim):
        # Same MLP shape as the generator but with LeakyReLU and, in plain
        # WGAN mode, weight clipping to enforce the Lipschitz constraint.
        inputs = keras.Input(shape=((self.n_cont+self.n_cat_oht), ))
        if self.mode == 'wgan':
            constraint = ClipConstraint(0.01)
        else:
            constraint = None
        if type(crit_dim) == int:
            temp_layer = layers.Dense(crit_dim,
                                      kernel_constraint=constraint)(inputs)
            temp_layer = layers.BatchNormalization()(temp_layer)
            temp_layer = layers.LeakyReLU()(temp_layer)
        else:
            temp_layer = layers.Dense(crit_dim[0],
                                      kernel_constraint=constraint)(inputs)
            temp_layer = layers.BatchNormalization()(temp_layer)
            temp_layer = layers.LeakyReLU()(temp_layer)
            for shape in crit_dim[1:]:
                temp_layer = layers.Dense(shape,
                                          kernel_constraint=constraint)(temp_layer)
                temp_layer = layers.BatchNormalization()(temp_layer)
                temp_layer = layers.LeakyReLU()(temp_layer)
        # Single linear score output (Wasserstein critic, no sigmoid).
        outputs = layers.Dense(1)(temp_layer)
        model = keras.Model(inputs=inputs, outputs=outputs, name = 'Critic')
        return model
    def get_opts(self):
        # RMSprop for plain WGAN (as in the original paper), Adam for WGAN-GP.
        if self.mode == 'wgan':
            gen_opt = keras.optimizers.RMSprop(self.lr)
            crit_opt = keras.optimizers.RMSprop(self.lr)
        elif self.mode == 'wgan-gp':
            gen_opt = keras.optimizers.Adam(learning_rate=self.lr, beta_1=self.beta1, beta_2=self.beta2)
            crit_opt = keras.optimizers.Adam(learning_rate=self.lr, beta_1=self.beta1, beta_2=self.beta2)
        return gen_opt, crit_opt
    def sample_df(self, n, temperature=0.2, hard=True, scaled=False):
        """Sample *n* rows as a DataFrame (one-hot, scaled column space)."""
        # NOTE(review): temperature/hard/scaled are accepted but effectively
        # ignored — sample() uses the instance settings and the inverse
        # transform is commented out. Confirm intended.
        array_sample = self.sample(n, temperature, hard).numpy()
        df_sample = pd.DataFrame(array_sample, columns=self.oht_shuff_cols)
        # if not scaled:
        #     df_sample = self.scaler.inverse_transfrom(df_sample)
        #     df_sample = df_sample[self.orignal_order_cols]
        return df_sample
    def sample(self, n, temperature=0.2, hard=True):
        """Sample *n* rows as a tensor: noise -> generator -> activations."""
        noise = tf.random.normal((n, self.latent_dim))
        sample = self.generator(noise, training=False)
        sample = self.apply_activate(sample)
        return sample
    def scale_data(self, df, cont_cols, cat_cols, fit):
        """Reorder, scale and one-hot encode *df*; fit the scaler when asked."""
        df = df.copy()
        if self.initialized == False:
            self.orignal_order_cols = list(df.columns)
            self.scaler = dataScaler()
        df = data_reorder(df, cat_cols)
        df = self.scaler.transform(df, cont_cols, cat_cols, fit)
        if self.initialized == False:
            # One-hot width = total columns after transform minus continuous.
            self.n_cont, self.n_cat_oht = len(cont_cols) , (len(df.columns)-len(cont_cols))
        df = df.astype('float32')
        self.oht_shuff_cols = list(df.columns)
        return df
    def apply_activate(self, data_batch):
        """Apply output activations: Gumbel-softmax on the categorical slice."""
        #numerical activation (currently identity; tanh variant commented out)
        ret_data = data_batch
        # ret_data = ret_data[:,0:self.n_cont]
        # ret_data = tf.nn.tanh(ret_data)
        # ret_data = tf.concat([ret_data, data_batch[:, self.n_cont:]], axis = 1)
        #categorical activation
        if self.cat_dims != ():
            ret_data = sample_gumbel(ret_data, self.temperature, self.cat_dims, self.hard)
        return ret_data
    def train(self, dataframe, epochs, batch_size = 500, params = {}, cont_cols = [], cat_cols = [], shuffle = True, new_data = False):
        """Public entry point: scale data, lazily initialize, then train.

        NOTE(review): mutable default arguments (params/cont_cols/cat_cols)
        are shared across calls — safe only while callers never mutate them.
        """
        df = dataframe.copy()
        self.batch_size = batch_size
        if self.initialized == False:
            df = self.scale_data(df, cont_cols, cat_cols, True)
            self.n_cont, self.n_cat_oht = len(cont_cols) , (len(df.columns)-len(cont_cols))
            self.initialize(df, cont_cols, cat_cols, params)
            self.initialized = True
        else:
            df = self.scale_data(df, cont_cols, cat_cols, new_data)
        dataset = df_to_dataset(df, shuffle, self.batch_size)
        loss_li = self.train_ds(dataset, epochs, len(df), self.batch_size, self.cat_dims, self.hard, self.temp_anneal, self.input_time)
        self.epoch_trained += epochs
        return loss_li
    def train_ds(self, dataset, epochs, n_data, batch_size=500, cat_dims=(), hard=False, temp_anneal = False, input_time = False):
        """Inner training loop over a tf.data Dataset; returns [c_losses, g_losses]."""
        self.cat_dims = cat_dims
        temp_increment = self.temperature/epochs # for temperature annealing
        self.g_loss = Mean('generator_loss', dtype = tf.float64)
        self.c_loss = Mean('critic_loss', dtype = tf.float64)
        if self.log_dir:
            current_time = input_time if input_time else datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            generator_log_dir = self.log_dir+'\\logs\\'+current_time+'\\gradient_tape\\generator'
            critic_log_dir = self.log_dir+ '\\logs\\'+current_time+'\\gradient_tape\\critic'
            generator_summary_writer = tf.summary.create_file_writer(generator_log_dir)
            critic_summary_writer = tf.summary.create_file_writer(critic_log_dir)
        for epoch in range(self.epoch_trained,self.epoch_trained+epochs):
            start = time.time()
            g_loss = 0
            c_loss = 0
            counter = 0
            # NOTE(review): loss_li is re-created every epoch, so the returned
            # lists only ever hold the LAST epoch's losses — presumably this
            # initialization belongs before the epoch loop; verify.
            loss_li = [[], []]
            #trace = True # Tensorboard tracing, currently not working
            for data_batch in dataset:
                # if trace:
                #     tf.summary.trace_on(graph = True, profiler = True)
                c_loss = self.train_step_c(data_batch, hard)
                # if trace:
                #     with critic_summary_writer.as_default():
                #         tf.summary.trace_export(
                #             name = 'critic_trace', step = 0, profiler_outdir = critic_log_dir
                #         )
                # Generator steps only every n_critic batches.
                if counter % self.n_critic == 0:
                    # if trace:
                    #     tf.summary.trace_on(graph = True, profiler = True)
                    g_loss = self.train_step_g(batch_size, hard)
                    # if trace:
                    #     with generator_summary_writer.as_default():
                    #         tf.summary.trace_export(
                    #             'generator_trace', step = 0, profiler_outdir = generator_log_dir
                    #         )
                    # start = False
                counter += 1
            if self.log_dir:
                with critic_summary_writer.as_default():
                    tf.summary.scalar('loss', c_loss, step = epoch)
                with generator_summary_writer.as_default():
                    tf.summary.scalar('loss', g_loss, step = epoch)
            loss_li[0].append(c_loss.numpy())
            loss_li[1].append(g_loss.numpy())
            if (epoch + 1) % 5 == 0:
                # Checkpoint functionality here
                print('Epoch: {}, Time Elapsed:{} sec \n Critic Loss: {} Generator Loss: {}'.format(epoch + 1,
                      np.round(time.time() - start, 4),
                      my_tf_round(c_loss, 4), my_tf_round(g_loss,4)))
            #if (temp_anneal):
            #    self.set_temperature(self.temperature-temp_increment)
            dataset = dataset.shuffle(buffer_size=10000)
        return loss_li
    @tf.function
    def train_step_c(self, data_batch, hard):
        """One critic update on a real batch (with optional gradient penalty)."""
        # NOTE(review): tot_dim/start_cat_dim are computed but never used here.
        tot_dim = data_batch.shape[1]
        start_cat_dim = tot_dim - sum(self.cat_dims)
        noise = tf.random.normal((len(data_batch), self.latent_dim))
        with tf.GradientTape() as crit_tape:
            fake_data = self.generator(noise, training=True)
            fake_data = self.apply_activate(fake_data)
            real_output = self.critic(data_batch, training=True)
            fake_output = self.critic(fake_data, training=True)
            crit_loss = critic_loss(real_output, fake_output)
            if self.mode == 'wgan-gp':
                gp_loss = self.gp_const * gradient_penalty(partial(self.critic), data_batch, fake_data)
                crit_loss += gp_loss
        critic_gradients = crit_tape.gradient(crit_loss , self.critic.trainable_variables)
        self.crit_opt.apply_gradients(zip(critic_gradients, self.critic.trainable_variables))
        self.c_loss(crit_loss)
        return crit_loss
    @tf.function
    def train_step_g(self, batch_size, hard):
        """One generator update from fresh noise."""
        noise = tf.random.normal((batch_size, self.latent_dim))
        with tf.GradientTape() as gen_tape:
            fake_data = self.generator(noise, training=True)
            gen_tape.watch(fake_data)
            fake_data = self.apply_activate(fake_data)
            fake_output = self.critic(fake_data, training=True)
            gen_loss = generator_loss(fake_output)
        generator_gradients = gen_tape.gradient(gen_loss, self.generator.trainable_variables)
        self.gen_opt.apply_gradients(zip(generator_gradients, self.generator.trainable_variables))
        self.g_loss(gen_loss)
        # NOTE(review): printing gradients inside a tf.function — debug leftover?
        print(generator_gradients)
        return gen_loss
    def set_temperature(self, temperature):
        # Setter for the Gumbel-softmax temperature (used by annealing).
        self.temperature = temperature
    def save(self, path, force=False):
        """Save the fitted model at the given path (pickle; refuses to
        overwrite an existing file unless force=True)."""
        if os.path.exists(path) and not force:
            print('The indicated path already exists. Use `force=True` to overwrite.')
            return
        base_path = os.path.dirname(path)
        if not os.path.exists(base_path):
            os.makedirs(base_path)
        with open(path, 'wb') as f:
            pickle.dump(self, f)
        print('Model saved successfully.')
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,118
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/general/optimization.py
|
from hyperopt import STATUS_OK, hp, tpe, Trials, fmin
import os
from gan_thesis.models.general.utils import save_json, HiddenPrints
def optimize(space, file_path=None, max_evals=5):
    """Run hyperopt TPE search over *space* for the model named in space['model'].

    Args:
        space: hyperopt search-space dict; must contain a 'model' key
            ('ctgan' or 'tgan') selecting which synthesizer to tune.
        file_path: optional path where the best parameters are saved as JSON.
        max_evals: number of TPE evaluations to run.

    Returns:
        (best, trials): the best parameter dict found and the hyperopt Trials
        object holding the full evaluation history.
    """
    if space.get('model') == 'ctgan':
        from gan_thesis.models.ctgan.synthesizer import build_and_train, sampler, optim_loss
    elif space.get('model') == 'tgan':
        from gan_thesis.models.tgan.synthesizer import build_and_train, sampler, optim_loss
    # elif space.get('model') == 'wgan':
    #     from gan_thesis.models.wgan.synthesizer import build_and_train, sampler, optim_loss
    def objective(params):
        """Objective function for GAN Hyperparameter Tuning"""
        # with HiddenPrints(): # Suppresses normal print functions
        my_gan = build_and_train(params)
        samples = sampler(my_gan, params)
        loss = optim_loss(samples.data, params)
        params['loss'] = loss
        # save_json(params, os.path.join(__file__, ))
        del my_gan, samples
        # Dictionary with information for evaluation
        return {'loss': loss, 'params': params, 'status': STATUS_OK}
    # Trials object to track progress
    bayes_trials = Trials()
    # Bug fix: pass the Trials object to fmin — it was previously created but
    # never wired in, so the returned trials history was always empty.
    best = fmin(fn=objective, space=space, algo=tpe.suggest,
                max_evals=max_evals, trials=bayes_trials)
    if file_path is not None:
        save_json(best, file_path)
    return best, bayes_trials
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,119
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/ctgan/synthesizer.py
|
from ctgan import CTGANSynthesizer
from gan_thesis.evaluation.machine_learning import plot_predictions_by_dimension
from gan_thesis.evaluation.plot_marginals import plot_marginals
from gan_thesis.evaluation.association import plot_association
from gan_thesis.evaluation.pMSE import *
from gan_thesis.data.load_data import *
from gan_thesis.models.general.utils import save_model, load_model, save_json
from gan_thesis.models.general.optimization import optimize
import os
import pandas as pd
from definitions import RESULT_DIR
#from hyperopt import hp
# Number of training epochs used by build_and_train below.
EPOCHS = 1000
# HYPEROPT SPACE
# space = {
# 'embedding_dim': hp.quniform('embedding_dim', 16, 512, 2),
# 'gen_num_layers': hp.quniform('gen_num_layers', 1, 5, 1),
# 'gen_layer_sizes': hp.quniform('gen_layer_sizes', 16, 512, 2),
# 'crit_num_layers': hp.quniform('crit_num_layers', 1, 5, 1),
# 'crit_layer_sizes': hp.quniform('crit_layer_sizes', 16, 512, 2),
# 'l2scale': hp.loguniform('l2scale', np.log10(10 ** -6), np.log10(0.2)),
# 'batch_size': hp.quniform('batch_size', 50, 500, 50)
# }
# Fallback CTGAN hyperparameters used when no explicit or optimized
# parameter set is supplied.  Mirrors the (commented-out) hyperopt space above.
DEF_PARAMS = {
    # Regular parameters
    'eval': 'all',
    # NN Hyperparameters
    'embedding_dim': 128,
    'gen_num_layers': 2,
    'gen_layer_sizes': 256,
    'crit_num_layers': 2,
    'crit_layer_sizes': 256,
    'l2scale': 10 ** -6,
    'batch_size': 500
}
def build_and_train(params):
    """Build a CTGANSynthesizer from *params* and fit it for EPOCHS epochs.

    Expects *params* to carry a 'dataset' entry plus the NN hyperparameters;
    returns the fitted synthesizer.
    """
    gen_layers = [int(params['gen_layer_sizes'])] * int(params['gen_num_layers'])
    print(gen_layers)
    crit_layers = [int(params['crit_layer_sizes'])] * int(params['crit_num_layers'])
    print(crit_layers)
    synthesizer = CTGANSynthesizer(
        embedding_dim=int(params['embedding_dim']),
        gen_dim=gen_layers,
        dis_dim=crit_layers,
        batch_size=int(params['batch_size']),
        l2scale=params['l2scale'],
    )
    print('Fitting a CTGAN model for {0} epochs...'.format(EPOCHS))
    dataset = params.get('dataset')
    synthesizer.fit(dataset.train, dataset.info.get('discrete_columns'), epochs=EPOCHS)
    print('Successfully fitted a CTGAN model')
    return synthesizer
def sampler(my_ctgan, params):
    """Draw 100000 synthetic rows and restore the training columns/dtypes."""
    train = params.get('dataset').train
    samples = my_ctgan.sample(100000)
    samples.columns = train.columns
    return samples.astype(train.dtypes)
def optim_loss(samples, params):
    """pMSE propensity loss between the real training data and *samples*.

    Lower is better: a small pMSE means real and synthetic rows are hard to
    tell apart.
    """
    dataset = params.get('dataset')
    combined = add_indicator(real_df=dataset.train, synth_df=samples)
    # One-hot encode discrete features before fitting the propensity model.
    encoded = pd.get_dummies(combined, columns=dataset.info.get('discrete_columns'))
    print(encoded.head())
    loss = pMSE(encoded)
    print(loss)
    return loss
def main(params=None, optim=True):
    """End-to-end CTGAN run: load data, train/optimize, sample, evaluate.

    Args:
        params: run configuration dict; defaults are filled in when None.
        optim: when True, hyperopt-optimize the model; otherwise train once.
            NOTE(review): the hyperopt `space` dict is commented out at the
            top of this file, so optim=True currently raises NameError.
    """
    if params is None:
        params = {
            # Regular parameters
            'training_set': 'mvn-test2',
            'eval': 'all',
            # NN Hyperparameters
            'embedding_dim': 128,
            'gen_num_layers': 2,
            'gen_layer_sizes': 256,
            'crit_num_layers': 2,
            'crit_layer_sizes': 256,
            'l2scale': 10**-6,
            'batch_size': 500
        }
    if optim:
        params.update(space)  # Overwrite NN hyperparameters with stochastic variant from top of file
    print('Starting CTGAN main script with following parameters:')
    for key in params:
        print(key, params[key])
    params['model'] = 'ctgan'
    # Load dataset
    dataset = load_data(params.get('training_set'))
    params['dataset'] = dataset
    print('Successfully loaded dataset {0}'.format(params.get('training_set')))
    if params['model'] in dataset.samples:
        # If we are here, we have already generated samples for this test setup (identifier/dataset/model)
        samples = dataset.samples.get(params['model'])
    else:
        if optim:
            # Optimize or load CTGAN model
            filename = os.path.join(RESULT_DIR, params.get('training_set'), params.get('model') + '_optimized')
            if os.path.isfile(filename):
                my_ctgan = load_model(filename)
                print('Successfully loaded old optimized CTGAN model from {0}'.format(filename))
            else:
                best, trials = optimize(params, filename+'.json')
                best['dataset'] = dataset
                my_ctgan = build_and_train(best)
                save_model(my_ctgan, filename, force=True)
                print('Saved the optimized CTGAN model at {0}'.format(filename))
        else:
            # Train or load CTGAN model
            filename = os.path.join(RESULT_DIR, params.get('training_set'), params.get('model') + '_default')
            if os.path.isfile(filename):
                # NOTE(review): loading is commented out, so my_ctgan is unbound
                # in this branch and sampling below will raise — confirm intended.
                # my_ctgan = load_model(filename)
                print('Successfully loaded old CTGAN model from {0}'.format(filename))
            else:
                my_ctgan = build_and_train(params=params)
                # save_model(my_ctgan, filename, force=True)
                print('Saved the CTGAN model at {0}'.format(filename))
        # Sample from model
        print('Sampling from the CTGAN model...')
        samples = sampler(my_ctgan, params)
        save_samples(samples, params['training_set'], model=params.get('model'), force=True)
        print('Saved the CTGAN samples')
    # Evaluate fitted model
    if params['eval'] == 'all':
        print('Starting MLE evaluation on samples...')
        discrete_columns, continuous_columns = dataset.get_columns()
        plot_predictions_by_dimension(real=dataset.train, samples=samples, data_test=dataset.test,
                                      discrete_columns=discrete_columns, continuous_columns=continuous_columns,
                                      dataset=params.get('training_set'), model=params.get('model'))
        print('Plotting marginals of real and sample data...')
        plot_marginals(dataset.train, samples, params.get('training_set'), params.get('model'))
        print('Plotting association matrices...')
        diff = plot_association(dataset, samples, params.get('training_set'), params.get('model'))
        print(diff)
        # `dataset` is rebound from the Dataset object to its name string here.
        alist = params.get('training_set').split(sep='-', maxsplit=1)
        dataset = alist[0]
        basepath = os.path.join(RESULT_DIR, *alist, params.get('model'))
        # NOTE(review): JSON diff is written under a .png filename — confirm.
        filepath = os.path.join(basepath, '{0}_{1}_c_marginals.png'.format(dataset, params.get('model')))
        save_json(diff, filepath)
if __name__ == "__main__":
    main(params=None, optim=False)
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,120
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/models/wgan/utils.py
|
import tensorflow as tf
from tensorflow import keras
import tensorflow_probability as tfp
class ClipConstraint(keras.constraints.Constraint):
    """Keras weight constraint clipping weights to [-clip_value, clip_value].

    Enforces the Lipschitz constraint on the critic in plain WGAN mode.
    """
    def __init__(self, clip_value):
        self.clip_value = clip_value
    def __call__(self, weights):
        bound = self.clip_value
        return keras.backend.clip(weights, -bound, bound)
    def get_config(self):
        # Required for model (de)serialization.
        return {'clip_value': self.clip_value}
def gumb_samp(shape, eps=1e-20):
    """Draw standard Gumbel(0, 1) noise of the given shape.

    eps guards the logs against exact zeros from the uniform sampler.
    """
    uniform = tf.random.uniform(shape, minval=0, maxval=1)
    return -tf.math.log(-tf.math.log(uniform + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
    """Draw one sample from the Gumbel-Softmax (Concrete) distribution."""
    perturbed = logits + gumb_samp(tf.shape(logits))
    return tf.nn.softmax(perturbed / temperature)
def sample_gumbel(logits, temperature, cat_dims=(), hard=False):
    """Apply a Gumbel-Softmax sample to each categorical slice of *logits*.

    Assumes the categorical logits occupy the LAST sum(cat_dims) columns,
    laid out consecutively per variable; leading columns are left untouched.
    With hard=True a straight-through one-hot is returned (forward pass is
    one-hot, gradient flows through the soft sample).
    """
    start_dim = tf.shape(logits)[1] - sum(cat_dims)
    for dim in cat_dims: # Draw gumbel soft-max for each categorical variable
        temp_logits = logits[:, start_dim:start_dim + dim]
        #dist = tfp.distributions.RelaxedOneHotCategorical(temperature, logits=temp_logits)
        #temp_logits = dist.sample()
        temp_logits = gumbel_softmax_sample(temp_logits, temperature)
        if hard: # make One_hot
            # Straight-through estimator: stop_gradient makes the forward
            # value one-hot while the backward gradient uses the soft sample.
            logits_hard = tf.cast(tf.equal(temp_logits,tf.reduce_max(temp_logits,1,keepdims=True)),temp_logits.dtype)
            temp_logits = tf.stop_gradient(logits_hard - temp_logits) + temp_logits
            #temp_logits = tf.one_hot(tf.math.argmax(temp_logits, axis=1), dim)
        # Tensors are immutable, so rebuild the full row with the slice replaced.
        logits = tf.concat([logits[:, :start_dim], temp_logits, logits[:, start_dim + dim:]], axis=1)
        # logits[:, start_dim:start_dim+dim] = temp_logits
        start_dim += dim
    return logits
def gradient_penalty(f, real_data, fake_data):
    """WGAN-GP penalty: E[(||grad f(x_hat)|| - 1)^2] on random interpolates.

    *f* is the critic; x_hat interpolates uniformly between real and fake rows.
    """
    mix = tf.random.uniform(shape=[real_data.shape[0], 1])
    interpolates = mix * real_data + (1 - mix) * fake_data
    with tf.GradientTape() as tape:
        tape.watch(interpolates)
        critic_scores = f(interpolates)
    gradients = tape.gradient(critic_scores, [interpolates])[0]
    norms = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=1))
    return tf.reduce_mean((norms - 1) ** 2)
def wasserstein_loss(y_real, y_critic):
    """Wasserstein loss: mean of the elementwise product of label and score."""
    return keras.backend.mean(y_real * y_critic)
def critic_loss(real_output, fake_output):
    """Critic loss: real scores weighted +1, fake scores weighted -1."""
    loss_on_real = wasserstein_loss(real_output, tf.ones_like(real_output))
    loss_on_fake = wasserstein_loss(fake_output, -tf.ones_like(fake_output))
    return loss_on_real + loss_on_fake
def generator_loss(fake_output):
    """Generator loss: mean critic score assigned to generated samples."""
    return wasserstein_loss(tf.ones_like(fake_output), fake_output)
def my_tf_round(x, decimals = 0):
    """Round tensor *x* to the given number of decimal places."""
    scale = tf.constant(10**decimals, dtype=x.dtype)
    return tf.round(x * scale) / scale
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,121
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/data/datagen.py
|
import numpy as np
import pandas as pd
from scipy.stats import random_correlation
def multivariate_df(n_samples, mean, var, corr, seed=False, name='c'):
    """Sample a (multivariate) normal DataFrame.

    Args:
        n_samples: number of rows to draw.
        mean: list of per-feature means (length = dimension).
        var: list of per-feature variances.
        corr: correlation matrix as a nested list.
        seed: optional int seed for numpy's RNG (falsy = no re-seeding).
        name: column-name prefix passed to col_name_gen.

    Returns:
        (df, info): the sampled DataFrame and a dict describing the distribution.
    """
    if seed:
        np.random.seed(seed)
    cov = corr_var_to_cov(corr, var)
    if (len(mean) == 1):
        # Univariate case. np.random.normal expects a STANDARD DEVIATION;
        # cov[0][0] holds the variance, so take its square root.
        # (Bug fix: previously cov[0]**2 — the variance squared — was passed.)
        data = np.random.normal(mean, np.sqrt(cov[0][0]), n_samples)
    else:
        data = np.random.multivariate_normal(mean, cov, n_samples)
    cols = col_name_gen(len(mean), name)
    df = pd.DataFrame(data, columns=cols)
    info = {
        'type': 'mvn',
        'mean': mean,
        'correlation' : corr,
        'variance': var,
        'dim' : len(mean)
    }
    return df, info
def log_normal_df(n_samples, mean, var, corr, seed=False):
    """Sample a log-normal DataFrame by exponentiating multivariate normals.

    The mean/var/corr parameters describe the UNDERLYING normal distribution.
    """
    df, info = multivariate_df(n_samples, mean, var, corr, seed)
    info['type'] = 'log-normal'
    return df.applymap(np.exp), info
def mixture_gauss(n_samples, proportions, means, varis, corrs, seed=False):
    """Sample a mixture of multivariate Gaussians.

    Component sizes are drawn from a multinomial over *proportions*;
    means/varis/corrs are per-component lists.
    """
    if seed:
        np.random.seed(seed)
    info = {}
    cols = col_name_gen(len(means[0]), 'c')
    df = pd.DataFrame(columns=cols)
    component_sizes = np.random.multinomial(n_samples, proportions)
    for idx, size in enumerate(component_sizes):
        comp_df, comp_info = multivariate_df(size, means[idx], varis[idx], corrs[idx], seed)
        df = pd.concat((df, comp_df))
        comp_info['Proportion of total'] = proportions[idx]
        info['dist ' + str(idx)] = comp_info
    info['dim'] = len(means[0])
    return df, info
def mixture_log_normal(n_samples, proportions, means, varis, corrs, seed=False):
    """Sample a mixture of log-normal distributions.

    Same structure as mixture_gauss but each component is drawn via
    log_normal_df; means/varis/corrs describe the underlying normals.
    """
    if seed:
        np.random.seed(seed)
    info = {}
    cols = col_name_gen(len(means[0]), 'c')
    df = pd.DataFrame(columns=cols)
    component_sizes = np.random.multinomial(n_samples, proportions)
    for idx, size in enumerate(component_sizes):
        comp_df, comp_info = log_normal_df(size, means[idx], varis[idx], corrs[idx], seed)
        df = pd.concat((df, comp_df))
        comp_info['Proportion of total'] = proportions[idx]
        info['dist ' + str(idx)] = comp_info
    info['dim'] = len(means[0])
    return df, info
def cat_mixture_gauss(cond_df, cond_info, means, varis, corrs, seed = False):
    """Append label-conditional Gaussian features to a categorical DataFrame.

    For each categorical column of cond_df, one Gaussian block is sampled per
    label (means[i][j]/varis[i][j]/corrs[i][j] for column i, label j) and
    aligned with the rows carrying that label.
    """
    # Note label in feature vectors need have names in the same order
    # as means/covs
    info = {'Conditional info' : cond_info,
            'mixture info': {} }
    if seed:
        np.random.seed(seed)
    dim_count = cond_info['dim']
    # First pass: collect, per categorical column, its sorted unique labels
    # and how many rows carry each label.
    unique = []
    n_samples = []
    for i in range(len(cond_df.columns)): #For each categorical features
        temp_li = cond_df[cond_df.columns[i]].unique() #Find unique labels of features
        temp_li.sort()
        unique.append(temp_li)
        temp_li = []
        for j in range(len(unique[i])): #Find number of samples with i,j label
            temp_li.append(
                sum(cond_df[cond_df.columns[i]]==unique[i][j])
            )
        n_samples.append(temp_li)
    # Second pass: sample one Gaussian block per label and align it by
    # sorting cond_df on the same column (row order is label-sorted too).
    for i in range(len(unique)): #For every categorical feature
        df = pd.DataFrame()
        dim_count += len(means[i][0])
        for j in range(len(unique[i])): #for each unique label
            temp_df, temp_info = multivariate_df(n_samples[i][j], means[i][j],
                                                 varis[i][j], corrs[i][j], name = (cond_df.columns[i]+ '_c'))
            df = pd.concat((df, temp_df))
            df = df.reset_index(drop = True)
            info['mixture info']['Cat_feature_{0} label_{1}'.format(str(i),str(j))] = temp_info
        cond_df = cond_df.sort_values(cond_df.columns[i]).reset_index(drop=True)
        cond_df = pd.concat((cond_df, df), axis = 1)
    info['dim'] = dim_count
    return cond_df, info
def multinomial(n_samples, probabilities, seed=False, name='f_'):
    """Sample independent categorical features, one column per probability vector.

    Args:
        n_samples: rows to draw.
        probabilities: nested list; each inner list is one feature's label
            probabilities.
        seed: optional int seed (falsy = no re-seeding).
        name: column/label name prefix.
    """
    info = {}
    if seed:
        np.random.seed(seed)
    column_names = col_name_gen(len(probabilities), name)
    df = pd.DataFrame(columns=column_names)
    for idx, prob in enumerate(probabilities):
        label_names = col_name_gen(len(prob), name + str(idx) + '_l_')
        df[column_names[idx]] = np.random.choice(label_names, size=n_samples, p=prob)
        info[column_names[idx]] = prob
    # One-hot width of the whole frame: total label count over all features.
    info['dim'] = sum(len(prob) for prob in probabilities)
    return df, info
def multinomial_cond(n_samples, ind_probabilities, cond_probabilities, seed=False):
    """Sample categorical features where some features depend on others.

    Args:
        n_samples: rows to draw.
        ind_probabilities: nested list of probabilities, one list per
            independent feature.
        cond_probabilities: double-nested list where
            cond_probabilities[i][j] holds the probability vectors of the
            conditional features given independent feature i takes label j.
        seed: optional int seed (falsy = no re-seeding).

    Example:
        multinomial_cond(20, [[0.5, 0.5], [0.5, 0.5]],
                         [[[[0.8, 0.2], [0, 1]], [[0, 1], [1, 0]]],
                          [[[1, 0, 0], [0, 1]], [[0, 0, 1], [1, 0]],
                           [[0.1, 0.4, 0.5], [1, 0]]]])

    Returns:
        (df, info): DataFrame with independent and conditional columns, and a
        dict describing all the source distributions.
    """
    info = {}
    if seed:
        # Bug fix: previously np.random.seed() was called with NO argument,
        # which seeds from OS entropy and silently ignores the requested seed.
        np.random.seed(seed)
    ind_df, ind_info = multinomial(
        n_samples, ind_probabilities, seed, 'indf_')
    cond_df = pd.DataFrame()
    dim_count = ind_info['dim']
    info['source distributions'] = ind_info
    for i in range(len(ind_probabilities)):
        cond_df = pd.DataFrame()
        # Sort rows by the conditioning column so per-label blocks sampled
        # below line up with the rows carrying that label.
        unique_labels = ind_df[ind_df.columns[i]].unique()
        unique_labels.sort()
        ind_df = ind_df.sort_values(ind_df.columns[i])
        ind_df = ind_df.reset_index(drop=True)
        label_frames = []
        label_infos = []
        for j in range(len(unique_labels)):
            n_with_label = len(ind_df[ind_df[ind_df.columns[i]] == unique_labels[j]])
            temp_df, temp_info = multinomial(
                n_with_label, cond_probabilities[i][j], seed, 'cf_'+str(i))
            label_frames.append(temp_df)
            temp_info['conditional on'] = unique_labels[j]
            label_infos.append(temp_info)
            dim_count += temp_info['dim']
        temp = pd.concat(label_frames)
        cond_df = pd.concat((cond_df, temp), axis=0)
        cond_df = cond_df.reset_index(drop=True)
        ind_df = pd.concat([ind_df, cond_df], axis=1)
        info['conditional on ' + str(i)] = label_infos
    info['dim'] = dim_count
    return ind_df, info
def multinomial_cond_extension(n_samples, true_ind_prob, ind_prob, cond_prob, seed=False):
    """Add truly independent multinomial features to a conditional feature set.

    Args:
        n_samples: number of rows to draw.
        true_ind_prob: probability lists for extra fully independent
            features (columns prefixed 'tif_').
        ind_prob, cond_prob: passed through to multinomial_cond.
        seed: optional int seed for numpy's RNG (falsy = leave RNG alone).

    Returns:
        (df, info): concatenated DataFrame and an info dict with the two
        sub-infos plus the combined one-hot dimension under 'dim'.
    """
    info = {}
    if seed:
        # BUG FIX: the seed value was previously dropped (np.random.seed()
        # with no argument); pass it through for reproducibility.
        np.random.seed(seed)
    true_ind_df, true_ind_info = multinomial(n_samples, true_ind_prob, seed, 'tif_')
    cond_df, cond_info = multinomial_cond(n_samples, ind_prob, cond_prob, seed)
    df = pd.concat((true_ind_df, cond_df), axis=1)
    info['true independent'] = true_ind_info
    info['conditionals'] = cond_info
    info['dim'] = cond_info['dim'] + true_ind_info['dim']
    return df, info
### Helper functions
def corr_var_to_cov(corr, var):
    """Convert a correlation matrix and per-dimension variances into a
    covariance matrix: cov[i, j] = corr[i, j] * std[i] * std[j]."""
    std = np.array(np.sqrt(var))
    # Scale columns by std, then rows by std (outer-product scaling).
    scaled = np.array(corr) * std
    return scaled * std.reshape(len(std), 1)
def r_corr(size):
    """Draw a random `size` x `size` correlation matrix.

    Eigenvalues are sampled uniformly on [0, 5) and rescaled to sum to
    `size`, as required by scipy's random_correlation sampler.
    """
    eigenvalues = np.random.uniform(0, 5, size=size)
    eigenvalues = size * eigenvalues / sum(eigenvalues)
    return random_correlation.rvs(eigenvalues)
def normalize(vec):
    """Rescale `vec` so that its entries sum to one."""
    total = sum(vec)
    return vec / total
def rand_prop(size):
    """Return a random probability vector of length `size` (non-negative,
    sums to 1), drawn from normalized uniform samples."""
    draws = np.random.uniform(0, 1, size=size)
    # Inlined normalization: divide by the total mass.
    return draws / sum(draws)
def col_name_gen(num_cols, common_name):
    """Generate sequential column names: common_name + '0', '1', ...

    Args:
        num_cols: how many names to produce.
        common_name: shared prefix for every name.

    Returns:
        List of `num_cols` strings.
    """
    # Simplified from a map/lambda/zip pipeline to a single comprehension;
    # output is identical.
    return [common_name + str(i) for i in range(num_cols)]
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,122
|
TVSjoberg/gan-dump
|
refs/heads/master
|
/evaluation/association.py
|
from sklearn.metrics import mutual_info_score, normalized_mutual_info_score
from scipy.stats import spearmanr, pearsonr
from scipy.spatial.distance import euclidean
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from definitions import RESULT_DIR
from gan_thesis.data.load_data import Dataset
from gan_thesis.models.general.utils import save_json
def association(dataset, split=False):
    """Compute pairwise association measures for a Dataset.

    Pearson correlation for continuous/continuous pairs, normalized mutual
    information for discrete/discrete pairs, and binned NMI for mixed pairs.

    Args:
        dataset: object exposing `.data` (DataFrame) and `.get_columns()`
            returning (discrete_columns, continuous_columns).
        split: if False, return one full association DataFrame (lower
            triangle populated, mixed pairs mirrored); if True, return
            three DataFrames: (cont/cont, cat/cat, cont/cat).

    Returns:
        A DataFrame, or a 3-tuple of DataFrames when split=True.
    """
    data = dataset.data
    discrete_columns, continuous_columns = dataset.get_columns()
    columns = data.columns.to_list()
    if not split:
        association_matrix = np.zeros(shape=(len(columns), len(columns)))
        for i in range(len(columns)):
            for j in range(i):
                if (columns[i] in continuous_columns) and (columns[j] in continuous_columns):
                    association_matrix[i, j] = pearsonr(data.iloc[:, i], data.iloc[:, j])[0]
                if (columns[i] in discrete_columns) and (columns[j] in discrete_columns):
                    association_matrix[i, j] = normalized_mutual_info_score(data.iloc[:, i], data.iloc[:, j])
                if (columns[i] in continuous_columns) and (columns[j] in discrete_columns):
                    bin_nmi = mutual_info_score_binned(data.iloc[:, i], data.iloc[:, j], bin_axis=[True, False])
                    association_matrix[i, j] = bin_nmi
                    association_matrix[j, i] = bin_nmi
        return pd.DataFrame(association_matrix, index=columns, columns=columns)
    # split=True: three type-specific matrices.
    # BUG FIX: the original indexed these matrices with positions from the
    # *full* column list, which is out of range (and wrong) as soon as the
    # column types are interleaved. Map each column to its position within
    # its own type list instead.
    cont_pos = {c: k for k, c in enumerate(continuous_columns)}
    disc_pos = {c: k for k, c in enumerate(discrete_columns)}
    contcont_matrix = np.ones(shape=(len(continuous_columns), len(continuous_columns)))
    catcat_matrix = np.ones(shape=(len(discrete_columns), len(discrete_columns)))
    contcat_matrix = np.ones(shape=(len(continuous_columns), len(discrete_columns)))
    for i in range(len(columns)):
        for j in range(i):
            ci, cj = columns[i], columns[j]
            if (ci in continuous_columns) and (cj in continuous_columns):
                contcont_matrix[cont_pos[ci], cont_pos[cj]] = pearsonr(data.iloc[:, i], data.iloc[:, j])[0]
            if (ci in discrete_columns) and (cj in discrete_columns):
                catcat_matrix[disc_pos[ci], disc_pos[cj]] = normalized_mutual_info_score(data.iloc[:, i], data.iloc[:, j])
            if (ci in continuous_columns) and (cj in discrete_columns):
                contcat_matrix[cont_pos[ci], disc_pos[cj]] = mutual_info_score_binned(
                    data.iloc[:, i], data.iloc[:, j], bin_axis=[True, False])
            if (ci in discrete_columns) and (cj in continuous_columns):
                # BUG FIX: mixed pairs where the discrete column comes later
                # in the column order were previously dropped entirely.
                contcat_matrix[cont_pos[cj], disc_pos[ci]] = mutual_info_score_binned(
                    data.iloc[:, j], data.iloc[:, i], bin_axis=[True, False])
    return (pd.DataFrame(contcont_matrix, index=continuous_columns, columns=continuous_columns),
            pd.DataFrame(catcat_matrix, index=discrete_columns, columns=discrete_columns),
            pd.DataFrame(contcat_matrix, index=continuous_columns, columns=discrete_columns))
def mutual_info_score_binned(x, y, bin_axis=None, bins=100):
    """Normalized mutual information between x and y, discretizing
    whichever inputs `bin_axis` flags (default: bin x, leave y as-is).

    NOTE(review): despite the name this returns *normalized* MI, which is
    consistent with how association() mixes it with NMI scores.
    """
    if bin_axis is None:
        bin_axis = [True, False]  # Bin x, don't bin y
    binned_x, binned_y = (
        pd.cut(values, bins=bins) if flag else values
        for values, flag in zip((x, y), bin_axis)
    )
    return normalized_mutual_info_score(binned_x, binned_y)
def association_difference(real=None, samples=None, association_real=None, association_samples=None):
    """Sum of absolute element-wise differences between two association
    matrices; recomputes both from the raw datasets when either matrix is
    missing."""
    if association_real is None or association_samples is None:
        association_real = association(real)
        association_samples = association(samples)
    delta = association_real.to_numpy().flatten() - association_samples.to_numpy().flatten()
    return np.sum(np.abs(delta))
def plot_association(real_dataset, samples, dataset, model, force=True):
    """Plot real-vs-sampled association heatmaps side by side and save the
    figure under RESULT_DIR.

    Args:
        real_dataset: Dataset with the real data.
        samples: DataFrame of synthetic samples for `model`.
        dataset: dataset identifier, optionally 'name-variant' (split on '-').
        model: model name used in the title and output filename.
        force: overwrite an existing PNG.

    Returns:
        The summed absolute difference between the two association matrices.
    """
    association_real = association(real_dataset)
    # Wrap the raw samples in a Dataset so association() can inspect columns.
    samples_dataset = Dataset(None, None, samples, real_dataset.info, None)
    association_samples = association(samples_dataset)
    # Upper-triangle mask: only the populated lower triangle is shown.
    # NOTE(review): np.bool was removed in NumPy >= 1.24 -- confirm the
    # pinned NumPy version, or switch to plain `bool`.
    mask = np.triu(np.ones_like(association_real, dtype=np.bool))
    colormap = sns.diverging_palette(20, 220, n=256)
    plt.figure(figsize=(20, 10))
    plt.suptitle(model.upper() + ' Association')
    plt.subplot(1, 2, 1)
    plt.title('Real')
    sns.heatmap(association_real,
                vmin=-1,
                vmax=1,
                mask=mask,
                annot=False,
                cmap=colormap)
    plt.subplot(1, 2, 2)
    plt.title('Samples')
    sns.heatmap(association_samples,
                vmin=-1,
                vmax=1,
                mask=mask,
                annot=False,
                cmap=colormap)
    # 'name-variant' -> nested result folders; bare name used in filename.
    alist = dataset.split(sep='-', maxsplit=1)
    dataset = alist[0]
    basepath = os.path.join(RESULT_DIR, *alist, model)
    filepath = os.path.join(basepath, '{0}_{1}_association.png'.format(dataset, model))
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    if os.path.isfile(filepath) and force:
        os.remove(filepath)
    plt.savefig(filepath)
    plt.close()
    return association_difference(association_real=association_real, association_samples=association_samples)
def plot_all_association(complete_dataset, dataset, force=True, pass_tgan=True):
    """Plot association heatmaps for the real data and all synthesizers
    (WGAN, CTGAN, optionally TGAN) in one row, caching each matrix as CSV.

    Args:
        complete_dataset: Dataset exposing `.samples` (dict of model name ->
            samples DataFrame) and `.info`.
        dataset: dataset identifier, optionally 'name-variant'.
        force: overwrite an existing combined PNG.
        pass_tgan: include the TGAN panel.

    Side effects: writes per-model '<model>_<dataset>_association.csv'
    caches, the combined PNG, and a '<dataset>_euclidian_distance.json'
    with raw and normalized matrix differences.
    """
    alist = dataset.split(sep='-', maxsplit=1)
    base_path = os.path.join(RESULT_DIR, *alist)
    if not os.path.exists(base_path):
        os.makedirs(base_path)
    # --- real data: load cached CSV if present, else compute and cache ---
    file_path = os.path.join(base_path, 'real_{0}_association.csv'.format(dataset))
    if os.path.exists(file_path):
        association_real = pd.read_csv(file_path)
        # Drop the serialized index column, then restore the column labels
        # as the row index.
        association_real = association_real.iloc[:, 1:]
        association_real = association_real.set_index(association_real.columns)
        print('loaded real association matrix')
    else:
        association_real = association(complete_dataset)
        association_real.to_csv(file_path)
    n_col = len(association_real.columns.to_list())  # NOTE(review): unused
    diff = {}
    # --- WGAN ---
    file_path = os.path.join(base_path, 'wgan_{0}_association.csv'.format(dataset))
    if os.path.exists(file_path):
        association_wgan = pd.read_csv(file_path)
        association_wgan = association_wgan.iloc[:, 1:]
        association_wgan = association_wgan.set_index(association_wgan.columns)
        print('loaded WGAN association matrix')
    else:
        samples_wgan = complete_dataset.samples.get('wgan')
        samples_dataset = Dataset(None, None, samples_wgan, complete_dataset.info, None)
        association_wgan = association(samples_dataset)
        association_wgan.to_csv(os.path.join(base_path, 'wgan_{0}_association.csv'.format(dataset)))
    diff['wgan'] = association_difference(association_real=association_real,
                                          association_samples=association_wgan)
    # Normalize by the number of off-diagonal lower-triangle cells.
    diff['wgan_norm'] = diff['wgan'] / (0.5 * len(association_real.columns.to_list()) * (len(association_real.columns.to_list()) - 1))
    # --- CTGAN ---
    file_path = os.path.join(base_path, 'ctgan_{0}_association.csv'.format(dataset))
    if os.path.exists(file_path):
        association_ctgan = pd.read_csv(file_path)
        association_ctgan = association_ctgan.iloc[:, 1:]
        association_ctgan = association_ctgan.set_index(association_ctgan.columns)
        print('loaded CTGAN association matrix')
    else:
        samples_ctgan = complete_dataset.samples.get('ctgan')
        samples_dataset = Dataset(None, None, samples_ctgan, complete_dataset.info, None)
        association_ctgan = association(samples_dataset)
        association_ctgan.to_csv(os.path.join(base_path, 'ctgan_{0}_association.csv'.format(dataset)))
    diff['ctgan'] = association_difference(association_real=association_real,
                                           association_samples=association_ctgan)
    diff['ctgan_norm'] = diff['ctgan'] / (
        0.5 * len(association_real.columns.to_list()) * (len(association_real.columns.to_list()) - 1))
    # --- TGAN (optional) ---
    file_path = os.path.join(base_path, 'tgan_{0}_association.csv'.format(dataset))
    if pass_tgan:
        if os.path.exists(file_path):
            association_tgan = pd.read_csv(file_path)
            association_tgan = association_tgan.iloc[:, 1:]
            association_tgan = association_tgan.set_index(association_tgan.columns)
            print('loaded TGAN association matrix')
        else:
            samples_tgan = complete_dataset.samples.get('tgan')
            samples_dataset = Dataset(None, None, samples_tgan, complete_dataset.info, None)
            association_tgan = association(samples_dataset)
            association_tgan.to_csv(os.path.join(base_path, 'tgan_{0}_association.csv'.format(dataset)))
        diff['tgan'] = association_difference(association_real=association_real,
                                              association_samples=association_tgan)
        diff['tgan_norm'] = diff['tgan'] / (
            0.5 * len(association_real.columns.to_list()) * (len(association_real.columns.to_list()) - 1))
    # --- combined figure: one heatmap panel per matrix, shared colorbar ---
    colormap = sns.diverging_palette(20, 220, n=256)
    # NOTE(review): np.bool was removed in NumPy >= 1.24 -- confirm pin.
    mask = np.triu(np.ones_like(association_real, dtype=np.bool))
    if pass_tgan:
        fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20, 6))
    else:
        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 6))
    # Shared colorbar axis, attached to the last panel only.
    cbar_ax = fig.add_axes([.94, .5, .02, .4])
    ax1.set_title('Real')
    ax1.set_aspect('equal')
    chart = sns.heatmap(association_real,
                        vmin=-1,
                        vmax=1,
                        mask=mask,
                        annot=False,
                        cmap=colormap,
                        ax=ax1,
                        cbar=False)
    chart.set_yticklabels(labels=chart.get_yticklabels(), rotation=0)
    ax2.set_title('WGAN')
    ax2.set_aspect('equal')
    sns.heatmap(association_wgan,
                vmin=-1,
                vmax=1,
                mask=mask,
                annot=False,
                cmap=colormap,
                ax=ax2,
                cbar=False)
    ax3.set_title('CTGAN')
    ax3.set_aspect('equal')
    if pass_tgan:
        sns.heatmap(association_ctgan,
                    vmin=-1,
                    vmax=1,
                    mask=mask,
                    annot=False,
                    cmap=colormap,
                    ax=ax3,
                    cbar=False)
    else:
        # CTGAN is the last panel, so it carries the shared colorbar.
        sns.heatmap(association_ctgan,
                    vmin=-1,
                    vmax=1,
                    mask=mask,
                    annot=False,
                    cmap=colormap,
                    ax=ax3,
                    cbar=True,
                    cbar_ax=cbar_ax)
    if pass_tgan:
        ax4.set_title('TGAN')
        ax4.set_aspect('equal')
        sns.heatmap(association_tgan,
                    vmin=-1,
                    vmax=1,
                    mask=mask,
                    annot=False,
                    cmap=colormap,
                    ax=ax4,
                    cbar=True,
                    cbar_ax=cbar_ax)
    plt.subplots_adjust(wspace=0.1)
    plt.tight_layout()
    alist = dataset.split(sep='-', maxsplit=1)
    dataset = alist[0]
    basepath = os.path.join(RESULT_DIR, *alist)
    filepath = os.path.join(basepath, '{0}_all_association.png'.format(dataset))
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    if os.path.isfile(filepath) and force:
        os.remove(filepath)
    plt.savefig(filepath)
    plt.close()
    filepath = os.path.join(basepath, '{0}_euclidian_distance.json'.format(dataset))
    save_json(diff, filepath)
|
{"/data/load_data.py": ["/definitions.py", "/dataset_spec.py"], "/models/general/testbed.py": ["/definitions.py"], "/models/wgan/synthesizer.py": ["/definitions.py"], "/evaluation/plot_marginals.py": ["/definitions.py"], "/evaluation/machine_learning.py": ["/definitions.py"], "/models/tgan/synthesizer.py": ["/definitions.py"], "/models/ctgan/synthesizer.py": ["/definitions.py"], "/evaluation/association.py": ["/definitions.py"]}
|
26,123
|
amomorning/dodecahedron-calendar
|
refs/heads/master
|
/gen_calendar.py
|
# -*- coding: UTF-8 -*-
import calendar
import ezdxf
import io
import json
import numpy as np
import time
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.backends.backend_pdf import PdfPages
def dxf_init():
    """Load the dodecahedron template DXF and return its four polyline
    layers as (inner_up, inner_down, dot, outer) query results."""
    doc = ezdxf.readfile("template.dxf")
    msp = doc.modelspace()
    inner_up = msp.query('POLYLINE[layer=="inner_up"]')
    inner_down = msp.query('POLYLINE[layer=="inner_down"]')
    dot = msp.query('POLYLINE[layer=="dot"]')
    outer = msp.query('POLYLINE[layer=="outer"]')
    return inner_up, inner_down, dot, outer
def dxf_calendar():
    """Stub for a DXF-output calendar.

    NOTE(review): this looks unfinished -- `dxf_polys` is not defined
    anywhere in the module (calling this raises NameError), the new `doc`
    is never saved, and the computed center `u` is unused. Confirm whether
    it should be completed or removed.
    """
    doc = ezdxf.new('R12', setup=True)
    global cnt, year
    for o in dxf_polys:
        u = get_center(o)
def get_center(dxf_poly):
    """Return the 2-D centroid of a DXF polyline's vertex locations."""
    total = np.zeros(2)
    for vertex in dxf_poly:
        total = total + vertex.dxf.location[0:2]
    return total / len(dxf_poly)
def plot_polygons(ax, dxf_polys, t, color):
    """Draw each polyline as a filled polygon, scaled by factor `t`
    about its own centroid (t=1 keeps the original outline)."""
    for poly in dxf_polys:
        centroid = get_center(poly)
        scaled_vertices = [
            centroid + t * (poly[k].dxf.location[0:2] - centroid)
            for k in range(len(poly))
        ]
        ax.add_patch(Polygon(np.array(scaled_vertices), facecolor=color))
def plot_polyline(ax, dxf_pls, c, ls, lw=1):
    """Draw every polyline in `dxf_pls` as connected 2-D segments on `ax`
    with the given color, line style and width."""
    for polyline in dxf_pls:
        for idx in range(len(polyline) - 1):
            start = polyline[idx].dxf.location[0:2]
            end = polyline[idx + 1].dxf.location[0:2]
            ax.plot([start[0], end[0]], [start[1], end[1]], c=c, ls=ls, linewidth=lw)
def plot_calendar(ax, dxf_polys, dx=0, dy=0, ro=0):
    """Write one month's calendar text at each polygon's centroid.

    Consumes the module globals `cnt` (next month number; incremented here)
    and `year`. dx/dy shift the text anchor; ro rotates the text in degrees.
    """
    global cnt, year
    for o in dxf_polys:
        u = get_center(o)
        # plt.scatter(u[0], u[1], color='r')
        # calendar.month() renders the month as preformatted monospace text.
        t = str(calendar.month(year, cnt))
        cnt += 1
        ax.text(u[0]+dx, u[1]+dy, t, family='consolas', fontsize=13, rotation=ro, ha='center', ma='left', va='top',linespacing=1.6, wrap=False)
def gen_calendar(
    style = 'doubled',
    maincolor = '#C7D3DD',
    percolor = '#E45C18',
    backcolor='#EDF1F4',
    linecolor='k',
    local = False):
    """Render the fold-out dodecahedron calendar and return it as PDF bytes.

    Args:
        style: 'filled', 'flipped' or 'doubled' face-fill style; any other
            value draws outlines only.
        maincolor: primary face fill color.
        percolor: currently unused in this function -- TODO confirm intent.
        backcolor: inner (shrunken) face fill color.
        linecolor: color for cut outlines and fold (dotted) lines.
        local: when True, additionally writes the figure to ./foo.pdf.

    Side effects: resets the module-global month counter `cnt`; the
    module-global `year` is read via plot_calendar.
    """
    global cnt
    cnt = 1
    inner_up, inner_down, dot, outer = dxf_init()
    fig = plt.figure(figsize=(24, 18))
    rect = fig.patch
    rect.set_facecolor('white')
    ax = fig.add_subplot(1, 1, 1)
    # Cut lines solid, fold lines dotted.
    plot_polyline(ax, outer, linecolor, '-', 0.8)
    plot_polyline(ax, dot, linecolor, (0, (10, 9)), 0.8)
    # Month text: upper faces upright, lower faces rotated 180 degrees.
    plot_calendar(ax, inner_up, 0, 3.6, 0)
    plot_calendar(ax, inner_down, 0, 4.4, 180)
    if (style == 'filled'):
        plot_polygons(ax, inner_up, 1, maincolor)
        plot_polygons(ax, inner_down, 1, maincolor)
    if (style == 'flipped'):
        plot_polygons(ax, inner_up, 1, maincolor)
        plot_polygons(ax, inner_up, 0.8, backcolor)
        plot_polygons(ax, inner_down, 0.8, maincolor)
    if (style == 'doubled'):
        plot_polygons(ax, inner_up, 1, maincolor)
        plot_polygons(ax, inner_up, 0.8, backcolor)
        plot_polygons(ax, inner_down, 1, maincolor)
        plot_polygons(ax, inner_down, 0.8, backcolor)
    plt.axis('off')
    if(local == True):
        fig.savefig('foo.pdf', format='pdf', bbox_inches='tight')
    # Render to an in-memory PDF and return the raw bytes.
    pdfio = io.BytesIO()
    with PdfPages(pdfio) as pdf:
        fig.savefig(pdf, format="pdf", bbox_inches='tight')
        pass
    return pdfio.getvalue()
if __name__ == '__main__':
    year = 2021
    # gen_calendar(style='doubled', maincolor='#FFFFFF', linecolor='r', local=True)
    # BUG FIX: 'double' is not a recognized style ('filled'/'flipped'/
    # 'doubled'), so no face polygons were drawn at all; use 'doubled'.
    gen_calendar(style='doubled', linecolor='r', local=True)
    plt.show()
|
{"/http_server.py": ["/gen_calendar.py"]}
|
26,124
|
amomorning/dodecahedron-calendar
|
refs/heads/master
|
/http_server.py
|
from flask import Flask, render_template, jsonify, send_file, request
from random import *
from flask_cors import CORS
import requests
import gen_calendar
import time
import json
# Serve the built Vue app from calendar-web/dist.
# BUG FIX: the paths used backslashes ("calendar-web\dist"), which only
# resolve on Windows (and rely on "\d" not being a recognized escape);
# forward slashes work on every OS and with Flask's path handling.
app = Flask(__name__, static_folder="calendar-web/dist", template_folder="calendar-web/dist")
# Allow cross-origin requests for the API endpoints only.
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
@app.route('/api/random')
def random_number():
    """Return a random integer in [1, 100] as JSON (demo endpoint)."""
    return jsonify({'randomNumber': randint(1, 100)})
@app.route('/api/calendar')
def user_calendar():
    """Generate a calendar PDF from the `data` query parameter (JSON).

    Expected keys: year, maincolor, percolor, backcolor, select.
    Returns raw PDF bytes on success, or the error message string on failure.
    """
    print(request.args.get('data'))
    data = json.loads(request.args.get('data'))
    try:
        # NOTE(review): data['year'][0] takes the first *element* of
        # data['year'] -- correct only if the client sends the year as a
        # one-element list; a plain string would yield its first character.
        # Verify against the frontend payload.
        gen_calendar.year = int(data['year'][0])
        maincolor = '#' + data['maincolor']
        percolor = '#' + data['percolor']
        backcolor = '#' + data['backcolor']
        select = data['select']
        # print("new")
        # print(request.args.get('data'))
        return gen_calendar.gen_calendar(select, maincolor, percolor, backcolor)
        # return send_file('./tmp.pdf', as_attachment=True, cache_timeout=0)
    except Exception as e:
        # Best-effort error reporting back to the caller as plain text.
        print(e)
        return str(e)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
    """Proxy to the webpack dev server in debug mode; otherwise serve the
    built static assets, falling back to the SPA index page."""
    if app.debug:
        return requests.get('http://localhost:8080/{}'.format(path)).text
    if path:
        return app.send_static_file(path)
    return render_template("index.html")
if __name__ == '__main__':
    # NOTE(review): debug=True also enables the Werkzeug interactive
    # debugger -- do not run with this flag in production.
    app.run(debug=True)
|
{"/http_server.py": ["/gen_calendar.py"]}
|
26,133
|
hexod0t/classifier-bert
|
refs/heads/master
|
/preprocessor.py
|
import torch
from transformers import BertTokenizerFast
class Preprocessor():
    """Wraps a locally stored BERT tokenizer and turns raw sentences into
    the tensors the classifier consumes.

    Fix: the original placed the method documentation as bare string
    statements *between* methods (where they attach to nothing); they are
    now proper docstrings inside the methods.
    """

    def __init__(self):
        # Tokenizer files are expected under ./models.
        self.tokenizer = BertTokenizerFast.from_pretrained('./models')

    def tokenize_data(self, text):
        """Batch-encode `text` (list of sentences) with padding.

        Args:
            text: list of sentences that could be true or fake.

        Returns:
            Encoding dict with 'input_ids' and 'attention_mask'
            (token_type_ids are omitted).
        """
        sent_id = self.tokenizer.batch_encode_plus(
            text, padding=True, return_token_type_ids=False)
        return sent_id

    def create_tensors(self, tokenized_input):
        """Convert a tokenizer output dict into model-ready tensors.

        Args:
            tokenized_input: dict with 'input_ids' and 'attention_mask'.

        Returns:
            (input_ids tensor, attention_mask tensor).
        """
        test_seq = torch.tensor(tokenized_input['input_ids'])
        test_mask = torch.tensor(tokenized_input['attention_mask'])
        return test_seq, test_mask
|
{"/app.py": ["/preprocessor.py"]}
|
26,134
|
hexod0t/classifier-bert
|
refs/heads/master
|
/app.py
|
# Imports
from flask import Flask, request, render_template
import numpy as np
import pandas as pd
import torch
from classifier import Classifier
from preprocessor import Preprocessor
from transformers import BertTokenizerFast
#import torch.nn as nn
#from sklearn.model_selection import train_test_split
#from sklearn.metrics import classification_report
#from transformers import AutoModel, BertTokenizerFast
# Settings
app = Flask(__name__)
model = None
#device = torch.device("cuda") # Setup GPU
# Load Data
"""
Load_dataset
Params Path -> current directory which contains the csv file
"""
""" Load saved model """
# Restore the trained classifier weights on CPU so the app works without
# a GPU; the Preprocessor provides the matching tokenizer.
model = Classifier()
model.load_state_dict(torch.load('./data/saved_weights_final.pt', map_location=torch.device('cpu')))
tokenizer = Preprocessor()
# APP routes
@app.route('/', methods=['GET'])
def index():
    """Render the landing page."""
    return render_template('home.html')
@app.route('/predict', methods=['GET', 'POST'])
def predict():
    """Classify submitted news text with the loaded BERT model and
    re-render the home page with the verdict (Spanish UI strings)."""
    if request.method == 'POST':
        new_request = request.form
        if (new_request):
            text = []
            y = new_request['input_text']
            text.append(y)
            # Tokenize the one-sentence batch and build the input tensors.
            tokenize_input = tokenizer.tokenize_data(text)
            test_seq, test_mask = tokenizer.create_tensors(tokenize_input)
            pred = model(test_seq, test_mask)
            pred = pred.detach().numpy()
            # argmax over the two output scores; index 0 = fake, 1 = real
            # (inferred from the label mapping just below).
            pred = np.argmax(pred, axis = 1)
            pred = "Falsa" if pred == 0 else "Verdadera"
            return render_template("home.html", prediction="La noticia es {}".format(pred))
    return render_template("home.html")
# Entry point
if __name__ == '__main__':
    # Default Flask dev server (no debug flag).
    app.run()
|
{"/app.py": ["/preprocessor.py"]}
|
26,135
|
sixthkrum/IMDB-sentiment-analysis
|
refs/heads/master
|
/homebrewStopwords.py
|
from nltk.corpus import stopwords
from sklearn.feature_extraction._stop_words import ENGLISH_STOP_WORDS

# Union of NLTK's and scikit-learn's English stopword lists.
stopwords = set(stopwords.words('english')).union(set(ENGLISH_STOP_WORDS))
#words to remove from stopwords
# Negations and their contractions carry sentiment signal, so they are
# kept in the corpus by subtracting them from the stopword set below.
# Both apostrophe'd and apostrophe-stripped spellings are listed because
# cleaning strips punctuation before stopword removal.
removedWords = set([
    "wouldn't", 'hasn', "doesn't", 'weren', 'wasn',
    "weren't", 'didn', 'mightn', "couldn't",
    "that'll", "didn't", "haven't", 'needn',
    "shouldn't", 'haven', "isn't", 'couldn', "it's",
    'not', 'aren', 'isn', 'doesn', "wasn't",
    'mustn', "should've", "shan't", "you'll", 'wouldn',
    "aren't", "won't", 'hadn', 'shouldn', "needn't",
    "hasn't", "mustn't", "hadn't", "mightn't", "you'd", "don't",
    "wouldnt", "doesnt", "werent", "couldnt",
    "thatll", "didnt", "youve", "havent",
    "shouldnt", "isnt", "its", "wasnt",
    "shouldve", "shant", "arent", "wont", "neednt",
    "hasnt", "mustnt", "hadnt", "mightnt", "dont"
])

stopwords = stopwords - removedWords
|
{"/evaluate.py": ["/model_architecture.py", "/preprocess.py"], "/preprocess.py": ["/homebrewStopwords.py"], "/modelTraining.py": ["/model_architecture.py", "/evaluate.py"], "/main.py": ["/evaluate.py", "/preprocess.py", "/modelTraining.py"]}
|
26,136
|
sixthkrum/IMDB-sentiment-analysis
|
refs/heads/master
|
/evaluate.py
|
import json

# Load user-tunable hyperparameters once at import time. A context manager
# replaces the manual open/close so the handle is released even if
# json.load raises.
with open('userDefinedParameters.json', 'r') as f:
    param = json.load(f)

model_name = param['model_name']
sequence_length = param['sequence_length']
import matplotlib.pyplot as plt
import numpy as np
def visualizeTraining(hist):
    """Save line plots of training/validation loss and accuracy.

    Args:
        hist: Keras History object; uses hist.history with keys
            'loss', 'val_loss', 'accuracy', 'val_accuracy'.

    Writes Results/Loss_Line_Plot.png and Results/Accuracy_Line_Plot.png.
    """
    h = hist.history
    # Line plot for the loss curves.
    plt.figure()
    plt.plot(h['val_loss'], label='Validation_Loss')
    plt.plot(h['loss'], label='Training_Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('Results/Loss_Line_Plot.png')
    plt.close()
    # BUG FIX: the accuracy curves were previously drawn onto the same
    # figure as the loss curves, so Accuracy_Line_Plot.png also contained
    # both loss lines and the stale 'Loss' y-label. Start a fresh figure.
    plt.figure()
    plt.plot(h['val_accuracy'], label='Validation_Accuracy')
    plt.plot(h['accuracy'], label='Training_Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.savefig('Results/Accuracy_Line_Plot.png')
    plt.close()
def revert_Y_to_labels(yData):
    """Threshold sigmoid outputs at 0.5 into hard 0/1 labels (numpy array)."""
    return np.array([int(score > 0.5) for score in yData])
from model_architecture import model_framework
import os
def generateReport(X, Y):
    """Evaluate the saved model on (X, Y) and write a classification report
    plus confusion matrix to Results/classification_report.txt.

    Args:
        X: padded integer sequences.
        Y: 0/1 ground-truth labels.

    No-op (with a message) if the trained weights file is missing.
    """
    if model_name == "" or os.path.exists(model_name) == False:
        print("Kindly ensure that you train model before attempting to generate report")
        return
    model = model_framework()
    model.load_weights(model_name)
    scores = model.evaluate(X, Y, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1]*100))
    predictedOutput = model.predict(X)
    from sklearn.metrics import confusion_matrix, classification_report
    # Threshold the sigmoid outputs into hard labels for sklearn metrics.
    predict_labels = revert_Y_to_labels(predictedOutput)
    actual_labels = Y
    classification_report_string = "Classification Report: \n" + str(classification_report(actual_labels, predict_labels, target_names=['Negative', 'Positive']))
    confusion_matrix_string = "Confusion Matrix: \n" + str(confusion_matrix(actual_labels, predict_labels))
    with open("Results/classification_report.txt", "w") as file:
        file.write(classification_report_string)
        file.write(confusion_matrix_string)
    print(classification_report_string)
    print(confusion_matrix_string)
    # print("Classification Report: \n", classification_report(Y_test, MNB.predict(X_test),target_names=['Negative','Positive']))
    # print("Confusion Matrix: \n", confusion_matrix(Y_test, MNB.predict(X_test)))
def saveModelArchitecture():
    """Render the current model architecture diagram to
    Results/modelArchitecture_plot.png (requires pydot/graphviz)."""
    from keras.utils.vis_utils import plot_model
    from model_architecture import model_framework
    model = model_framework()
    plot_model(model, to_file='Results/modelArchitecture_plot.png', show_layer_names=True)
import pickle
from preprocess import clean_data
from preprocess import sanityEmbeddings
from tensorflow.keras.preprocessing.sequence import pad_sequences
def userTest():
    """Interactively classify one user-entered review with the saved model.

    Loads the pickled vocabulary and tf-idf vectorizer produced during
    preprocessing, cleans and embeds the input the same way the training
    data was processed, pads it, and prints the predicted sentiment.
    """
    userInput = input("Please enter a review: \n")
    with open('vocab.pkl', 'rb') as pklFile:
        vocab = pickle.load(pklFile)
    with open('vectorizer.pkl', 'rb') as pklFile:
        vectorizer = pickle.load(pklFile)
    cleanedInput = clean_data(userInput)
    # Single-document batch through the same embedding path as training.
    intInput = sanityEmbeddings([cleanedInput], vectorizer, vectorizer.build_tokenizer())
    intInput = pad_sequences(intInput, maxlen=sequence_length, padding='pre', value=0)
    model = model_framework()
    model.load_weights(model_name)
    print("Output was predicted to be " + ("positive" if revert_Y_to_labels(model.predict(intInput)) == 1 else "negative"))
|
{"/evaluate.py": ["/model_architecture.py", "/preprocess.py"], "/preprocess.py": ["/homebrewStopwords.py"], "/modelTraining.py": ["/model_architecture.py", "/evaluate.py"], "/main.py": ["/evaluate.py", "/preprocess.py", "/modelTraining.py"]}
|
26,137
|
sixthkrum/IMDB-sentiment-analysis
|
refs/heads/master
|
/preprocess.py
|
import json

# Load user-tunable preprocessing parameters once at import time; the
# context manager guarantees the file handle is closed even on error.
with open('userDefinedParameters.json', 'r') as f:
    param = json.load(f)

vocabSize = param['vocabSize']
sequence_length = param['sequence_length']

train_path = "../train/"  # source data
test_path = "../test/"  # test data for evaluation.
#Creating "imdb_train.csv","imdb_test.csv"
from nltk.tokenize import word_tokenize
def tokenize_data(data):
    """Split raw text into word tokens using NLTK's tokenizer."""
    return word_tokenize(data)
def get_stopwords():
    """Return the project's custom stopword set (NLTK + sklearn lists minus
    negation words; see homebrewStopwords). Imported lazily at call time."""
    from homebrewStopwords import stopwords
    return stopwords
'''
CLEAN_DATA takes a sentence and the stopwords as inputs
returns the sentence without any stopwords, html tags and punctuations. Also performs lemmatization and stemming
data - The input from which the stopwords have to be removed
stop_words_list - A list of stopwords
'''
from nltk.corpus import wordnet
treebank_to_wordnet_dict = {
'J' : wordnet.ADJ, 'V' : wordnet.VERB, 'N' : wordnet.NOUN, 'R' : wordnet.ADV
}
import string
import re
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
def clean_data(data, stop_words_list=get_stopwords()):
    """Normalize a raw review into a cleaned, lemmatized string.

    Steps: strip newlines, drop HTML-like tags, turn periods into spaces,
    remove punctuation/underscores, lowercase, POS-aware lemmatize, and
    drop stopwords.

    Args:
        data: raw review text.
        stop_words_list: words to drop. NOTE: the default is evaluated once
            at definition time; fine here because the set is never mutated.

    Returns:
        Cleaned text with surviving tokens joined by single spaces.
    """
    data = data.replace('\n', '')
    # removes HTML-ish tags such as <br /> (raw string avoids the invalid
    # escape-sequence warning; pattern unchanged)
    data = re.sub(r'<(\d|\w|\s|/)*>', '', data)
    # removes punctuations
    #data=re.sub(r'[^\w\s]','', data)
    # Periods become spaces first so "end.Start" splits into two tokens,
    # then all remaining punctuation and underscores are stripped.
    data = data.translate(str.maketrans(".", " "))
    data = data.translate(str.maketrans('', '', string.punctuation + '_'))
    # Fixed: the original comprehension shadowed its own iterable
    # (`words.lower() for words in words`) and kept an unused copy of the
    # intermediate text (`originalData`).
    words = [token.lower() for token in tokenize_data(data)]
    wn = WordNetLemmatizer()
    # Lemmatize with the WordNet POS mapped from the first letter of the
    # treebank tag, defaulting to NOUN for unknown tags.
    stemmed_words = [
        wn.lemmatize(w, treebank_to_wordnet_dict.get(pos[0], wordnet.NOUN))
        for w, pos in pos_tag(words)
    ]
    useful_stemmed_words = [w for w in stemmed_words if w not in stop_words_list]
    return ' '.join(useful_stemmed_words)
'''
IMDB_DATA_PREPROCESS explores the neg and pos folders from aclImdb/train and creates a output_file in the required format
input_dir - Path of the training samples
output_dir - Path where the file has to be saved
Name - Name with which the file has to be saved
Mix - Used for shuffling the data
'''
def performancePrint(i, type, name):
    """Emit a progress line for every 500th processed record."""
    if i % 500 == 0:
        print(f"{i}  {type} and for {name} ")
import pandas as pd
import os
import numpy as np
def imdb_data_preprocess(input_dir, output_dir="./Dataset/", name="imdb_train.csv", mix=False):
    """Read the pos/neg review folders, clean each review, and write a CSV.

    Args:
        input_dir: directory containing 'pos' and 'neg' subfolders.
        output_dir: directory the CSV is written to.
        name: output CSV file name.
        mix: shuffle the rows before writing.

    Output CSV columns: row_Number, text, polarity ('1' = pos, '0' = neg).
    """
    indices = []
    text = []
    rating = []
    i = 0
    # Positive reviews labelled '1' first, then negative labelled '0'
    # (same order as the original duplicated loops).
    for label, subdir in (("1", "pos"), ("0", "neg")):
        for filename in os.listdir(input_dir + subdir):
            # BUG FIX: files were opened and never closed, leaking one
            # handle per review; the context manager releases each promptly.
            with open(input_dir + subdir + "/" + filename, 'r', encoding="ISO-8859-1") as fh:
                data = fh.read()
            data = clean_data(data)
            indices.append(i)
            text.append(data)
            rating.append(label)
            i = i + 1
            performancePrint(i, subdir, name)
    Dataset = list(zip(indices, text, rating))
    if mix:
        np.random.shuffle(Dataset)
    df = pd.DataFrame(data=Dataset, columns=['row_Number', 'text', 'polarity'])
    df.to_csv(output_dir + name, index=False, header=True)
'''
RETRIEVE_DATA takes a CSV file as the input and returns the corresponding arrays of labels and data as output.
Name - Name of the csv file
Train - If train is True, both the data and labels are returned. Else only the data is returned
'''
import pandas as pd
def retrieve_data(input_dir='./Dataset/', name="imdb_train.csv"):
    """Load a preprocessed CSV and return its (text, polarity) Series."""
    frame = pd.read_csv(input_dir + name, header=0, encoding='ISO-8859-1')
    return frame['text'], frame['polarity']
'''
TFIDF_PROCESS takes the data to be fit as the input and returns a vectorizer of the tfidf as output
Data - The data for which the bigram model has to be fit
'''
from sklearn.feature_extraction.text import TfidfVectorizer
def tfidf_process(data, max_features=vocabSize):
    """Fit a sublinear-tf TfidfVectorizer with a capped vocabulary on `data`
    and return the fitted vectorizer."""
    return TfidfVectorizer(max_features=max_features, sublinear_tf=True).fit(data)
# Padding the sequences to a fixed length
from tensorflow.keras.preprocessing.sequence import pad_sequences
def add_padding_to_Xdata(xTrain, xTest, sequence_length):
    """Left-pad (or truncate) both data splits to a fixed sequence length."""
    def _pad(sequences):
        return pad_sequences(sequences, maxlen=sequence_length, padding='pre', value=0)
    return _pad(xTrain), _pad(xTest)
def sanityEmbeddings(processedText, vectorizer, tokenizer):
    """Convert documents into integer token sequences, keeping only tokens
    whose tf-idf score falls in the middle 90% for that document.

    Args:
        processedText: iterable of cleaned document strings (order matters;
            index i must match row i of the document-term matrix).
        vectorizer: fitted TfidfVectorizer.
        tokenizer: callable splitting a document into tokens
            (typically vectorizer.build_tokenizer()).

    Returns:
        List of lists of vocabulary indices, one inner list per document,
        preserving the original token order within each document.
    """
    documentTermMatrix = vectorizer.transform(processedText).toarray()
    vocabDictionary = vectorizer.vocabulary_
    intEmbeddings = []
    temp = []
    i = 0
    for document in processedText:
        # tf-idf score for every in-vocabulary token of this document.
        topTfidfEmbeddings = {vocabDictionary.get(token) : documentTermMatrix[i][vocabDictionary.get(token)] for token in tokenizer(document) if vocabDictionary.get(token) is not None}
        #take middle 90% of the sorted embeddings based on tfidf score
        topTfidfEmbeddings = dict(sorted(topTfidfEmbeddings.items(), key = lambda item: item[1], reverse = False)
                                  [round(len(topTfidfEmbeddings) * 0.05):
                                   round(len(topTfidfEmbeddings) * 0.95)])
        # Re-walk the document in its original order, keeping tokens that
        # survived the tf-idf cut.
        for token in tokenizer(document):
            embedding = vocabDictionary.get(token)
            if embedding in topTfidfEmbeddings:
                temp.append(embedding)
        i += 1
        intEmbeddings.append(temp)
        temp = []
    return intEmbeddings
import time
import pickle
def load_data_self_preprocess(processData=True):
    """Full preprocessing pipeline for the raw IMDB folders.

    Optionally (re)cleans the raw train/test folders into CSVs, loads both
    splits, fits a tf-idf vectorizer on the training text, embeds both
    splits as integer sequences, and pads them to `sequence_length`.
    Also pickles the fitted vocabulary (vocab.pkl) and vectorizer
    (vectorizer.pkl) for later reuse by userTest().

    Args:
        processData: when True, rebuild the CSVs from the raw folders.

    Returns:
        ((xTrain, yTrain), (xTest, yTest)) with padded integer arrays.
    """
    start = time.time()
    if processData is True:
        print ("Preprocessing the training_data--")
        # Training set is shuffled (mix=True); test set keeps file order.
        imdb_data_preprocess(input_dir=train_path,output_dir=train_path,name="imdb_train.csv",mix=True)
        print ("Preprocessing the testing_data--")
        imdb_data_preprocess(input_dir=test_path,output_dir=test_path,name="imdb_test.csv",mix=False)
    print ("Done with preprocessing in. Now, will retreieve the training data in the required format")
    (xTrain_text, yTrain) = retrieve_data(input_dir=train_path,name="imdb_train.csv")
    print ("Retrieved the training data. Now will retrieve the test data in the required format")
    (xTest_text,yTest) = retrieve_data(input_dir=test_path,name="imdb_test.csv")
    print ("Retrieved the test data. Now will initialize the model \n\n")
    print("As per choice we will use vocabulary size as {}".format(vocabSize))
    print('We will try to fit our train data usinf tfidf_vectorizer')
    # Fit on training text only; the same vectorizer embeds the test split.
    tfidf_vectorizer = tfidf_process(xTrain_text,max_features=vocabSize)
    tokenizer = tfidf_vectorizer.build_tokenizer()
    with open('vocab.pkl', 'wb') as pklFile:
        pickle.dump(tfidf_vectorizer.vocabulary_, pklFile)
    with open('vectorizer.pkl', 'wb') as pklFile:
        pickle.dump(tfidf_vectorizer, pklFile)
    xTrain = sanityEmbeddings(xTrain_text, tfidf_vectorizer, tokenizer)
    xTest = sanityEmbeddings(xTest_text, tfidf_vectorizer, tokenizer)
    xTrain,xTest=add_padding_to_Xdata(xTrain,xTest, sequence_length)
    end=time.time()
    print('The data preparation took {} ms'.format(end-start))
    return (xTrain,yTrain),(xTest,yTest)
from tensorflow.keras.datasets import imdb
def load_data_keras_preproccesed(processData=False):
    """Load the pre-tokenized Keras IMDB dataset (top `vocabSize` words)
    and pad both splits to `sequence_length`.

    `processData` is accepted only for signature parity with
    load_data_self_preprocess and is ignored.
    """
    (xTrain,yTrain),(xTest,yTest) = imdb.load_data( num_words=vocabSize)
    xTrain,xTest=add_padding_to_Xdata(xTrain,xTest, sequence_length)
    return (xTrain,yTrain),(xTest,yTest)
if __name__ == '__main__':
    # This module is a library of preprocessing helpers, not a script.
    for message in ("This file is for preprocessing the IMDB Dataset",
                    "Not meant to be run directly"):
        print(message)
|
{"/evaluate.py": ["/model_architecture.py", "/preprocess.py"], "/preprocess.py": ["/homebrewStopwords.py"], "/modelTraining.py": ["/model_architecture.py", "/evaluate.py"], "/main.py": ["/evaluate.py", "/preprocess.py", "/modelTraining.py"]}
|
26,138
|
sixthkrum/IMDB-sentiment-analysis
|
refs/heads/master
|
/model_architecture.py
|
import json

# Load user-tunable model hyperparameters once at import time; the
# context manager guarantees the file handle is closed even on error.
with open('userDefinedParameters.json', 'r') as f:
    param = json.load(f)

vocabSize = param['vocabSize']
sequence_length = param['sequence_length']
# Working great
def classification_model_1(vocabSize, sequence_length, dropout_rate=0.2):
    """Baseline classifier: Embedding -> Flatten -> Dense(250) -> sigmoid.

    Args:
        vocabSize: vocabulary size for the embedding layer.
        sequence_length: fixed input sequence length.
        dropout_rate: unused here; kept for signature parity with the
            other model builders.

    Returns:
        A compiled Keras Sequential model (binary cross-entropy, Adam).
    """
    # Only import what this builder actually uses (the original also pulled
    # in relu, Adam, categorical_crossentropy and plot_model unused).
    from tensorflow.keras.layers import Dense, Flatten
    from tensorflow.keras.layers import Embedding
    from tensorflow.keras.models import Sequential

    model = Sequential()
    model.add(Embedding(vocabSize, 32, input_length=sequence_length))
    model.add(Flatten())
    model.add(Dense(250, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# Stuck at 50%
def classification_model_LSTM(vocabSize, sequence_length, dropout_rate=0.2):
    """LSTM classifier: Embedding -> LSTM(32) -> Dense(32) -> sigmoid,
    with dropout after the recurrent and dense layers.

    Args:
        vocabSize: vocabulary size for the embedding layer.
        sequence_length: fixed input sequence length.
        dropout_rate: dropout applied after the LSTM and Dense layers.

    Returns:
        A compiled Keras Sequential model (binary cross-entropy, Adam).
    """
    # Unused imports from the original removed (softmax,
    # categorical_crossentropy, plot_model).
    from tensorflow.keras.activations import relu
    from tensorflow.keras.layers import Embedding, LSTM, Dropout, Dense
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.optimizers import Adam

    # NOTE(review): lr=3e-6 is unusually small -- training will barely
    # move (consistent with the "Stuck at 50%" comment above); confirm
    # whether this learning rate is intentional.
    adam = Adam(lr=0.000003)
    model = Sequential()
    model.add(Embedding(vocabSize, 32, input_length=sequence_length))
    model.add(LSTM(32))
    model.add(Dropout(dropout_rate))
    model.add(Dense(32, activation=relu))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(
        optimizer=adam,
        loss='binary_crossentropy',
        metrics=['accuracy']
    )
    print(model.summary())
    # plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return model
def classification_model_new_LSTM(vocabSize=5000, sequence_length=120, dropout_rate=0.3):
    """Current LSTM classifier: Embedding(10) -> LSTM(32) -> Dense(32) ->
    sigmoid, with dropout after the recurrent and dense layers.

    Args:
        vocabSize: vocabulary size for the embedding layer.
        sequence_length: fixed input sequence length.
        dropout_rate: dropout applied after the LSTM and Dense layers.
            BUG FIX: this parameter was unconditionally overwritten with
            0.3 inside the function, silently ignoring the caller's value;
            it is now honored (the default of 0.3 preserves the behavior
            of all existing call sites).

    Returns:
        A compiled Keras Sequential model (binary cross-entropy, Adam).
    """
    from tensorflow.keras.activations import relu
    from tensorflow.keras.layers import Embedding, LSTM, Dropout, Dense
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.optimizers import Adam

    activation_func = relu
    SCHEMA = [
        Embedding(vocabSize, 10, input_length=sequence_length),
        LSTM(32),
        Dropout(dropout_rate),
        Dense(32, activation=activation_func),
        Dropout(dropout_rate),
        Dense(1, activation='sigmoid')
    ]
    model = Sequential(SCHEMA)
    model.compile(
        loss='binary_crossentropy',
        optimizer=Adam(),
        metrics=['accuracy']
    )
    return model
def model_framework():
    """Factory for the model the rest of the project trains/evaluates,
    configured from the module-level JSON-derived hyperparameters."""
    return classification_model_new_LSTM(
        vocabSize=vocabSize,
        sequence_length=sequence_length,
        dropout_rate=0.3,
    )
|
{"/evaluate.py": ["/model_architecture.py", "/preprocess.py"], "/preprocess.py": ["/homebrewStopwords.py"], "/modelTraining.py": ["/model_architecture.py", "/evaluate.py"], "/main.py": ["/evaluate.py", "/preprocess.py", "/modelTraining.py"]}
|
26,139
|
sixthkrum/IMDB-sentiment-analysis
|
refs/heads/master
|
/modelTraining.py
|
import json

# Read the shared hyperparameter/config file produced by prepareJSON.py.
f = open('userDefinedParameters.json','r')
param = json.load(f)
f.close()
# will come from json file later
batch_size=param['batch_size']        # mini-batch size passed to model.fit
model_name=param['model_name']        # checkpoint filename (e.g. best_model.h5)
num_of_epochs=param['num_of_epochs']  # upper bound on training epochs
#end
#Defining Our Deep Learning Model
from model_architecture import model_framework
from evaluate import visualizeTraining
def trainModel(xTrain, yTrain):
    """Build the model, fit it with checkpointing/early stopping, and plot
    the training history.

    Args:
        xTrain: padded/encoded training sequences.
        yTrain: binary sentiment labels.

    Returns:
        The fitted Keras model. (Previously the model was discarded and the
        function returned None; returning it is backward compatible —
        existing callers ignore the return value.)
    """
    model = model_framework()
    # Adding some checkpoints: keep the best weights seen so far on disk and
    # stop once val_loss stops improving by at least min_delta.
    from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
    checkpoint = ModelCheckpoint(
        model_name,
        monitor="val_loss",
        save_best_only=True,
        save_weights_only=False,
        mode="auto"
    )
    es_checkpoint = EarlyStopping(
        monitor="val_loss",
        min_delta=0.01,
        patience=2,
        verbose=0,
        mode="auto"
    )
    hist = model.fit(xTrain, yTrain, batch_size=batch_size, epochs=num_of_epochs,
                     validation_split=0.20, callbacks=[checkpoint, es_checkpoint])
    visualizeTraining(hist)
    return model
|
{"/evaluate.py": ["/model_architecture.py", "/preprocess.py"], "/preprocess.py": ["/homebrewStopwords.py"], "/modelTraining.py": ["/model_architecture.py", "/evaluate.py"], "/main.py": ["/evaluate.py", "/preprocess.py", "/modelTraining.py"]}
|
26,140
|
sixthkrum/IMDB-sentiment-analysis
|
refs/heads/master
|
/prepareJSON.py
|
"""Write the project's user-tunable configuration to userDefinedParameters.json.

Every other script reads these values back at import time.
"""
import json

user_defined_parameters = {
    'vocabSize': 5000,
    'batch_size': 1000,
    'sequence_length': 120,
    'train': 1,            # 1 -> train a new model, 0 -> skip training
    'model_name': "best_model.h5",
    'num_of_epochs': 30,
    'processData': 0,      # 1 -> re-run preprocessing from the raw data
    'userTest': 0,         # 1 -> interactive single-review testing mode
}

with open('userDefinedParameters.json', 'w') as outfile:
    json.dump(user_defined_parameters, outfile)
|
{"/evaluate.py": ["/model_architecture.py", "/preprocess.py"], "/preprocess.py": ["/homebrewStopwords.py"], "/modelTraining.py": ["/model_architecture.py", "/evaluate.py"], "/main.py": ["/evaluate.py", "/preprocess.py", "/modelTraining.py"]}
|
26,141
|
sixthkrum/IMDB-sentiment-analysis
|
refs/heads/master
|
/main.py
|
import json

# Runtime flags come from the shared config written by prepareJSON.py.
f = open('userDefinedParameters.json','r')
param = json.load(f)
f.close()
# will come from json file later
train=param['train']==1              # re-train the model this run?
processData=param['processData']==1  # re-run raw-data preprocessing?
user_test = param['userTest'] == 1   # interactive single-review test mode?
from evaluate import userTest
if user_test:
    # Interactive mode short-circuits the whole pipeline.
    userTest()
    exit()
# Loading Datset
from preprocess import load_data_keras_preproccesed,load_data_self_preprocess
(xTrain,yTrain),(xTest,yTest)= load_data_self_preprocess(processData=processData)
# (xTrain,yTrain),(xTest,yTest)= load_data_keras_preproccesed(processData=processData)
# Save Model Diagram (not working as of now)
from evaluate import saveModelArchitecture
saveModelArchitecture()
# Loading Code to Train Model
from modelTraining import trainModel
if train==True:
    trainModel(xTrain,yTrain)
# Loading Code to Evaluate the results on test data
from evaluate import generateReport
generateReport(xTest,yTest)
|
{"/evaluate.py": ["/model_architecture.py", "/preprocess.py"], "/preprocess.py": ["/homebrewStopwords.py"], "/modelTraining.py": ["/model_architecture.py", "/evaluate.py"], "/main.py": ["/evaluate.py", "/preprocess.py", "/modelTraining.py"]}
|
26,152
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/signals.py
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Profile,RelationShip,Notification
from django.contrib.auth.models import User
@receiver(post_save, sender=User)
def post_save_create_profile(sender, created, instance, *args, **kwargs):
    # Auto-provision a Profile the first time a User row is saved.
    # (Parameter names match Django's post_save signal kwargs.)
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=RelationShip)
def post_save_RelationShip(sender, created, instance, *args, **kwargs):
    """Keep Profile.follow and Notification rows in sync with a saved
    RelationShip.

    Fixes vs. original:
    - the unfollow branch compared against 'UnFollowing', a value never
      stored (the model choices and the views use 'UnFollow'), so it never
      executed;
    - the unfollow branch removed the relation in the wrong direction
      (receiver unfollowing sender instead of sender unfollowing receiver).
    """
    sender_ = instance.sender
    receiver_ = instance.receiver
    if instance.status == 'following':
        sender_.follow.add(receiver_.user)
        # get_or_create avoids stacking duplicate unseen follow notifications.
        notification, _ = Notification.objects.get_or_create(
            sender=sender_, receiver=receiver_, not_type='Follow', is_seen=False)
        receiver_.save()
        sender_.save()
    elif instance.status == 'UnFollow':  # matches RelationShip.STATUS_CHOICES
        sender_.follow.remove(receiver_.user)
        receiver_.save()
        sender_.save()
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,153
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/migrations/0005_auto_20210215_1433.py
|
# Generated by Django 3.1.5 on 2021-02-15 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.5): make profile name/bio fields optional
    and set RelationShip.status choices to following/UnFollow.
    Do not hand-edit applied migrations."""

    dependencies = [
        ('profiles', '0004_relationship'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=models.TextField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='firstName',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='lastName',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='relationship',
            name='status',
            field=models.CharField(choices=[('following', 'following'), ('UnFollow', 'UnFollow')], max_length=50),
        ),
    ]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,154
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/context_processors.py
|
from .models import Notification , Profile
from django.shortcuts import redirect
def noteF(request):
    """Context processor exposing the current profile's unseen notifications
    to every template as ``not_f``."""
    user = request.user
    # Anonymous users (or users whose username and slug diverge) get no feed.
    if not user.is_authenticated or user.username != user.profile.slug:
        return {'data': False}
    current_profile = Profile.objects.get(slug=user.profile.slug)
    unseen = Notification.objects.filter(receiver=current_profile, is_seen=False)
    return {'not_f': unseen}
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,155
|
Hady-Taha/Twitx
|
refs/heads/main
|
/posts/views.py
|
from django.shortcuts import render,redirect
from .models import Post, Like
from profiles.models import Profile,Notification
from .forms import AddPost
from django.http import JsonResponse
from django.db.models import Q
# Create your views here.
def twitx(request):
    """Public landing page: all posts, randomly ordered."""
    shuffled_posts = Post.objects.all().order_by('?')
    return render(request, 'posts/randomPost.html', {
        'title': 'twitx',
        'posts': shuffled_posts,
    })
def home(request):
    """Authenticated home feed: the viewer's posts plus those of followed
    profiles, newest first; also handles the inline 'new post' form.

    Fix vs. original: removed an unreachable ``pass`` after the return.
    """
    if request.user.is_authenticated == False:
        return redirect('twitx')
    form = AddPost()
    posts = Post.objects.filter(
        Q(user=request.user.profile) |
        Q(user__in=request.user.profile.get_all_following_posts)
    ).order_by('-created')
    if request.method == 'POST':
        form = AddPost(request.POST)
        if form.is_valid():
            newPost = form.save(commit=False)
            # The author is always the logged-in profile, never form data.
            newPost.user = request.user.profile
            newPost.save()
            form = AddPost()  # present a fresh, empty form after posting
    context = {
        'title': 'Home',
        'posts': posts,
        'form': form,
    }
    return render(request, 'posts/home.html', context)
def like_unlike(request):
    """AJAX endpoint toggling the current profile's like on a post; returns
    the post's updated like count as JSON.

    NOTE(review): the final JsonResponse references ``getPost``, which is
    only bound inside the POST branch — this view appears to assume it is
    only ever hit via POST (the JS caller). Confirm against the frontend.
    """
    user = request.user.profile
    if request.method == 'POST':
        postID = request.POST.get('post_id')
        getPost = Post.objects.get(id=postID)
        # Toggle membership in the M2M 'liked' set.
        if user in getPost.liked.all():
            getPost.liked.remove(user)
        else:
            getPost.liked.add(user)
        # Mirror the toggle in the Like through-model's 'value' field.
        like, created = Like.objects.get_or_create(user=user, post=getPost)
        if created == False:
            if like.value == 'Like':
                like.value = 'UnLike'
            else:
                like.value = 'Like'
        else:
            like.value='Like'
        like.save()
        getPost.save()
    return JsonResponse({'data': getPost.get_all_likes_count()}, status=200)
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,156
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/views.py
|
from django.shortcuts import render,redirect
from .models import Profile,RelationShip,Notification
from .forms import ProfileSetting
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout, authenticate
from posts.models import Post
# Create your views here.
def profiles(request, slug):
    """Profile detail page; a POST toggles the viewer's follow state for
    this profile (both the M2M and the RelationShip row)."""
    if request.user.is_authenticated==False:
        return redirect('twitx')
    user = request.user.profile
    profile = Profile.objects.get(slug=slug)
    posts = Post.objects.filter(user=profile).order_by('-created')
    not_ = Notification.objects.filter(receiver=profile, is_seen=False)
    if request.method == 'POST':
        # Toggle the viewer's follow M2M directly...
        if profile.user in user.follow.all():
            user.follow.remove(profile.user)
        else:
            user.follow.add(profile.user)
        #done add profiles To Following
        ###############################
        #now add to RelationShip models
        # ...then mirror the state in RelationShip (its post_save signal
        # also fires on save).
        follows, created = RelationShip.objects.get_or_create(sender=user, receiver=profile)
        if created == False:
            # Existing relation: flip between following and UnFollow.
            if follows.status == 'following':
                follows.status = 'UnFollow'
            else:
                follows.status = 'following'
        else:
            follows.status = 'following'
        user.save()
        follows.save()
    context={
        'title': f'Profile / {profile.user}',
        'profile': profile,
        'not_': not_,
        'posts':posts,
    }
    return render(request, 'profiles/profiles.html', context)
def settings(request, slug):
    """Profile settings page; only the owner (username == slug) may access."""
    if request.user.username != slug:
        return redirect('home')
    profile = Profile.objects.get(slug=slug)
    # Bound on POST, unbound on GET; saves immediately when valid.
    form = ProfileSetting(request.POST or None, request.FILES or None,
                          instance=profile)
    if form.is_valid():
        form.save()
    return render(request, 'profiles/settings.html', {
        'title': 'settings',
        'profile': profile,
        'form': form,
    })
def notification(request, slug):
    """List the owner's unseen notifications; POST marks them all seen."""
    if request.user.username != slug:
        return redirect('home')
    profile = Profile.objects.get(slug=slug)
    # Lazy queryset: evaluated at template-render time, after any update.
    not_ = Notification.objects.filter(
        receiver=profile, is_seen=False).order_by('-created')
    if request.method == 'POST':
        Notification.objects.filter(
            receiver=profile, is_seen=False).update(is_seen=True)
    return render(request, 'profiles/notification.html', {
        'title': 'notification',
        'not_': not_,
    })
def register(request):
    """Sign-up view: creates the account, logs the new user in, and
    redirects to their fresh profile page."""
    if request.user.is_authenticated:
        return redirect('home')
    form = UserCreationForm()
    # One random post is shown beside the form as a teaser.
    posts = Post.objects.all().order_by('?')[:1]
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            # NOTE(review): authenticate() should succeed right after save,
            # but a None return here would crash login() — confirm.
            user = authenticate(username=form.cleaned_data['username'],password=form.cleaned_data['password1'],)
            login(request, user)
            username=form.cleaned_data['username']
            return redirect(f'/profile/{username}')
        else:
            # NOTE(review): re-binding a fresh form discards validation
            # errors, so the user never sees why sign-up failed — confirm
            # whether this is intentional.
            form = UserCreationForm()
    context = {
        'title': 'Register',
        'form':form,
        'posts':posts,
    }
    return render(request,'profiles/register.html',context)
def authentication(request):
    """Login view; on success redirects to the home feed."""
    if request.user.is_authenticated:
        return redirect('home')
    form = AuthenticationForm()
    # One random post is shown beside the form as a teaser.
    posts = Post.objects.all().order_by('?')[:1]
    if request.method == 'POST':
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                print(user)
                login(request, user)
                return redirect('home')
        else:
            # NOTE(review): re-binding a fresh form discards validation
            # errors, so failed logins render without feedback — confirm.
            form = AuthenticationForm()
    context = {
        'title': 'Login',
        'form':form,
        'posts':posts,
    }
    return render(request,'profiles/login.html',context)
def vlogout(request):
    """Log the current user out and show the goodbye page."""
    logout(request)
    return render(request, 'profiles/logout.html', {'title': 'logout'})
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,157
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/migrations/0009_auto_20210216_0029.py
|
# Generated by Django 3.1.5 on 2021-02-15 22:29
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename model Notfiction -> Notification (typo fix).
    Do not hand-edit applied migrations."""

    dependencies = [
        ('profiles', '0008_notfiction_user'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Notfiction',
            new_name='Notification',
        ),
    ]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,158
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/models.py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
# Create your models here.
class Profile(models.Model):
    """Per-user profile with follow relations and aggregate helpers."""
    user=models.OneToOneField(User,on_delete=models.CASCADE)
    firstName=models.CharField(max_length=50, blank=True, null=True)
    lastName=models.CharField(max_length=50, blank=True, null=True)
    photo=models.ImageField(upload_to='profilePhoto',default='photoEX.png')
    bio = models.TextField(max_length=100, blank=True, null=True)
    # NOTE(review): null=True has no effect on a ManyToManyField (Django
    # warns fields.W340) — harmless, but can be dropped in a later migration.
    follow = models.ManyToManyField(User, related_name='follow', blank=True, null=True)
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)
    slug = models.SlugField(blank=True, null=True)
    def __str__(self):
        return str(self.user)
    @property
    def get_all_following(self):
        # RelationShip rows where this profile is the sender.
        return self.sender.all()
    @property
    def get_all_following_count(self):
        # NOTE(review): counts every RelationShip row regardless of status,
        # unlike get_all_follower_count below which filters on 'following'
        # — confirm whether unfollowed relations should be included.
        return self.sender.all().count()
    @property
    def get_all_following_posts(self):
        # Distinct profiles this profile has (ever) had a relation toward.
        qs = set()
        for i in self.sender.all():
            qs.add(i.receiver)
        return qs
    @property
    def get_all_follower(self):
        # RelationShip rows where this profile is the receiver.
        return self.receiver.all()
    @property
    def get_all_follower_count(self):
        # Only relations currently in the 'following' state are counted.
        follower=0
        for followers in self.receiver.all():
            if followers.status == 'following':
                follower += 1
        return follower
    def save(self, *args, **kwargs):
        # Slug defaults to the username; set once, never regenerated.
        if not self.slug:
            self.slug = slugify(str(self.user))
        super(Profile, self).save(*args, **kwargs)
class RelationShip(models.Model):
    """Directed follow relation between two profiles; its post_save signal
    syncs Profile.follow and creates notifications."""
    STATUS_CHOICES = [('following', 'following'), ('UnFollow', 'UnFollow')]
    sender = models.ForeignKey(Profile, related_name='sender', on_delete=models.CASCADE)
    receiver = models.ForeignKey(Profile, related_name='receiver', on_delete=models.CASCADE)
    status = models.CharField(max_length=50, choices=STATUS_CHOICES)
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(f'{self.sender} send to {self.receiver}')
class Notification(models.Model):
    """A single sender->receiver event (like/comment/follow); ``is_seen``
    flips to True once the receiver views their notification page."""
    NOTIFICATION_TYPES = (('Like', 'Like'), ('Comment', 'Comment'), ('Follow', 'Follow'))
    sender = models.ForeignKey(Profile, related_name='sender_notification', on_delete=models.CASCADE,blank=True, null=True)
    receiver = models.ForeignKey(Profile, related_name='receiver_notification', on_delete=models.CASCADE,blank=True, null=True)
    not_type = models.CharField(max_length=50, choices=NOTIFICATION_TYPES, blank=True, null=True)
    is_seen = models.BooleanField(default=False)
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(f'{self.sender} send to {self.receiver} {self.not_type}')
#https://www.youtube.com/watch?v=z4USlooVXG0&list=PLLRM7ROnmA9F2vBXypzzplFjcHUaKWWP5&index=1&ab_channel=JustDjango
#TODO:
# 1- Make modal to following [/]
# 2- Notification [/]
# 3- Function to update profile (setting) [/]
# 4- Posts app [/]
# 5- Comment []
# 6- Like [/]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,159
|
Hady-Taha/Twitx
|
refs/heads/main
|
/posts/admin.py
|
from django.contrib import admin
from .models import Post, Like
# Register your models here.
# Register both posts models with the default admin site.
admin.site.register(Post)
admin.site.register(Like)
# NOTE(review): leftover template scratch below — candidate for deletion.
# {% for post in request.user.profile.get_all_following %}
# {% for posts in post.receiver.user_post.all %}
# <p>{{posts}}</p>
# {% endfor %}
# {% endfor %}
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,160
|
Hady-Taha/Twitx
|
refs/heads/main
|
/posts/urls.py
|
from django.urls import path
from . import views
# URL routes for the posts app.
urlpatterns = [
    path('', views.twitx, name='twitx'),          # public random feed
    path('home/', views.home, name='home'),       # authenticated home feed
    path('like_unlike/', views.like_unlike, name='like_unlike')  # AJAX like toggle
]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,161
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/migrations/0010_auto_20210216_0040.py
|
# Generated by Django 3.1.5 on 2021-02-15 22:40
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add created/updated timestamps to Notification.
    Do not hand-edit applied migrations."""

    dependencies = [
        ('profiles', '0009_auto_20210216_0029'),
    ]
    operations = [
        migrations.AddField(
            model_name='notification',
            name='created',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='notification',
            name='updated',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,162
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/forms.py
|
from django import forms
from .models import Profile
class ProfileSetting(forms.ModelForm):
    """ModelForm exposing the user-editable Profile fields.

    Fix vs. original: removed two redundant ``pass`` statements.
    """
    class Meta:
        model = Profile
        fields = ('photo', 'firstName', 'lastName', 'bio',)
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,163
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/admin.py
|
from django.contrib import admin
from .models import Profile, RelationShip, Notification
# Register the profiles models with the default admin site.
admin.site.register(Profile)
admin.site.register(RelationShip)
admin.site.register(Notification)
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,164
|
Hady-Taha/Twitx
|
refs/heads/main
|
/posts/models.py
|
from django.db import models
from profiles.models import Profile
# Create your models here.
class Post(models.Model):
    """A user's post with a many-to-many 'liked' set of profiles."""
    user = models.ForeignKey(Profile, related_name='user_post', on_delete=models.CASCADE)
    # NOTE(review): null=True has no effect on a ManyToManyField (Django
    # warns fields.W340) — harmless, but can be dropped in a later migration.
    liked = models.ManyToManyField(Profile, blank=True, null=True)
    body = models.TextField(max_length=750)
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)
    def get_all_likes_count(self):
        # Current number of profiles in the liked set.
        return self.liked.all().count()
    def get_all_likes(self):
        return self.liked.all()
    def __str__(self):
        return str(f'{self.user} post {self.body[:20]}')
class Like(models.Model):
    """Through-style record of a profile's like/unlike history on a post;
    Post.liked remains the source of truth for the current state."""
    VALUE_CHOICES = [('Like', 'Like'), ('UnLike', 'UnLike')]
    user = models.ForeignKey(Profile, related_name='user_like', on_delete=models.CASCADE)
    post = models.ForeignKey(Post, related_name='post_like', on_delete=models.CASCADE)
    value = models.CharField(max_length=50, choices=VALUE_CHOICES)
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(f'{self.user} liked {self.post}')
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,165
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/migrations/0012_auto_20210216_1311.py
|
# Generated by Django 3.1.5 on 2021-02-16 11:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: replace Notification.note with a choices CharField.
    Do not hand-edit applied migrations."""

    dependencies = [
        ('profiles', '0011_auto_20210216_1301'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='notification',
            name='note',
        ),
        migrations.AddField(
            model_name='notification',
            name='note',
            field=models.CharField(blank=True, choices=[(1, 'Like'), (2, 'Comment'), (3, 'Follow')], max_length=50, null=True),
        ),
    ]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,166
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/migrations/0013_auto_20210216_1315.py
|
# Generated by Django 3.1.5 on 2021-02-16 11:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: replace note/user with not_type plus sender/receiver
    FKs on Notification. Do not hand-edit applied migrations."""

    dependencies = [
        ('profiles', '0012_auto_20210216_1311'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='notification',
            name='note',
        ),
        migrations.RemoveField(
            model_name='notification',
            name='user',
        ),
        migrations.AddField(
            model_name='notification',
            name='not_type',
            field=models.CharField(blank=True, choices=[('Like', 'Like'), ('Comment', 'Comment'), ('Follow', 'Follow')], max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='notification',
            name='receiver',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='receiver_notification', to='profiles.profile'),
        ),
        migrations.AddField(
            model_name='notification',
            name='sender',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sender_notification', to='profiles.profile'),
        ),
    ]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,167
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/urls.py
|
from django.urls import path
from . import views
# URL routes for the profiles app.
urlpatterns = [
    path('profile/<slug:slug>', views.profiles, name='profile'),          # profile page / follow toggle
    path('register/', views.register, name='register'),                   # sign-up
    path('login/', views.authentication, name='login'),                   # sign-in
    path('settings/<slug:slug>', views.settings, name='settings'),        # owner-only profile edit
    path('notification/<slug:slug>', views.notification, name='notification'),  # owner-only notifications
    path('logout/', views.vlogout, name='logout'),
]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,168
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/migrations/0011_auto_20210216_1301.py
|
# Generated by Django 3.1.5 on 2021-02-16 11:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Notification.clear -> is_seen.
    Do not hand-edit applied migrations."""

    dependencies = [
        ('profiles', '0010_auto_20210216_0040'),
    ]
    operations = [
        migrations.RenameField(
            model_name='notification',
            old_name='clear',
            new_name='is_seen',
        ),
    ]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,169
|
Hady-Taha/Twitx
|
refs/heads/main
|
/profiles/migrations/0007_notfiction.py
|
# Generated by Django 3.1.5 on 2021-02-15 22:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the (later renamed) Notfiction model.
    Do not hand-edit applied migrations."""

    dependencies = [
        ('profiles', '0006_profile_slug'),
    ]
    operations = [
        migrations.CreateModel(
            name='Notfiction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('clear', models.BooleanField(default=False)),
                ('note', models.ManyToManyField(blank=True, null=True, to='profiles.RelationShip')),
            ],
        ),
    ]
|
{"/profiles/signals.py": ["/profiles/models.py"], "/profiles/context_processors.py": ["/profiles/models.py"], "/posts/views.py": ["/posts/models.py", "/profiles/models.py"], "/profiles/views.py": ["/profiles/models.py", "/profiles/forms.py", "/posts/models.py"], "/posts/admin.py": ["/posts/models.py"], "/profiles/forms.py": ["/profiles/models.py"], "/profiles/admin.py": ["/profiles/models.py"], "/posts/models.py": ["/profiles/models.py"]}
|
26,175
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/database/ops.py
|
# This module contains operations you may need to interact with DB
# Simply put there functions like add/get user
from peewee import DoesNotExist
from database.db import User
def get_user(tg_user_id: int) -> "User | None":
    """Return the stored User with this Telegram id, or None if absent.

    Fix vs. original: the annotation ``(User, None)`` was a tuple
    expression, not a type; the intended optional type is ``User | None``
    (kept as a string so no typing import or 3.10+ runtime is required).
    """
    try:
        return User.get(User.tg_user_id == tg_user_id)
    except DoesNotExist:
        return None
def add_user(tg_user_id: int, tg_first_name: str, tg_last_name: str = "", tg_username: str = "") -> User:
    """Persist and return a new User row built from Telegram profile fields."""
    new_user = User(
        tg_user_id=tg_user_id,
        tg_first_name=tg_first_name,
        tg_last_name=tg_last_name,
        tg_username=tg_username,
    )
    new_user.save()
    return new_user
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,176
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/config.py
|
# This module contains config class
class Config:
    """Static bot configuration; edit these values before first run."""
    TOKEN = ""  # Token of your bot
    LIST_OF_ADMINS = []  # List of administrators. Decorator @restricted uses this list to check if user is admin
    LOG_LEVEL = 10  # 10 == logging.DEBUG
    LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    DB_FILENAME = ":memory:"  # If you are using sqlite, then change it to your desired DB filename
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,177
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/manage.py
|
# For now this file only creates tables in your DB
# You can add anything DB-related here, e.g. migrations
from peewee import *
from database.db import MODELS, db_handle, stop_db
def main():
    """Connect to the DB, create every registered table, then shut down."""
    try:
        db_handle.connect()
    except Exception as exc:
        print(exc)
        return
    print("Creating tables...")
    for model in MODELS:
        print(f"\t {model.__name__}...")
        try:
            model.create_table()
        except Exception as exc:
            # A table that already exists is reported but not fatal.
            print(exc)
    print("Done creating tables")
    stop_db()
if __name__ == '__main__':
main()
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,178
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/internals/bot.py
|
# Implementation of bot with message queue
import telegram.bot
from telegram.ext import messagequeue
class MQBot(telegram.bot.Bot):
    """telegram.bot.Bot subclass routing outgoing sends through a
    MessageQueue so Telegram flood limits are respected."""

    def __init__(self, *args, is_queued_def=True, mqueue=None, **kwargs):
        super(MQBot, self).__init__(*args, **kwargs)
        # Whether sends are queued by default when no explicit flag is given.
        self._is_messages_queued_default = is_queued_def
        self._msg_queue = mqueue or messagequeue.MessageQueue()

    def __del__(self):
        try:
            self._msg_queue.stop()
        except:  # best-effort: interpreter may already be tearing down
            pass

    @messagequeue.queuedmessage
    def send_message(self, *args, **kwargs):
        return super(MQBot, self).send_message(*args, **kwargs)

    @messagequeue.queuedmessage
    def send_document(self, *args, **kwargs):
        # Fix vs. original: this delegated to super().send_message, so
        # documents were sent as (broken) text messages.
        return super(MQBot, self).send_document(*args, **kwargs)

    @messagequeue.queuedmessage
    def send_photo(self, *args, **kwargs):
        return super(MQBot, self).send_photo(*args, **kwargs)
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,179
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/database/db.py
|
# This module contains models of your DB
import datetime
from peewee import *
from playhouse.sqliteq import SqliteQueueDatabase
from config import Config
__sp = r"-\|/-\|/" # this thingie used as spinner
# You can choose other types of DB, supported by peewee
db_handle = SqliteQueueDatabase(Config.DB_FILENAME,
use_gevent=False,
autostart=True,
queue_max_size=128,
)
def stop_db() -> bool:
    """Flush pending writes and stop the queue database; True on success."""
    try:
        db_handle.commit()
        db_handle.stop()
        tick = 0
        # Busy-wait until the writer thread reports it has shut down,
        # showing a small text spinner while we wait.
        while not db_handle.is_stopped():
            print("Closing database...", __sp[tick % 8], end="\r")
            tick += 1
        print("Closing database... ok")
        return True
    except InternalError as err:
        print(err)
        return False
class BaseModel(Model):
    # Common base so every model shares the single SqliteQueueDatabase handle.
    class Meta:
        database = db_handle
class User(BaseModel):
    """A Telegram user known to the bot, keyed by Telegram user id."""
    id = PrimaryKeyField(null=False)
    tg_user_id = IntegerField(null=False)
    tg_first_name = CharField(null=False)
    tg_last_name = CharField(null=False)
    tg_username = CharField(null=False)
    # BUG FIX: pass the callable, not its result -- `datetime.datetime.now()`
    # was evaluated once at import time, stamping every row with the same
    # timestamp. With the callable, peewee evaluates it per insert.
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        db_table = "users"
        order_by = ("created_at",)
MODELS = [User] # Add your user models, to simplify generation of tables
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,180
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/main.py
|
# This is entry point of your bot
from config import Config
import logging
import bot_frame
import atexit
# Configure root logging before any bot modules start emitting records.
logging.basicConfig(level=Config.LOG_LEVEL, format=Config.LOG_FORMAT)


def main():
    # Start the bot's polling loop (blocks until shutdown).
    bot_frame.run()


@atexit.register
def _stop_worker_threads():
    # Ensure queue/database worker threads are stopped on interpreter exit.
    bot_frame.stop()


if __name__ == "__main__":
    main()
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,181
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/internals/actions.py
|
# This module contains decorators, which will automatically send bot's action to user
from functools import wraps
from telegram import ChatAction
def send_action(action):
    """Decorator factory: broadcast `action` to the chat before running the handler."""
    def decorator(func):
        @wraps(func)
        def wrapped(update, context, *args, **kwargs):
            # Show "typing"/"uploading"/... feedback to the user while the
            # real handler does its work.
            chat_id = update.effective_message.chat_id
            context.bot.send_chat_action(chat_id=chat_id, action=action)
            return func(update, context, *args, **kwargs)
        return wrapped
    return decorator
send_typing_action = send_action(ChatAction.TYPING)
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,182
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/internals/utils.py
|
# This module contains different things you may need
from functools import wraps
from config import Config
def restricted(func):
    """Allow the wrapped handler only for user ids in Config.LIST_OF_ADMINS."""
    @wraps(func)
    def wrapped(update, context, *args, **kwargs):
        caller = update.effective_user.id
        if caller in Config.LIST_OF_ADMINS:
            return func(update, context, *args, **kwargs)
        # Silently drop the update for non-admins (stdout log only).
        print("Unauthorized access denied for {}.".format(caller))
    return wrapped
def mention_user(tg_user_id: int, tg_first_name: str, tg_last_name: str = "", tg_username: str = "") -> str:
    """Build a Markdown mention link for a Telegram user.

    The visible text is "<first> '<username>' <last>", including only the
    pieces that are non-empty.
    """
    display = ""
    if tg_first_name:
        display += tg_first_name
    if tg_username:
        display += " '{}'".format(tg_username)
    if tg_last_name:
        display += " " + tg_last_name
    return "[{0}](tg://user?id={1})".format(display, tg_user_id)
def mention_post(group_id, message_id) -> str:
    """Markdown link to a specific message inside a public group/channel."""
    target = "{}/{}".format(group_id, message_id)
    return "[{0}](https://t.me/{0})".format(target)
def pluralize(quantity: int, singular: str, plural: str = "") -> str:
    """Return the plural form of `singular` unless `quantity` is exactly 1.

    An explicit `plural` override always wins; otherwise a basic English
    rule set is applied:
      * consonant + "y"  -> "ies"  (city -> cities)
      * s/x/z/ch/sh      -> + "es" (bus -> buses, match -> matches)
      * everything else  -> + "s"

    Args:
        quantity: number of items the noun refers to.
        singular: singular form of the noun; returned unchanged if empty.
        plural: optional irregular plural to use instead of the heuristic.
    """
    if quantity == 1 or not singular:
        return singular
    if plural:
        return plural
    lowered = singular.lower()
    # FIX: only consonant+y takes "ies"; "day" -> "days", not "daies".
    if lowered.endswith("y") and len(lowered) > 1 and lowered[-2] not in "aeiou":
        return singular[:-1] + "ies"
    # FIX: x/z/ch/sh also take "es" (previously only a trailing "s" did).
    if lowered.endswith(("s", "x", "z", "ch", "sh")):
        return singular + "es"
    return singular + "s"
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
    """Arrange `buttons` into rows of `n_cols`, optionally framed by a
    single-item header row and footer row (python-telegram-bot keyboard layout)."""
    rows = []
    for start in range(0, len(buttons), n_cols):
        rows.append(buttons[start:start + n_cols])
    if header_buttons:
        rows = [[header_buttons]] + rows
    if footer_buttons:
        rows = rows + [[footer_buttons]]
    return rows
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,183
|
NuarkNoir/python-telegram-bot-template
|
refs/heads/master
|
/bot_frame.py
|
# This module contains all your bot action handlers definition
# Also there is run() and stop() functions to start and
# stop bot, but you are not really gonna call them by hand
import sys
import traceback
from telegram import Update, ParseMode
from telegram.ext import Updater, CommandHandler
from telegram.ext.messagequeue import MessageQueue
from telegram.utils.helpers import mention_html
from telegram.utils.request import Request
from config import Config
from internals.bot import MQBot
from database.db import db_handle, stop_db
from internals.actions import send_typing_action
def error_callback(update, context):
    """Global error handler: report the exception and its context to every admin.

    Registered via dispatcher.add_error_handler below.
    """
    trace = "".join(traceback.format_tb(sys.exc_info()[2]))
    # Build a human-readable description of where the error happened.
    payload = ""
    if update.effective_user:
        payload += f" with the user {mention_html(update.effective_user.id, update.effective_user.first_name)}"
    if update.effective_chat:
        payload += f" within the chat <b>{update.effective_chat.title}</b>"
        if update.effective_chat.username:
            payload += f" (@{update.effective_chat.username})"
    if update.poll:
        payload += f" with the poll id {update.poll.id}."
    text = (f"Hey. The error <pre>{context.error}</pre> happened {payload}. "
            f"The full traceback:\n\n<pre>{trace}</pre>")
    for dev_id in Config.LIST_OF_ADMINS:
        context.bot.send_message(dev_id, text, parse_mode=ParseMode.HTML)
    # NOTE(review): this bare `raise` only works if the dispatcher calls
    # error handlers from inside its own `except` block -- confirm for the
    # python-telegram-bot version in use.
    raise
@send_typing_action
def start(update: Update, context):
    """ /start command handler: greet the user. """
    # NOTE(review): `isgroup` is presumably consumed by MQBot's
    # @queuedmessage wrapper to pick the per-group vs. global rate limit,
    # not by the Telegram API itself -- confirm against messagequeue docs.
    context.bot.send_message(
        chat_id=update.effective_chat.id,
        text="Hello there",
        isgroup=False
    )
# Global rate limiter: ~29 msgs per ~1 s overall, 10 per group per 40 s.
mq = MessageQueue(
    all_burst_limit=29, all_time_limit_ms=1020,
    group_burst_limit=10, group_time_limit_ms=40000
)
request = Request(con_pool_size=1)
bot_worker = MQBot(Config.TOKEN, request=request, mqueue=mq)
updater = Updater(bot=bot_worker, use_context=True)
dispatcher = updater.dispatcher
# Register handlers: /start plus the global error reporter.
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_error_handler(error_callback)
def run():
    """Start the DB writer thread and begin long-polling (blocks until idle exits)."""
    db_handle.start()
    updater.start_polling()
    updater.idle()
def stop():
    """Shut down the database, the message queue and the updater."""
    if not stop_db():
        print("Cannot stop database")
    mq.stop()
    updater.stop()
|
{"/database/ops.py": ["/database/db.py"], "/manage.py": ["/database/db.py"], "/database/db.py": ["/config.py"], "/main.py": ["/config.py", "/bot_frame.py"], "/internals/utils.py": ["/config.py"], "/bot_frame.py": ["/config.py", "/internals/bot.py", "/database/db.py", "/internals/actions.py"]}
|
26,184
|
thomasverweij/hue_spotify
|
refs/heads/master
|
/hue_spotify/__init__.py
|
from .app import app
__version__ = '0.1.0'
# NOTE(review): starting the server at import time is a side effect --
# importing this package anywhere will block here. Consider moving the
# run() call behind a __main__ guard.
app.run(host='0.0.0.0', port=8080)
|
{"/hue_spotify/__init__.py": ["/hue_spotify/app.py"]}
|
26,185
|
thomasverweij/hue_spotify
|
refs/heads/master
|
/hue_spotify/app.py
|
from flask import Flask, render_template, redirect
import phue
from phue import Bridge
import spotipy
import spotipy.util as util
import os
import sys
# Runtime configuration, all sourced from the environment so no secrets
# live in the repo.
hue_ip = os.getenv('HUE_IP')
username = os.getenv('SPOTIFY_USERNAME')
client_id=os.getenv('SPOTIFY_CLIENT_ID')
client_secret=os.getenv('SPOTIFY_SECRET')
redirect_uri='http://localhost/'
# OAuth scopes needed to list devices and control playback.
scope = 'user-modify-playback-state user-read-playback-state'
group = os.getenv('HUE_ROOM')  # Hue room/group whose scenes are triggered
app = Flask(__name__)
@app.route('/')
def index():
    # Serve the static landing page.
    return render_template('index.html')
@app.route('/<scene>/<track>')
def set_scene_and_song(scene, track):
    """Activate the Hue scene and start the Spotify track, then redirect home.

    `scene` is a Hue scene name within the configured group; `track` is a
    Spotify track id.
    """
    sp = spotify_client()
    # Pick the currently active Spotify playback device, if any.
    active = [d['id'] for d in sp.devices()['devices'] if d['is_active']] if sp else []
    if sp and hb and active:
        hb.run_scene(group_name=group, scene_name=scene)
        sp.start_playback(device_id=active[0], uris=['spotify:track:{}'.format(track)])
    else:
        # FIX: previously `[0]` on an empty device list raised IndexError
        # when no Spotify device was active; now we log the state and still
        # redirect.
        print(sp, hb)
    return redirect('/')
def spotify_client():
    # Interactive token fetch: may open a browser / prompt on first run and
    # caches the token locally (spotipy.util behaviour).
    token = util.prompt_for_user_token(
        username,
        scope,
        client_id=client_id,
        client_secret=client_secret,
        redirect_uri=redirect_uri
    )
    sp = spotipy.Spotify(auth=token)
    # NOTE(review): a Spotify instance is always truthy, so the `or False`
    # branch likely never triggers; failures surface from
    # prompt_for_user_token instead -- confirm.
    return sp or False
def hue_bridge(ip):
    """Connect to the Hue bridge at `ip`; return the Bridge, or False on failure."""
    bridge = Bridge(ip)
    try:
        bridge.connect()
        return bridge
    except phue.PhueRegistrationException:
        # Let the caller handle the press-the-button registration flow
        # (the module-level code below catches this explicitly).
        raise
    except Exception:
        # BUG FIX: the failure path returned the undefined name `false`,
        # which raised NameError instead of reporting the problem.
        print('hue bridge connection error')
        return False
# Connect to the bridge and Spotify once at import time so request
# handlers can reuse the clients.
try:
    hb = hue_bridge(hue_ip)
except phue.PhueRegistrationException:
    print('Press button of bridge within 30 seconds and run again')
    sys.exit()
sp = spotify_client()
|
{"/hue_spotify/__init__.py": ["/hue_spotify/app.py"]}
|
26,216
|
Nereus-Minos/flaskLoveWeb
|
refs/heads/master
|
/app/view.py
|
from flask import render_template, jsonify,request
from app.models import BlessForm
from app import db
def index():
    # Home page (templates/Index.htm).
    return render_template("Index.htm")
def timeMan():
    # "Love tree" timeline page.
    return render_template("lovetree.htm")
def story():
    # Story page.
    return render_template("story.htm")
def letter():
    # Letter page.
    return render_template("Letter.htm")
def youme():
    # "You and me" page.
    return render_template("youme.html")
def bless_req():
    """Store a blessing submitted via query string and echo it back as JSON.

    Expects `name` and `bless_txt` query parameters; both may be None if
    the caller omits them.
    """
    name = request.args.get('name')
    bless_txt = request.args.get('bless_txt')
    db.session.add(BlessForm(name=name, bless=bless_txt))
    db.session.commit()
    # FIX: removed leftover debug code that re-queried the entire blessform
    # table and printed it on every request.
    return jsonify({'name': name, 'bless_txt': bless_txt})
def Comments():
    # Comments page.
    return render_template("Comments.html")
|
{"/app/view.py": ["/app/models.py", "/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/urls.py": ["/app/__init__.py", "/app/view.py"]}
|
26,217
|
Nereus-Minos/flaskLoveWeb
|
refs/heads/master
|
/app/static/images/img_suofang.py
|
import os
from PIL import Image
# Image extensions that will be downscaled.
ext = ['jpg', 'jpeg', 'png']
files = os.listdir('./index/home-setion')
def process_image(filename, mwidth=300, mheight=400):
    """Downscale ./index/home-setion/<filename> to fit within mwidth x mheight.

    Keeps the aspect ratio, never upscales, and writes the result next to
    the original as "new-<filename>".
    """
    path = './index/home-setion/' + filename
    # FIX: use a context manager -- the source image was never closed.
    with Image.open(path) as image:
        w, h = image.size
        if w <= mwidth and h <= mheight:
            print(filename, 'is OK.')
            return
        # The two original branches computed the same resize; the effective
        # scale is simply the larger overflow ratio.
        scale = max(1.0 * w / mwidth, 1.0 * h / mheight)
        # FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
        # same filter under its current name.
        new_im = image.resize((int(w / scale), int(h / scale)), Image.LANCZOS)
    new_im.save('./index/home-setion/new-' + filename)
    new_im.close()
#process_image("header-bg.png")
# Shrink every image with a known extension in the folder.
for file in files:
    if file.split('.')[-1] in ext:
        process_image(file)
|
{"/app/view.py": ["/app/models.py", "/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/urls.py": ["/app/__init__.py", "/app/view.py"]}
|
26,218
|
Nereus-Minos/flaskLoveWeb
|
refs/heads/master
|
/app/__init__.py
|
from flask_sqlalchemy import SQLAlchemy
import pymysql
from flask import Flask
runapp = Flask(__name__)
# SQLALCHEMY_DATABASE_URI configures the DB connection:
#   'mysql+pymysql://<user>:<password>@127.0.0.1/<database>'
# NOTE(review): credentials are hard-coded here; move them to environment
# variables.
runapp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:zhaohang@127.0.0.1/bless_db'
# Auto-commit DB changes at the end of each request.
# FIX: the option name was misspelled ('COMMMIT'), so it was silently ignored.
runapp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# Track object modifications and emit signals (costs extra memory).
runapp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(runapp)  # bind SQLAlchemy to the Flask app
# Imported last on purpose: urls needs `runapp` defined above (circular import).
from app import urls
|
{"/app/view.py": ["/app/models.py", "/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/urls.py": ["/app/__init__.py", "/app/view.py"]}
|
26,219
|
Nereus-Minos/flaskLoveWeb
|
refs/heads/master
|
/app/main.py
|
from flask import Flask
from view import *
# NOTE(review): this duplicates app/urls.py with a second Flask app and is
# not referenced by the project's import graph -- likely a leftover entry
# point (it also misses the /bless_req/ route).
app = Flask(__name__)
app.add_url_rule(rule='/', endpoint='index', view_func=index)
app.add_url_rule(rule='/timeMan/', endpoint='timeMan', view_func=timeMan)
app.add_url_rule(rule='/story/', endpoint='story', view_func=story)
app.add_url_rule(rule='/letter/', endpoint='letter', view_func=letter)
app.add_url_rule(rule='/youme/', endpoint='youme', view_func=youme)
app.add_url_rule(rule='/Comments/', endpoint='Comments', view_func=Comments)
if __name__ == "__main__":
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
|
{"/app/view.py": ["/app/models.py", "/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/urls.py": ["/app/__init__.py", "/app/view.py"]}
|
26,220
|
Nereus-Minos/flaskLoveWeb
|
refs/heads/master
|
/main.py
|
from app import runapp
if __name__ == "__main__":
    # Development server only; use a real WSGI server in production.
    runapp.debug = True
    runapp.run(host='0.0.0.0', port=5000)
|
{"/app/view.py": ["/app/models.py", "/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/urls.py": ["/app/__init__.py", "/app/view.py"]}
|
26,221
|
Nereus-Minos/flaskLoveWeb
|
refs/heads/master
|
/app/models.py
|
from app import db
# Data models live here.
class BlessForm(db.Model):
    """
    Blessing/guestbook table.
    """
    __tablename__ = 'blessform'  # table name in MySQL
    # MySQL tables require a primary key; without one, table creation fails.
    id = db.Column(db.Integer, primary_key=True, index=True)
    name = db.Column(db.String(32))  # sender's name
    bless = db.Column(db.String(100))  # blessing text
#db.drop_all()  # drop all tables
# Create all tables at import time if they do not exist yet.
db.create_all()
|
{"/app/view.py": ["/app/models.py", "/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/urls.py": ["/app/__init__.py", "/app/view.py"]}
|
26,222
|
Nereus-Minos/flaskLoveWeb
|
refs/heads/master
|
/app/urls.py
|
from app import runapp
from app.view import *
# Route table: bind each view function to its URL and endpoint name.
runapp.add_url_rule(rule='/', endpoint='index', view_func=index)
runapp.add_url_rule(rule='/timeMan/', endpoint='timeMan', view_func=timeMan)
runapp.add_url_rule(rule='/story/', endpoint='story', view_func=story)
runapp.add_url_rule(rule='/letter/', endpoint='letter', view_func=letter)
runapp.add_url_rule(rule='/youme/', endpoint='youme', view_func=youme)
runapp.add_url_rule(rule='/Comments/', endpoint='Comments', view_func=Comments)
runapp.add_url_rule(rule='/bless_req/', endpoint='bless_req', view_func=bless_req)
|
{"/app/view.py": ["/app/models.py", "/app/__init__.py"], "/main.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/app/urls.py": ["/app/__init__.py", "/app/view.py"]}
|
26,228
|
HyXFR/ss-tool
|
refs/heads/main
|
/ppaw/__init__.py
|
"""
Python Pastebin API Wrapper.
PPAW, an acronym for "Python Pastebin API Wrapper", is a Python package that
allows for simple access to pastebin's API. PPAW aims to be as easy to use as
possible.
developed based on official documentation here: http://pastebin.com/api
"""
__author__ = "James \"clug\" <clug@clug.xyz>"
__version__ = "2.0.0"
__all__ = ["pastebin"]
from ppaw.pastebin import Pastebin
|
{"/ppaw/__init__.py": ["/ppaw/pastebin.py"], "/ppaw/pastebin.py": ["/ppaw/__init__.py"], "/sstool.py": ["/ppaw/__init__.py"], "/ppaw/ppaw/request.py": ["/ppaw/__init__.py"]}
|
26,229
|
HyXFR/ss-tool
|
refs/heads/main
|
/ppaw/pastebin.py
|
"""
Python Pastebin API Wrapper.
Provide an object for easily accessible pastes and functions to
fetch existing pastes or create new ones.
"""
from ppaw import definitions, request
from ppaw.errors import PPAWBaseException
class Paste(object):
    """A single pastebin paste and its metadata."""

    def __init__(self, key, date=None, title=None, size=None, expire_date=None, private=None, format_short=None, format_long=None, url=None, hits=None):
        # `key` is the paste's unique id; every other field is optional
        # metadata from the listing API (values arrive as strings).
        self.key = key
        self.date = date
        self.title = title
        self.size = size
        self.expire_date = expire_date
        self.private = private
        self.format_short = format_short
        self.format_long = format_long
        self.url = url if url else "http://pastebin.com/" + self.key
        self.hits = hits
        # Raw paste body; populated lazily by fetch().
        self.data = None

    @classmethod
    def fromString(cls, string):
        """Parse one <paste>...</paste> fragment of the API's pseudo-XML.

        Each attribute lives in a <paste_<name>>...</paste_<name>> tag;
        tags missing from the fragment map to None.
        """
        attributes = ["key", "date", "title", "size", "expire_date", "private", "format_short", "format_long", "url", "hits"]
        values = []
        for attr in attributes:
            opentag, closetag = "<paste_{0}>|</paste_{0}>".format(attr).split("|")
            try:
                values.append(string.split(opentag)[1].split(closetag)[0])
            except IndexError:
                # Tag absent in this fragment.
                values.append(None)
        return cls(**dict((attr, values[idx]) for idx, attr in enumerate(attributes)))

    def fetch(self):
        # Download the raw paste body into self.data.
        self.data = request.get("http://pastebin.com/raw.php?i=" + self.key)
class Pastebin:
    """Thin client for the pastebin.com web API (http://pastebin.com/api)."""

    def __init__(self, dev_key):
        # dev_key: the API developer key; user_key is filled in by login().
        self.dev_key = dev_key
        self.user_key = ""

    def login(self, user_name, user_password):
        """Authenticate and store the resulting user key for later calls.

        NOTE(review): the raw response is stored unchecked -- on failure
        pastebin returns an error string, which would silently become the
        user key.
        """
        req = request.post(
            definitions.login_url,
            {
                "api_dev_key": self.dev_key,
                "api_user_name": user_name,
                "api_user_password": user_password
            }
        )
        self.user_key = req
        return True

    def get_trending_pastes(self):
        """Return the currently trending pastes as Paste objects."""
        return [Paste.fromString(x) for x in request.post(
            definitions.post_url,
            {
                "api_option": "trends",
                "api_dev_key": self.dev_key
            }
        ).split("<paste>") if x]

    def get_paste(self, paste_key):
        """Fetch a paste's raw body by key and return it as a Paste."""
        paste = Paste(paste_key)
        paste.fetch()
        return paste

    def create_paste(self, paste_code, paste_name="", paste_format="",
                     paste_private="", paste_expire_date="", guest=False):
        """Create a paste and return a Paste built from the returned URL.

        With guest=True the paste is created anonymously (no user key).
        """
        user_key = "" if guest else self.user_key
        # The API answers with the paste URL; the key is its last segment.
        return Paste(request.post(
            definitions.post_url,
            {
                "api_option": "paste",
                "api_dev_key": self.dev_key,
                "api_user_key": user_key,
                "api_paste_code": paste_code,
                "api_paste_name": paste_name,
                "api_paste_format": paste_format,
                "api_paste_private": paste_private,
                "api_paste_expire_date": paste_expire_date
            }
        ).split("/")[-1])

    def delete_paste(self, paste_key):
        """Delete one of the logged-in user's pastes; True when removed."""
        return request.post(
            definitions.post_url,
            {
                "api_option": "delete",
                "api_dev_key": self.dev_key,
                "api_user_key": self.user_key,
                "api_paste_key": paste_key
            }
        ) == "Paste Removed"

    def get_own_pastes(self, results_limit=50):
        """List the logged-in user's pastes (up to results_limit)."""
        return [Paste.fromString(x) for x in request.post(
            definitions.post_url,
            {
                "api_option": "list",
                "api_dev_key": self.dev_key,
                "api_user_key": self.user_key,
                "api_results_limit": results_limit
            }
        ).split("<paste>") if x]

    def get_own_info(self):
        """Return the raw account-details response for the logged-in user."""
        return request.post(
            definitions.post_url,
            {
                "api_option": "userdetails",
                "api_dev_key": self.dev_key,
                "api_user_key": self.user_key
            }
        )
|
{"/ppaw/__init__.py": ["/ppaw/pastebin.py"], "/ppaw/pastebin.py": ["/ppaw/__init__.py"], "/sstool.py": ["/ppaw/__init__.py"], "/ppaw/ppaw/request.py": ["/ppaw/__init__.py"]}
|
26,230
|
HyXFR/ss-tool
|
refs/heads/main
|
/ppaw/ppaw/errors.py
|
"""
Python Pastebin API Wrapper.
Provide custom exception for Pastebin errors using similar handling to
IOError by allowing error numbers and error descriptions.
"""
class PPAWBaseException(Exception):
    """Base exception for Pastebin errors, carrying an optional numeric
    code and message (IOError-style)."""

    def __init__(self, msg="", code=None):
        """
        Set up the exception with an optional error code and message.
        Args:
            msg (Optional[str]): message to display with error
            code (Optional[int]): code to display with error
        Returns:
            None
        """
        super(PPAWBaseException, self).__init__()
        # BUG FIX: `msg` and `code` were swapped in the original
        # assignments, so PPAWBaseException("oops", 5) rendered as
        # "[Errno oops] 5".
        self.code = -1 if code is None else code
        self.msg = msg

    def __str__(self):
        """
        Make a human-readable representation of the error.
        Returns:
            str: "", "{msg}", "[Errno {code}]" or "[Errno {code}] {msg}".
        """
        if self.code == -1 and not self.msg:
            return ""
        elif not self.msg:
            msg = "[Errno {code}]"
        elif self.code == -1:
            msg = "{msg}"
        else:
            msg = "[Errno {code}] {msg}"
        return msg.format(code=self.code, msg=self.msg)
|
{"/ppaw/__init__.py": ["/ppaw/pastebin.py"], "/ppaw/pastebin.py": ["/ppaw/__init__.py"], "/sstool.py": ["/ppaw/__init__.py"], "/ppaw/ppaw/request.py": ["/ppaw/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.