Upload 22 files
Browse files- classes/__pycache__/dataloader.cpython-311.pyc +0 -0
- classes/__pycache__/dataloader.cpython-39.pyc +0 -0
- classes/__pycache__/delayobject.cpython-311.pyc +0 -0
- classes/__pycache__/delayobject.cpython-39.pyc +0 -0
- classes/__pycache__/newNode.cpython-311.pyc +0 -0
- classes/__pycache__/node.cpython-311.pyc +0 -0
- classes/__pycache__/node.cpython-39.pyc +0 -0
- classes/__pycache__/pin.cpython-311.pyc +0 -0
- classes/__pycache__/pin.cpython-39.pyc +0 -0
- classes/dataloader.py +4 -0
- classes/delayobject.py +10 -0
- classes/newNode.py +72 -0
- classes/node.py +15 -0
- classes/pin.py +6 -0
- combined_generator.py +16 -0
- constants.py +12 -0
- discriminator.py +17 -0
- edge_generator.py +25 -0
- filtered_dataset.pickle +3 -0
- gan_train.py +152 -0
- load_model.py +40 -0
- matrix_generator.py +22 -0
classes/__pycache__/dataloader.cpython-311.pyc
ADDED
|
Binary file (599 Bytes). View file
|
|
|
classes/__pycache__/dataloader.cpython-39.pyc
ADDED
|
Binary file (476 Bytes). View file
|
|
|
classes/__pycache__/delayobject.cpython-311.pyc
ADDED
|
Binary file (807 Bytes). View file
|
|
|
classes/__pycache__/delayobject.cpython-39.pyc
ADDED
|
Binary file (604 Bytes). View file
|
|
|
classes/__pycache__/newNode.cpython-311.pyc
ADDED
|
Binary file (3.62 kB). View file
|
|
|
classes/__pycache__/node.cpython-311.pyc
ADDED
|
Binary file (1.07 kB). View file
|
|
|
classes/__pycache__/node.cpython-39.pyc
ADDED
|
Binary file (723 Bytes). View file
|
|
|
classes/__pycache__/pin.cpython-311.pyc
ADDED
|
Binary file (632 Bytes). View file
|
|
|
classes/__pycache__/pin.cpython-39.pyc
ADDED
|
Binary file (477 Bytes). View file
|
|
|
classes/dataloader.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class DataLoader:
    """Lightweight container pairing a graph with optional features, edges, and labels.

    Parameters
    ----------
    graph : the graph object to wrap.
    node_feats : optional per-node feature list (fresh empty list by default).
    edge_index : optional edge index structure, or None.
    label : optional label mapping (fresh empty dict by default).
    """

    def __init__(self, graph, node_feats=None, edge_index=None, label=None):
        self.graph = graph
        # None sentinels replace the original mutable defaults
        # (node_feats=[], label={}), which would be shared across instances.
        # The original also accepted these parameters but never stored them;
        # they are kept as attributes here so callers can read them back.
        self.node_feats = [] if node_feats is None else node_feats
        self.edge_index = edge_index
        self.label = {} if label is None else label
classes/delayobject.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class DelayObject:
    """Value holder for a single named signal's width/radix/toggle bookkeeping."""

    def __init__(self, width, radix, name, range_="", toggle=None, binary=False, binary_flag=0):
        # Signal geometry and identification.
        self.width = width
        self.radix = radix
        self.name = name
        self.range_ = range_
        # True when the object has a long binary output.
        self.binary = binary
        # Binary object switching value.
        self.binary_flag = binary_flag
        # A fresh dict per instance when no toggle mapping is supplied.
        self.toggle = {} if toggle is None else toggle
classes/newNode.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class newNode:
    """Graph node wrapping a standard-cell instance.

    ``name_to_io`` maps an ASAP7 standard-cell name to its
    ``(input_count, output_count)`` pin counts.

    NOTE: the original file defined ``__init__`` twice; the second definition
    (taking an existing node-like object) silently replaced the first, so only
    ``newNode(old)`` ever worked.  That effective behavior is kept here, and
    the shadowed field-wise constructor is preserved as the ``from_fields``
    classmethod instead of being lost.
    """

    # (inputs, outputs) per ASAP7 standard-cell name.
    name_to_io = {'': (0, 0),
                  'A2O1A1O1Ixp25_ASAP7_75t_L': (5, 1),
                  'AND2x2_ASAP7_75t_L': (2, 1),
                  'AND2x6_ASAP7_75t_L': (2, 1),
                  'AND3x1_ASAP7_75t_L': (3, 1),
                  'AND3x4_ASAP7_75t_L': (3, 1),
                  'AND4x2_ASAP7_75t_L': (4, 1),
                  'AO211x2_ASAP7_75t_L': (4, 1),
                  'AO21x2_ASAP7_75t_L': (3, 1),
                  'AO221x2_ASAP7_75t_L': (5, 1),
                  'AO222x2_ASAP7_75t_L': (6, 1),
                  'AO22x2_ASAP7_75t_L': (4, 1),
                  'AO31x2_ASAP7_75t_L': (4, 1),
                  'AO32x2_ASAP7_75t_L': (5, 1),
                  'AO33x2_ASAP7_75t_L': (6, 1),
                  'AOI211xp5_ASAP7_75t_L': (4, 1),
                  'AOI21xp5_ASAP7_75t_L': (4, 1),
                  'AOI221xp5_ASAP7_75t_L': (5, 1),
                  'AOI222xp33_ASAP7_75t_L': (6, 1),
                  'AOI22xp5_ASAP7_75t_L': (4, 1),
                  'AOI31xp67_ASAP7_75t_L': (4, 1),
                  'AOI32xp33_ASAP7_75t_L': (5, 1),
                  'AOI33xp33_ASAP7_75t_L': (6, 1),
                  'BUFx8_ASAP7_75t_SL': (1, 1),
                  'DFFLQx4_ASAP7_75t_L': (3, 1),
                  'FAx1_ASAP7_75t_L': (4, 1),
                  'INVxp67_ASAP7_75t_SL': (1, 1),
                  'NAND2xp5_ASAP7_75t_L': (2, 1),
                  'NAND2xp67_ASAP7_75t_L': (2, 1),
                  'NAND3xp33_ASAP7_75t_L': (3, 1),
                  'NAND4xp75_ASAP7_75t_L': (4, 1),
                  'NOR2xp67_ASAP7_75t_L': (2, 1),
                  'NOR3xp33_ASAP7_75t_L': (3, 1),
                  'NOR4xp75_ASAP7_75t_L': (4, 1),
                  'OA211x2_ASAP7_75t_L': (4, 1),
                  'OA21x2_ASAP7_75t_L': (3, 1),
                  'OA221x2_ASAP7_75t_L': (5, 1),
                  'OA222x2_ASAP7_75t_L': (6, 1),
                  'OA22x2_ASAP7_75t_L': (4, 1),
                  'OA31x2_ASAP7_75t_L': (4, 1),
                  'OA33x2_ASAP7_75t_L': (6, 1),
                  'OAI211xp5_ASAP7_75t_L': (4, 1),
                  'OAI21xp5_ASAP7_75t_L': (3, 1),
                  'OAI221xp5_ASAP7_75t_L': (5, 1),
                  'OAI222xp33_ASAP7_75t_L': (6, 1),
                  'OAI22xp5_ASAP7_75t_L': (4, 1),
                  'OAI31xp67_ASAP7_75t_L': (4, 1),
                  'OAI32xp33_ASAP7_75t_L': (5, 1),
                  'OAI33xp33_ASAP7_75t_L': (6, 1),
                  'OR2x6_ASAP7_75t_L': (2, 1),
                  'OR3x4_ASAP7_75t_L': (3, 1),
                  'OR4x2_ASAP7_75t_L': (4, 1),
                  'XNOR2xp5_ASAP7_75t_L': (2, 1),
                  'XOR2xp5_ASAP7_75t_L': (2, 1)}

    def __init__(self, old):
        """Build from an existing node-like object exposing ``.name`` and ``.cell``.

        Raises KeyError if ``old.cell`` is not a known cell name.
        """
        self.name = old.name
        self.output_vals = None
        self.input_vals = None
        self.cell_name = old.cell
        self.signal_type = None
        # Pin counts looked up from the cell-library table above.
        self.inputs = self.name_to_io[self.cell_name][0]
        self.outputs = self.name_to_io[self.cell_name][1]

    @classmethod
    def from_fields(cls, inputs: int, outputs: int, input_val: int, output_val: int,
                    cell_name: str, signal_type: str):
        """Field-wise constructor (the original first ``__init__``, which the
        duplicate definition had shadowed)."""
        node = cls.__new__(cls)  # bypass __init__, which expects a node-like object
        node.inputs = inputs
        node.outputs = outputs
        node.output_vals = output_val
        node.input_vals = input_val
        node.cell_name = cell_name
        node.signal_type = signal_type
        return node
classes/node.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Node:
    """A cell instance parsed from the netlist, with its timing/power bookkeeping.

    Fix over the original: the mutable defaults ``inputs=[]``, ``toggle={}``
    and ``powertimedict={}`` were shared across every instance constructed
    without those arguments; None sentinels give each instance its own fresh
    container while keeping the call signature backward compatible.
    """

    def __init__(self, name: str, cell: str, output: str, inputs=None, radix="",
                 toggle=None, maxcap=0, fanout=0, load=0, peak=0, t1=0, t2=0,
                 powertimedict=None):
        self.name = name
        self.cell = cell
        self.output = output
        self.inputs = [] if inputs is None else inputs
        self.radix = radix
        self.toggle = {} if toggle is None else toggle
        self.fanout = fanout
        self.load = load
        self.maxcap = maxcap
        self.powertimedict = {} if powertimedict is None else powertimedict
        self.t1 = t1
        self.t2 = t2
        self.peak = peak
classes/pin.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Pin:
    """Plain container for a pin's pg, rv, and fv values.

    ``pin`` holds the pin name and defaults to a single space when omitted.
    """

    def __init__(self, pg, rv, fv, pin=" "):
        self.pg = pg
        self.rv = rv
        self.fv = fv
        self.pin = pin
combined_generator.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
class CombinedGenerator(nn.Module):
    """Runs both sub-generators on the same noise and concatenates their outputs.

    The matrix generator's node-feature output forms the left columns and the
    edge generator's adjacency output the right columns of the result.
    """

    def __init__(self, edge_generator, matrix_generator):
        super().__init__()
        self.edge_generator = edge_generator
        self.matrix_generator = matrix_generator

    def forward(self, rand_noise):
        """Return node features and adjacency joined along dim 1."""
        adjacency = self.edge_generator(rand_noise)
        node_matrix = self.matrix_generator(rand_noise)
        return torch.cat((node_matrix, adjacency), dim=1)
constants.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Constants for the generator
NUM_ROWS = 16  # rows per generated matrix (one row per graph node)
NODE_FEATURES = 3  # feature columns per node
INPUT_SIZE_GEN = 100 # Size of the random noise vector for the generator
HIDDEN_SIZE_GEN = 200  # hidden width of the generator MLPs
OUTPUT_SIZE_EDGE_GEN = NUM_ROWS * NUM_ROWS # Output size for the matrix used to generate edge in edge generator
OUTPUT_SIZE_MAT_GEN = NUM_ROWS * NODE_FEATURES # Output size for the matrix generator

# Constants for the discriminator
# The discriminator consumes one flattened (NUM_ROWS, NUM_ROWS + NODE_FEATURES)
# sample: node features concatenated with the adjacency matrix.
INPUT_SIZE_DISCRIM = NUM_ROWS * (NUM_ROWS + NODE_FEATURES)
OUTPUT_SIZE_DISCRIM = 1  # single real/fake score
HIDDEN_SIZE_DISCRIM = 100
discriminator.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
class Discriminator(nn.Module):
    """Three-layer MLP scoring a flattened sample with a value in (0, 1)."""

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # Attribute names fc1/fc2/fc3 are preserved so any saved
        # state_dicts remain loadable.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Score each row of x; output shape is (batch, output_size)."""
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.sigmoid(self.fc3(hidden))
edge_generator.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from constants import NUM_ROWS
|
| 4 |
+
|
class EdgeGenerator(nn.Module):
    """MLP that turns a noise vector into a binary adjacency matrix.

    The raw MLP output (Tanh, in [-1, 1]) is thresholded at 0 into {0, 1}
    edge indicators, reshaped to a square matrix, and the diagonal
    (self-loops) is zeroed.

    Fixes over the original:
    * ``torch.where(x >= 0, 1.0, 0.0)`` produced a tensor with no gradient
      path at all, so a GAN generator built on this module could never
      receive gradients from the discriminator.  A straight-through
      estimator keeps the forward values byte-identical while letting
      gradients flow through the threshold.
    * The square side is derived from ``output_size`` instead of the global
      ``NUM_ROWS`` (identical for the NUM_ROWS * NUM_ROWS sizes used in
      this project), removing the hidden global coupling.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(EdgeGenerator, self).__init__()
        side = int(round(output_size ** 0.5))
        if side * side != output_size:
            raise ValueError("output_size must be a perfect square (got %d)" % output_size)
        self.num_nodes = side
        # The attribute name `generator` is kept so saved state_dicts stay loadable.
        self.generator = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, output_size),
            nn.Tanh()
        )

    def forward(self, noise):
        """Return a (num_nodes, num_nodes) 0/1 adjacency matrix with a zero diagonal.

        NOTE: like the original, this flattens away any batch dimension;
        callers pass noise of shape (1, input_size).
        """
        raw = self.generator(noise)
        hard = (raw >= 0).float()
        # Straight-through estimator: the forward value equals `hard`
        # (raw - raw.detach() is zero-valued) but the gradient is raw's.
        edges = hard + raw - raw.detach()
        edges = torch.reshape(edges, (self.num_nodes, self.num_nodes))
        # Zero the diagonal with a mask multiply instead of the original
        # in-place fill_diagonal_, avoiding an in-place write on a tensor
        # that now participates in autograd.
        mask = 1.0 - torch.eye(self.num_nodes, device=edges.device, dtype=edges.dtype)
        return edges * mask
filtered_dataset.pickle
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:737b8260a918acbd452977bcca7f979bcba4462e38b3acd50d78a95e97f00ad2
|
| 3 |
+
size 18002213
|
gan_train.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import pickle
|
| 5 |
+
sys.path.append("classes")
|
| 6 |
+
from pin import Pin
|
| 7 |
+
from delayobject import DelayObject
|
| 8 |
+
from node import Node
|
| 9 |
+
import numpy as np
|
| 10 |
+
import matplotlib.cm as cm
|
| 11 |
+
import matplotlib.pyplot as plt
|
| 12 |
+
import networkx as nx
|
| 13 |
+
from torch_geometric.utils import to_networkx
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn as nn
|
| 16 |
+
import torch.optim as optim
|
| 17 |
+
from torch_geometric.data import Batch
|
| 18 |
+
from torch_geometric.nn import GCNConv, GraphConv, global_mean_pool
|
| 19 |
+
from torch_geometric.data import Data
|
| 20 |
+
from torch.nn import Linear
|
| 21 |
+
import torch.nn.functional as F
|
| 22 |
+
from constants import *
|
| 23 |
+
import torch.utils.data
|
| 24 |
+
from torch.utils.data import Dataset, DataLoader, random_split
|
| 25 |
+
from edge_generator import EdgeGenerator
|
| 26 |
+
from matrix_generator import MatrixGenerator
|
| 27 |
+
from combined_generator import CombinedGenerator
|
| 28 |
+
from discriminator import Discriminator
|
| 29 |
+
|
# Build the two generators, the discriminator, and the combined generator
# that the GAN trains end to end.
edge_generator = EdgeGenerator(INPUT_SIZE_GEN, HIDDEN_SIZE_GEN, OUTPUT_SIZE_EDGE_GEN)
matrix_generator = MatrixGenerator(INPUT_SIZE_GEN, HIDDEN_SIZE_GEN, OUTPUT_SIZE_MAT_GEN)
discriminator = Discriminator(INPUT_SIZE_DISCRIM, HIDDEN_SIZE_DISCRIM, OUTPUT_SIZE_DISCRIM)
combined = CombinedGenerator(edge_generator,matrix_generator)

# Load the pre-filtered dataset: a pickled list of graph objects carrying
# .x (node features) and .adj (adjacency matrix).
with open("filtered_dataset.pickle",'rb') as file:
    dataset = pickle.load(file)

# Keep only 16-node graphs, flattening each into one tensor of node
# features concatenated with the adjacency matrix along dim 1.
new_dataset = []
for data in dataset:
    if len(data.adj) == 16:
        new_dataset.append(torch.cat((data.x,data.adj),dim=1))

from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
# Fit the scaler to the data and transform it
# NOTE(review): fit_transform is called once per graph, so every graph is
# scaled by its own min/max rather than dataset-wide statistics — confirm intended.
# NOTE(review): new_dataset was already built above from the unscaled data.x,
# so this normalization never reaches the tensors actually used for training.
for data in dataset:
    data.x = scaler.fit_transform(data.x)

# Convert the (now numpy) fields back to torch tensors of the expected dtypes.
for data in dataset:
    data.x = torch.tensor(data.x, dtype=torch.float32)
    data.edge_index = torch.tensor(data.edge_index, dtype=torch.long) # Corrected dtype
    data.y = torch.tensor(data.y, dtype=torch.float32)


# 80/20 train/test split over the flattened tensors.
train_ratio = 0.8
num_train = int(len(new_dataset) * train_ratio)
num_test = len(new_dataset) - num_train

# Using random_split to create training and testing datasets
train_dataset, test_dataset = random_split(new_dataset, [num_train, num_test])

# Create DataLoader instances for training and testing
# drop_last=True keeps every batch at exactly 64 samples, matching the
# hard-coded label size used during training.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, drop_last=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False, drop_last=True, num_workers=2)
+
|
| 67 |
+
discriminator_optimizer = optim.Adam(discriminator.parameters(), lr=0.01)
|
| 68 |
+
combined_optimizer = optim.Adam(combined.parameters(), lr=0.001)
|
| 69 |
+
|
| 70 |
+
criterion = nn.MSELoss()
|
| 71 |
+
device = torch.device("xpu" if torch.cuda.is_available() else "cpu")
|
| 72 |
+
num_epochs = 1000
|
| 73 |
+
iters = 0
|
| 74 |
+
|
| 75 |
+
G_losses = []
|
| 76 |
+
D_losses = []
|
| 77 |
+
|
| 78 |
+
def get_fake_data(batch_size, combined):
|
| 79 |
+
fake_graphs=[]
|
| 80 |
+
for i in range(batch_size):
|
| 81 |
+
rand_noise = torch.randn(1, INPUT_SIZE_GEN)
|
| 82 |
+
fake_graphs.append(combined(rand_noise))
|
| 83 |
+
return fake_graphs
|
| 84 |
+
|
| 85 |
+
for epoch in range(num_epochs):
|
| 86 |
+
|
| 87 |
+
for i, batch in enumerate(train_loader,0):
|
| 88 |
+
#DISCRIM TRAINING W/ REAL DATASET
|
| 89 |
+
batch = batch.to(device)
|
| 90 |
+
|
| 91 |
+
batch_flattened = batch.view(batch.size(0), -1)
|
| 92 |
+
|
| 93 |
+
discriminator.zero_grad()
|
| 94 |
+
outputs = discriminator(batch_flattened).view(-1)
|
| 95 |
+
|
| 96 |
+
label = torch.ones(64).float()
|
| 97 |
+
|
| 98 |
+
errD_real = criterion(outputs,label)
|
| 99 |
+
|
| 100 |
+
errD_real.backward()
|
| 101 |
+
D_x = outputs.mean().item()
|
| 102 |
+
|
| 103 |
+
#DISCRIM TRAINING W/ FAKE DATASET
|
| 104 |
+
|
| 105 |
+
fake_data = get_fake_data(64, combined)
|
| 106 |
+
tensor_batch = torch.stack(fake_data)
|
| 107 |
+
|
| 108 |
+
tensor_batch_flattened = tensor_batch.view(tensor_batch.size(0), -1)
|
| 109 |
+
|
| 110 |
+
label.fill_(0)
|
| 111 |
+
outputs = discriminator(tensor_batch_flattened.detach()).view(-1)
|
| 112 |
+
|
| 113 |
+
errD_fake = criterion(outputs, label)
|
| 114 |
+
errD_fake.backward()
|
| 115 |
+
|
| 116 |
+
D_G_z1 = outputs.mean().item()
|
| 117 |
+
|
| 118 |
+
errD = errD_real + errD_fake
|
| 119 |
+
discriminator_optimizer.step()
|
| 120 |
+
|
| 121 |
+
#GENERATOR TRAINING
|
| 122 |
+
|
| 123 |
+
combined.zero_grad()
|
| 124 |
+
label.fill_(1)
|
| 125 |
+
outputs = discriminator(tensor_batch_flattened).view(-1)
|
| 126 |
+
errG = criterion(outputs, label)
|
| 127 |
+
errG.backward()
|
| 128 |
+
D_G_z2 = outputs.mean().item()
|
| 129 |
+
|
| 130 |
+
combined_optimizer.step()
|
| 131 |
+
|
| 132 |
+
# Output training stats
|
| 133 |
+
if i % 50 == 0:
|
| 134 |
+
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
|
| 135 |
+
% (epoch, num_epochs, i, len(train_loader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
|
| 136 |
+
|
| 137 |
+
# Save Losses for plotting later
|
| 138 |
+
G_losses.append(errG.item())
|
| 139 |
+
D_losses.append(errD.item())
|
| 140 |
+
|
| 141 |
+
ay = get_fake_data(64, combined)
|
| 142 |
+
|
def deconstructor(matrix):
    """Split a combined (rows, 3 + N) matrix back into its two halves.

    Returns (node_feature_matrix, adjacency_matrix): the first three
    columns and all remaining columns, respectively.
    """
    node_feats = matrix[:, :3]
    adjacency = matrix[:, 3:]
    return node_feats, adjacency
+
|
| 148 |
+
node_features, adj = deconstructor(ay[0])
|
| 149 |
+
edge_list = torch.nonzero(adj, as_tuple=False).t()
|
| 150 |
+
data_obj = Data(x=node_features, y = 0, edge_index = edge_list)
|
| 151 |
+
|
| 152 |
+
torch.save(combined.state_dict(), "model.pth")
|
load_model.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from constants import *
|
| 4 |
+
import torch.optim as optim
|
| 5 |
+
from edge_generator import EdgeGenerator
|
| 6 |
+
from matrix_generator import MatrixGenerator
|
| 7 |
+
from combined_generator import CombinedGenerator
|
| 8 |
+
|
| 9 |
+
edge_generator = EdgeGenerator(INPUT_SIZE_GEN, HIDDEN_SIZE_GEN, OUTPUT_SIZE_EDGE_GEN)
|
| 10 |
+
matrix_generator = MatrixGenerator(INPUT_SIZE_GEN, HIDDEN_SIZE_GEN, OUTPUT_SIZE_MAT_GEN)
|
| 11 |
+
|
| 12 |
+
model = CombinedGenerator(edge_generator, matrix_generator)
|
| 13 |
+
model.load_state_dict(torch.load("model.pth"))
|
| 14 |
+
model.eval()
|
| 15 |
+
|
| 16 |
+
def get_fake_data(batch_size, combined):
|
| 17 |
+
fake_graphs=[]
|
| 18 |
+
for i in range(batch_size):
|
| 19 |
+
rand_noise = torch.randn(1, INPUT_SIZE_GEN)
|
| 20 |
+
fake_graphs.append(combined(rand_noise))
|
| 21 |
+
return fake_graphs
|
| 22 |
+
fake_data = get_fake_data(64,model)
|
| 23 |
+
|
def deconstructor(matrix):
    """Split a combined matrix into (first three columns, remaining columns)."""
    features = matrix[:, :3]
    adjacency = matrix[:, 3:]
    return features, adjacency
| 28 |
+
|
| 29 |
+
def adj_matrix_to_dict(adj_matrix):
|
| 30 |
+
adj_dict = {}
|
| 31 |
+
for i, row in enumerate(adj_matrix):
|
| 32 |
+
adj_dict[i] = []
|
| 33 |
+
for j, edge in enumerate(row):
|
| 34 |
+
if edge != 0:
|
| 35 |
+
adj_dict[i].append(j)
|
| 36 |
+
return adj_dict
|
| 37 |
+
|
| 38 |
+
dict_list = []
|
| 39 |
+
for data in fake_data:
|
| 40 |
+
dict_list.append(adj_matrix_to_dict(deconstructor(data)[1]))
|
matrix_generator.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from constants import NUM_ROWS, NODE_FEATURES
|
| 4 |
+
|
class MatrixGenerator(nn.Module):
    """MLP mapping a noise vector to a (NUM_ROWS, NODE_FEATURES) feature matrix.

    The final Sigmoid keeps every generated feature value in (0, 1).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # The attribute name `generator` is kept so saved state_dicts
        # remain loadable.
        self.generator = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, output_size),
            nn.Sigmoid()
        )

    def forward(self, noise):
        """Generate one feature matrix; any batch dimension is flattened away."""
        flat = self.generator(noise)
        return flat.reshape(NUM_ROWS, NODE_FEATURES)