index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
21,119
|
phdesign/microbit_games
|
refs/heads/main
|
/bop_it.py
|
from microbit import *
from time import sleep
from random import randint
import math
import music
# The starting time in milliseconds we will wait for a response.
WAIT_START_MS = 1500
# How quickly the wait time reduces. A smaller value means it shortens more quickly.
DECAY_RATE = 50
# Starting sound volume (absolute, on the 0-255 scale used by set_volume).
START_VOLUME = 160
# Maximum volume
MAX_VOLUME = 255
# Number of steps that the volume will change in
VOLUME_STEPS = 5
# Volume indicator image to show (rows of increasing brightness; shifted down to hide bars)
VOLUME_IMAGE = Image("55555:66666:77777:88888:99999")
# Repeat press the button within this time (ms) to change the volume, rather than just show it
VOLUME_CHANGE_WAIT = 3000
class Input:
    """Identifiers for the physical inputs the player can trigger."""

    BUTTON_A = 1
    BUTTON_B = 2
    PIN_LOGO = 3
class Option:
    """One game prompt: the image to show, the input that scores, and the cue sound.

    Fix: the constructor parameter was misspelled ``exected``; renamed to
    ``expected`` (all visible callers pass it positionally).
    """

    def __init__(self, prompt, expected, sound):
        self.prompt = prompt
        self.expected = expected
        self.sound = sound
def volume_to_step(volume):
    """Converts an absolute volume (0-255) to a relative step (0-5)."""
    fraction = volume / MAX_VOLUME
    return round(fraction * VOLUME_STEPS)
def show_volume(step):
    """Displays the current volume as a bar image for about half a second."""
    hidden_rows = VOLUME_STEPS - step
    indicator = VOLUME_IMAGE.shift_down(hidden_rows)
    display.show(indicator, delay=500, clear=True)
def change_volume(volume):
    """Cycles incrementing the volume, resetting to zero after max.

    Returns the new absolute volume (0-255).
    """
    current_step = volume_to_step(volume)
    next_step = 0 if current_step >= VOLUME_STEPS else current_step + 1
    new_volume = math.floor((MAX_VOLUME / VOLUME_STEPS) * next_step)
    set_volume(new_volume)
    # Audible feedback so the new level can be judged immediately.
    music.play("A5:2", wait=False)
    show_volume(next_step)
    return new_volume
def create_exponential_decay(initial_value, decay_rate):
    """Creates an exponential decay function.

    Given an initial value (wait time) and decay rate, returns
    a function that exponentially decays over time:
    ``initial_value * exp(-time / decay_rate)``.
    A smaller decay rate means it decays over a shortened period.
    (Fix: parameter was misspelled ``inital_value``.)
    """
    def exponential_decay(time):
        return initial_value * math.exp(-(1 / decay_rate) * time)
    return exponential_decay
def wait_for_input(wait_for):
    """Waits for an input for a set time (milliseconds).

    Polls the two buttons and the logo pin at least once; returns the
    matching Input constant, or None once the deadline has passed.
    """
    started_at = running_time()
    while True:
        # Inputs are checked before the timeout so a press is never missed.
        if button_a.is_pressed():
            return Input.BUTTON_A
        if button_b.is_pressed():
            return Input.BUTTON_B
        if pin_logo.is_touched():
            return Input.PIN_LOGO
        if running_time() - started_at > wait_for:
            return None
def play(options):
    """Play one round of the game.

    Shows a 3-2-1 countdown, then repeatedly displays a random prompt and
    waits for the matching input within an exponentially shrinking window.
    Returns the score: the number of correct responses before the first
    wrong input or timeout.
    """
    score = 0
    # Show a starting animation
    display.clear()
    music.play(music.JUMP_UP, wait=False)
    display.show("3")
    # NOTE: `sleep` here is time.sleep (seconds) — the `from time import sleep`
    # shadows the name from `from microbit import *`.
    sleep(0.7)
    display.show("2")
    sleep(0.7)
    display.show("1")
    sleep(1)
    start = running_time()
    wait_decay = create_exponential_decay(WAIT_START_MS, DECAY_RATE)
    while True:
        # The response window shrinks the longer the round has been running.
        elapsed_sec = (running_time() - start) / 1000
        wait_ms = round(wait_decay(elapsed_sec), 4)
        # Pick a random input option
        option = options[randint(0, 2)]
        display.show(option.prompt)
        music.play(option.sound)
        result = wait_for_input(wait_ms)
        if result == option.expected:
            score += 1
            display.clear()
            # Short breather (seconds) proportional to the current window.
            sleep(round(wait_ms / 2000, 4))
        else:
            # Wrong input or timeout ends the round.
            music.play(music.POWER_DOWN, wait=False)
            display.show(Image.NO)
            sleep(1)
            break
    return score
def main():
    """Main game loop: A starts a round, B shows/changes the volume."""
    volume = START_VOLUME
    # Prompt image, the input that scores it, and the cue tone for each option.
    options = [
        Option(Image.ARROW_W, Input.BUTTON_A, "D4:4"),
        Option(Image.ARROW_E, Input.BUTTON_B, "E4:4"),
        Option(Image.ARROW_N, Input.PIN_LOGO, "F4:4"),
    ]
    high_score = 0
    set_volume(volume)
    button_b_last_pushed = 0
    while True:
        # Press A to start the game
        if button_a.is_pressed():
            score = play(options)
            # Check if this was a high score
            if score > high_score:
                high_score = score
                music.play(music.PRELUDE, wait=False)
                display.show(Image.HAPPY)
                sleep(1)
                display.scroll("High score: {:d}".format(score), wait=False)
            else:
                display.scroll("Score: {:d}".format(score), wait=False)
        # Press B to show or change volume
        if button_b.is_pressed():
            elapsed = running_time() - button_b_last_pushed
            # Change volume if the button was pushed twice in quick succession
            if elapsed < VOLUME_CHANGE_WAIT:
                volume = change_volume(volume)
            else:
                show_volume(volume_to_step(volume))
            button_b_last_pushed = running_time()


if __name__ == "__main__":
    main()
|
{"/bop_it.py": ["/music/__init__.py"], "/test/test_bop_it.py": ["/bop_it.py"]}
|
21,120
|
phdesign/microbit_games
|
refs/heads/main
|
/music/__init__.py
|
def play(music, pin="", wait=True, loop=False):
    """No-op stand-in for the micro:bit ``music.play`` API.

    Mirrors the real signature so ``bop_it`` can be imported off-device
    (e.g. by the tests — see the project's import graph) without hardware.
    """
    pass
|
{"/bop_it.py": ["/music/__init__.py"], "/test/test_bop_it.py": ["/bop_it.py"]}
|
21,121
|
phdesign/microbit_games
|
refs/heads/main
|
/test/test_bop_it.py
|
from unittest.mock import patch
from bop_it import create_exponential_decay, volume_to_step, change_volume
def test_create_exponential_decay():
    """The decay curve starts at the initial value and falls toward zero."""
    fn = create_exponential_decay(1500, 200)
    assert fn(0) == 1500
    assert round(fn(10)) == 1427
    assert round(fn(100)) == 910
    assert round(fn(1000)) == 10
def test_volume_to_step():
    """Absolute 0-255 volumes map onto the 0-5 step scale via rounding."""
    assert volume_to_step(255) == 5
    assert volume_to_step(128) == 3
    assert volume_to_step(127) == 2
    assert volume_to_step(1) == 0
    assert volume_to_step(0) == 0
@patch("bop_it.show_volume")
@patch("bop_it.set_volume")
def test_change_volume_should_reset_volume_when_max(mock_set_volume, mock_show_volume):
new_volume = change_volume(255)
mock_show_volume.assert_called_once_with(0)
mock_set_volume.assert_called_once_with(0)
assert new_volume == 0
@patch("bop_it.show_volume")
@patch("bop_it.set_volume")
def test_change_volume_should_increment_volume(mock_set_volume, mock_show_volume):
new_volume = change_volume(128)
mock_show_volume.assert_called_once_with(4)
mock_set_volume.assert_called_once_with(204)
assert new_volume == 204
|
{"/bop_it.py": ["/music/__init__.py"], "/test/test_bop_it.py": ["/bop_it.py"]}
|
21,123
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/BCI-CompIV-2a/utils/meter.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
import math
class Meter(object):
    """Tracks a running loss average and a running accuracy over batches.

    Args:
        pp_pr: callable mapping raw network outputs to predicted labels.
        pp_gt: callable mapping raw targets to ground-truth labels.
    """

    def __init__(self, pp_pr, pp_gt):
        self.n_tracked = None
        self.loss = None
        self.avg_loss = None
        # main metric is classification accuracy (see `is_better`/`bar`)
        self.pp_pr = pp_pr
        self.pp_gt = pp_gt
        self.start_metric = 0.0
        self.correct = None
        self.avg_metric = None
        self.reset()

    def reset(self):
        """Clear all running statistics (also called once from __init__)."""
        self.n_tracked = 0
        self.loss = 0.
        self.avg_loss = 0.
        self.correct = 0
        self.avg_metric = self.start_metric

    def update(self, pr_outs, gt_labels, loss, track_metric=False):
        """Fold one batch into the running averages.

        Args:
            pr_outs: raw batch predictions (fed through ``pp_pr``).
            gt_labels: raw batch targets (fed through ``pp_gt``).
            loss: mean batch loss; weighted by batch size here.
            track_metric: when True, also update the accuracy metric.
        """
        gt_labels = self.pp_gt(gt_labels)
        batch_size = len(gt_labels)
        self.n_tracked += batch_size
        # update loss: batch mean -> running sum -> running mean
        self.loss += loss * batch_size
        self.avg_loss = self.loss / self.n_tracked
        if track_metric:
            # update main metric
            # NOTE(review): n_tracked also counts batches with
            # track_metric=False, which would deflate the accuracy if the
            # flag varies across calls — confirm callers use it uniformly.
            pr_labels = self.pp_pr(pr_outs)
            assert len(pr_labels) == len(gt_labels), 'Number of predictions and number of ground truths do not match!'
            for i in range(len(pr_labels)):
                self.correct += pr_labels[i] == gt_labels[i]
            self.avg_metric = (self.correct / self.n_tracked)

    def is_better(self, current_metric, best_metric):
        """Higher is better — the tracked metric is accuracy."""
        return current_metric > best_metric

    def bar(self):
        """Progress-bar suffix with the running loss and accuracy.

        Fix: the format string ended in '%%', a leftover %-formatting
        escape; str.format gives '%' no special meaning, so the original
        printed a literal double percent sign.
        """
        return '| Loss: {loss:8.5f} | Accuracy: {acc:6.2f}%'.format(loss=self.avg_loss,
                                                                    acc=self.avg_metric * 100)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,124
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/treat/daemon.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import torch
import torch.optim as optim
import torch.utils.data as tud
import itertools
from quantlab.treat.thermo.thermostat import Thermostat
import quantlab.treat.algo.lr_schedulers as lr_schedulers
class DynamicSubsetRandomSampler(tud.Sampler):
    r"""Samples a fixed number of elements randomly from a dataset of fixed size.

    Indices are drawn as a sequence of full random permutations of the
    dataset, followed by a truncated permutation for the remainder, so a
    given index never repeats within any one permutation.

    Arguments:
        numSamples: the number of samples to take
        datasetLen: the size of the dataset from which to draw samples
    """

    def __init__(self, numSamples, datasetLen):
        assert(isinstance(datasetLen, int) or datasetLen.is_integer())
        assert(isinstance(numSamples, int) or numSamples.is_integer())
        self.datasetLen = datasetLen
        self.numSamples = numSamples

    def __iter__(self):
        full_sets, remainder = divmod(self.numSamples, self.datasetLen)
        chunks = [torch.randperm(self.datasetLen).tolist() for _ in range(full_sets)]
        # Partial permutation covering the leftover samples (may be empty).
        chunks.append(torch.randperm(self.datasetLen)[:remainder].tolist())
        return iter(itertools.chain.from_iterable(chunks))

    def __len__(self):
        return self.numSamples
def get_algo(logbook, net):
    """Return a training procedure for the experiment.

    Builds the Thermostat (ANA cooling schedule), the optimizer and the LR
    scheduler from ``logbook.config['treat']``, restoring each one's state
    from ``logbook.ckpt`` when a checkpoint is present.

    Returns:
        Tuple ``(thr, opt, lr_sched)``.
    """
    # set ANA cooling schedule
    thr_config = logbook.config['treat']['thermostat']
    thr = Thermostat(net, **thr_config['params'])
    if logbook.ckpt:
        thr.load_state_dict(logbook.ckpt['treat']['thermostat'])
    # set algo algorithm — optimizer class is looked up by name in torch.optim
    opt_config = logbook.config['treat']['optimizer']
    opt = optim.__dict__[opt_config['class']](net.parameters(), **opt_config['params'])
    if logbook.ckpt:
        opt.load_state_dict(logbook.ckpt['treat']['optimizer'])
    # LR scheduler may come from torch.optim or quantlab's custom schedulers;
    # the custom dict is merged second, so it wins on name clashes.
    lr_sched_config = logbook.config['treat']['lr_scheduler']
    lr_sched_dict = {**optim.lr_scheduler.__dict__, **lr_schedulers.__dict__}
    lr_sched = lr_sched_dict[lr_sched_config['class']](opt, **lr_sched_config['params'])
    if logbook.ckpt:
        lr_sched.load_state_dict(logbook.ckpt['treat']['lr_scheduler'])
    return thr, opt, lr_sched
def get_data(logbook, num_workers=10):
    """Return data for the experiment.

    Loads the train/valid/test sets via the experiment module and wraps
    them in DataLoaders. If ``epoch_size_train`` is configured, training
    uses a DynamicSubsetRandomSampler for "mini-epochs" instead of
    shuffling the full set.

    Args:
        logbook: experiment logbook carrying config, data dir and module.
        num_workers: worker processes per DataLoader.

    Returns:
        Tuple ``(train_l, valid_l, test_l)`` of DataLoaders.
    """
    data_config = logbook.config['treat']['data']
    # make dataset random split consistent (to prevent training instances from filtering into validation set)
    rng_state = torch.get_rng_state()
    torch.manual_seed(1234)
    # load preprocessed datasets
    train_set, valid_set, test_set = logbook.module.load_data_sets(logbook.dir_data, data_config)
    # create random training set subselector for mini-epochs
    if 'epoch_size_train' in data_config.keys():
        shuffleTrain = False
        # 'epoch_size_train' is used as a fraction of the training set size
        # (the absolute-count branch is disabled below).
        cfgVal = float(data_config['epoch_size_train'])
        # if cfgVal > 1:
        #     assert(cfgVal.is_integer())
        #     numSamples = int(cfgVal)
        # else:
        numSamples = int(cfgVal*len(train_set))
        # assert(numSamples <= len(train_set))
        samplerTrain = DynamicSubsetRandomSampler(numSamples, len(train_set))
    else:
        shuffleTrain, samplerTrain = True, None
    # create loaders
    if hasattr(train_set, 'collate_fn'):  # if one data set needs `collate`, all the data sets should
        train_l = tud.DataLoader(train_set, batch_size=data_config['bs_train'],
                                 shuffle=shuffleTrain, sampler=samplerTrain,
                                 num_workers=num_workers,
                                 collate_fn=train_set.collate_fn)
        valid_l = tud.DataLoader(valid_set, batch_size=data_config['bs_valid'],
                                 shuffle=True, num_workers=num_workers,
                                 collate_fn=valid_set.collate_fn)
        test_l = tud.DataLoader(test_set, batch_size=data_config['bs_valid'],
                                shuffle=True, num_workers=num_workers,
                                collate_fn=test_set.collate_fn)
    else:
        train_l = tud.DataLoader(train_set, batch_size=data_config['bs_train'],
                                 shuffle=shuffleTrain, sampler=samplerTrain,
                                 num_workers=num_workers)
        valid_l = tud.DataLoader(valid_set, batch_size=data_config['bs_valid'],
                                 shuffle=True, num_workers=num_workers)
        test_l = tud.DataLoader(test_set, batch_size=data_config['bs_valid'],
                                shuffle=True, num_workers=num_workers)
    # restore the RNG state disturbed by the fixed split seed above
    torch.set_rng_state(rng_state)
    return train_l, valid_l, test_l
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,125
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/ResNet/resnet.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
# large parts of the code taken or adapted from torchvision
import math
import torch
import torch.nn as nn
#from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d
from quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d
#from quantlab.indiv.ste_ops import STEActivation
# Pretrained torchvision checkpoint URLs, keyed by architecture name.
# Only the resnet18/34/50/101/152 entries are constructible by ResNet below;
# the others are kept for completeness.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
class BasicBlock(nn.Module):
    """Residual block: two 3x3 convs with BN/ReLU plus an (optionally
    downsampled) identity shortcut. Convolutions are created through the
    injected ``convGen`` factory."""

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, convGen=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample) carry the spatial stride.
        self.conv1 = convGen(inplanes, planes, kernel_size=3, stride=stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = convGen(planes, planes, kernel_size=3)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce -> 3x3 (strided) -> 1x1 expand, with
    an (optionally downsampled) identity shortcut. Convolutions come from
    the injected ``convGen`` factory."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, convGen=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample) carry the spatial stride.
        self.conv1 = convGen(inplanes, width, kernel_size=1)
        self.bn1 = norm_layer(width)
        self.conv2 = convGen(width, width, kernel_size=3,
                             stride=stride, groups=groups, dilation=dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = convGen(width, planes * self.expansion, kernel_size=1)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """torchvision-style ResNet with optional INQ weight quantization.

    A local ``convGen`` factory decides per layer whether to build a plain
    ``nn.Conv2d`` (first layer, or quantization disabled) or an
    ``INQConv2d``. Activation quantization is not implemented (asserted
    off), and the first/last layers must be kept full precision (asserted
    on).
    """

    def __init__(self, arch='resnet18', quant_schemes=None,
                 quantWeights=True, quantAct=True,
                 weightInqSchedule=None, weightInqBits=None, weightInqLevels=None,
                 weightInqStrategy="magnitude", weightInqQuantInit=None,
                 quantSkipFirstLayer=False, quantSkipLastLayer=False, pretrained=False):
        super(ResNet, self).__init__()
        # Only the weight-only quantization configuration is supported.
        assert(quantAct == False)
        assert(quantSkipFirstLayer)
        assert(quantSkipLastLayer)
        if weightInqBits != None:
            print('warning: weightInqBits deprecated')
            # Translate the deprecated bit count into an INQ level count.
            if weightInqBits == 1:
                weightInqLevels = 2
            elif weightInqBits >= 2:
                weightInqLevels = 2**weightInqBits
            else:
                assert(False)

        def convGen(in_planes, out_planes, kernel_size=None, stride=1,
                    groups=1, dilation=1, firstLayer=False):
            """3x3 convolution with padding"""
            # Padding preserves torchvision's spatial geometry per kernel size.
            if kernel_size == 3:
                padding = dilation
            elif kernel_size == 1:
                padding = 0
            elif kernel_size == 7:
                padding = 3
            else:
                assert(False)
            if firstLayer or not(quantWeights):
                return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                                 padding=padding, groups=groups, bias=False, dilation=dilation)
            else:
                return INQConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                                 padding=padding, groups=groups, bias=False, dilation=dilation,
                                 numLevels=weightInqLevels, strategy=weightInqStrategy,
                                 quantInitMethod=weightInqQuantInit)

        # Wrapper classes inject convGen so createNet/_make_layer can build
        # blocks with the standard torchvision argument lists.
        class BasicBlockWrap(BasicBlock):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs, convGen=convGen)

        class BottleneckWrap(Bottleneck):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs, convGen=convGen)

        if arch == 'resnet18':
            block = BasicBlockWrap
            layers = [2, 2, 2, 2]
        elif arch == 'resnet34':
            block = BasicBlockWrap
            layers = [3, 4, 6, 3]
        elif arch == 'resnet50':
            block = BottleneckWrap
            layers = [3, 4, 6, 3]
        elif arch == 'resnet101':
            block = BottleneckWrap
            layers = [3, 4, 23, 3]
        elif arch == 'resnet152':
            block = BottleneckWrap
            layers = [3, 8, 36, 3]
        else:
            assert(False)
        self.createNet(block, layers, convGen,
                       num_classes=1000, zero_init_residual=False, groups=1,
                       width_per_group=64, replace_stride_with_dilation=None, norm_layer=None)
        if pretrained:
            from torch.hub import load_state_dict_from_url
            state_dict = load_state_dict_from_url(model_urls[arch])
            missing_keys, unexpected_keys = self.load_state_dict(state_dict, strict=False)
            # INQ layers add .sParam/.weightFrozen entries absent from the
            # torchvision checkpoint; any other mismatch is a real error.
            missing_keys_nonInq = [s for s in missing_keys if not (s.endswith('.sParam') or s.endswith('.weightFrozen'))]
            assert(len(unexpected_keys) == 0)
            assert(len(missing_keys_nonInq) == 0)
            # if len(missing_keys) > 0:
            #     print('load_state_dict -- missing keys:')
            #     print(missing_keys)
            # if len(unexpected_keys) > 0:
            #     print('load_state_dict -- unexpected keys:')
            #     print(unexpected_keys)
        if weightInqSchedule != None:
            self.inqController = INQController(INQController.getInqModules(self),
                                               weightInqSchedule,
                                               clearOptimStateOnStep=True)

    def createNet(self, block, layers, convGen,
                  num_classes=1000, zero_init_residual=False, groups=1,
                  width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        """Build the stem, the four residual stages and the classifier head."""
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # The stem stays full precision (firstLayer=True -> plain nn.Conv2d).
        self.conv1 = convGen(3, self.inplanes, kernel_size=7, stride=2, firstLayer=True)
        # self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
        #                        bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0],
                                       convGen=convGen)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0],
                                       convGen=convGen)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1],
                                       convGen=convGen)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2],
                                       convGen=convGen)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # The final classifier is a plain nn.Linear, i.e. never quantized.
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, INQConv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False, convGen=None):
        """Stack ``blocks`` residual blocks; only the first may stride/downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                convGen(self.inplanes, planes*block.expansion,
                        kernel_size=1, stride=stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        # `block` is a wrapper class that injects convGen itself.
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x, withStats=False):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        if withStats:
            # No tensor statistics are collected for this model (empty list).
            stats = []
            return stats, x
        return x

    def forward_with_tensor_stats(self, x):
        stats, x = self.forward(x, withStats=True)
        return stats, x
if __name__ == "__main__":
model = ResNet(arch='resnet18', quantAct=False, weightInqSchedule={},
quantSkipFirstLayer=True, quantSkipLastLayer=True,
pretrained=True)
loadModel = True
if loadModel:
# path = '../../../ImageNet/logs/exp038/saves/best-backup.ckpt' # BWN
# path = '../../../ImageNet/logs/exp043/saves/best.ckpt' # TWN
path = '../../../ImageNet/logs/exp054/saves/best.ckpt' # BWN
fullState = torch.load(path, map_location='cpu')
netState = fullState['indiv']['net']
model.load_state_dict(netState)
import matplotlib.pyplot as plt
layerNames = list(netState.keys())
selectedLayers = ['layer4.0.conv1',
'layer2.1.conv2',
'layer1.0.conv2']
# selectedLayers = [l + '.weight' for l in selectedLayers]
selectedLayers = [l + '.weightFrozen' for l in selectedLayers]
_, axarr = plt.subplots(len(selectedLayers))
for ax, layerName in zip(axarr, selectedLayers):
plt.sca(ax)
plt.hist(netState[layerName].flatten(),
bins=201, range=(-3,3))
plt.xlim(-3,3)
plt.title(layerName)
exportONNX = False
if exportONNX:
modelFullPrec = ResNet(arch='resnet18', quantAct=False, quantWeights=False,
weightInqSchedule={},
quantSkipFirstLayer=True,
quantSkipLastLayer=True,
pretrained=True)
dummyInput = torch.randn(1, 3, 224, 224)
pbuf = torch.onnx.export(modelFullPrec, dummyInput,
"export.onnx", verbose=True,
input_names=['input'],
output_names=['output'])
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,126
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import torch
import torch.nn as nn
import math
from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d
class MeyerNet(nn.Module):
"""Audio Event Detection quantized Network."""
def __init__(self, capacityFactor=1.0, version=1,
quantized=True, quant_scheme=None,
quantFirstLast=True, withTwoAct=False, noTimePooling=False):
super().__init__()
self.noTimePooling = noTimePooling
def conv1quant(quant_scheme, ni, no, stride=1, padding=1):
return StochasticConv2d(*quant_scheme, ni, no, kernel_size=1,
stride=stride, padding=0, bias=False)
def conv3quant(quant_scheme, ni, no, stride=1, padding=1):
return StochasticConv2d(*quant_scheme, ni, no, kernel_size=3,
stride=stride, padding=1, bias=False)
def conv1float(quant_scheme, ni, no, stride=1, padding=1):
return nn.Conv2d(ni, no, kernel_size=1,
stride=stride, padding=0, bias=False)
def conv3float(quant_scheme, ni, no, stride=1, padding=1):
return nn.Conv2d(ni, no, kernel_size=3,
stride=stride, padding=1, bias=False)
if quantized:
conv1 = conv1quant
conv3 = conv3quant
activ = lambda quant_scheme, nc: StochasticActivation(*quant_scheme, nc)
if withTwoAct:
activ2 = lambda nc: nn.ReLU(inplace=True)
else:
activ2 = lambda nc: nn.Identity()
quantScheme = lambda s: quant_scheme[s]
else:
conv1 = conv1float
conv3 = conv3float
activ = lambda quant_scheme, nc: nn.ReLU(inplace=True)
activ2 = lambda nc: nn.Identity()
quantScheme = lambda s: None
bnorm = lambda nc: nn.BatchNorm2d(nc)
# bnorm = lambda nc: nn.Identity() # don't forget to enable/disable bias
c = lambda v: math.ceil(v*capacityFactor)
c1, c2, c3, c4, c5, c6 = c(64), c(64), c(128), c(128), c(128), c(128)
if version >= 2:
c1 = c(32)
if quantFirstLast:
self.phi1_conv = conv3(quantScheme('phi1_conv'), 1, c1)
else:
self.phi1_conv = conv3float(None, 1, c1)
self.phi1_act2 = activ2(c1)
self.phi1_bn = bnorm(c1)
self.phi1_act = activ(quantScheme('phi1_act'), c1)
self.phi2_conv = conv3(quantScheme('phi2_conv'), c1, c2, stride=2)
self.phi2_act2 = activ2(c2)
self.phi2_bn = bnorm(c2)
self.phi2_act = activ(quantScheme('phi2_act'), c2)
self.phi3_conv = conv3(quantScheme('phi3_conv'), c2, c3)
self.phi3_act2 = activ2(c3)
self.phi3_bn = bnorm(c3)
self.phi3_act = activ(quantScheme('phi3_act'), c3)
if version >= 3:
self.phi4_do = nn.Dropout2d(0.5)
else:
self.phi4_do = nn.Identity()
self.phi4_conv = conv3(quantScheme('phi4_conv'), c3, c4, stride=2)
self.phi4_act2 = activ2(c4)
self.phi4_bn = bnorm(c4)
self.phi4_act = activ(quantScheme('phi4_act'), c4)
self.phi5_conv = conv3(quantScheme('phi5_conv'), c4, c5)
self.phi5_act2 = activ2(c5)
self.phi5_bn = bnorm(c5)
self.phi5_act = activ(quantScheme('phi5_act'), c5)
self.phi6_conv = conv1(quantScheme('phi6_conv'), c5, c6)
self.phi6_act2 = activ2(c6)
self.phi6_bn = bnorm(c6)
if quantFirstLast:
self.phi6_act = activ(quantScheme('phi6_act'), c6)
self.phi7_conv = conv1(quantScheme('phi7_conv'), c6, 28)
else:
self.phi6_act = nn.Identity()
self.phi7_conv = conv1float(None, c6, 28)
self.phi7_bn = bnorm(28)
if noTimePooling:
self.phi8_pool = nn.AvgPool2d(kernel_size=(16,1), stride=1, padding=0)
else:
self.phi8_pool = nn.AvgPool2d(kernel_size=(16,100), stride=1, padding=0)
def forward(self, x, withStats=False):
    """Run the feature extractor phi1..phi8 on *x*.

    Each stage applies conv -> act2 -> bn -> act in that exact order
    (phi4 is additionally preceded by dropout, phi7 has no act2/act).

    Parameters:
        x: input batch; presumably 4-D (N, 1, H, W) so the phi8 average
           pool and the final reshape to 28 outputs line up -- TODO confirm
           against the caller.
        withStats: when True, also return (name, tensor) pairs holding the
           raw conv weights of stages 1, 3, 5 and 7.

    Returns:
        logits `x`, or the tuple `(stats, x)` when ``withStats`` is True.
    """
    stats = []
    x = self.phi1_conv(x)
    x = self.phi1_act2(x)
    x = self.phi1_bn(x)
    x = self.phi1_act(x)
    x = self.phi2_conv(x)
    x = self.phi2_act2(x)
    x = self.phi2_bn(x)
    x = self.phi2_act(x)
    x = self.phi3_conv(x)
    x = self.phi3_act2(x)
    x = self.phi3_bn(x)
    x = self.phi3_act(x)
    # phi4 applies (optional, version >= 3) dropout before its conv
    x = self.phi4_do(x)
    x = self.phi4_conv(x)
    x = self.phi4_act2(x)
    x = self.phi4_bn(x)
    x = self.phi4_act(x)
    x = self.phi5_conv(x)
    x = self.phi5_act2(x)
    x = self.phi5_bn(x)
    x = self.phi5_act(x)
    x = self.phi6_conv(x)
    x = self.phi6_act2(x)
    x = self.phi6_bn(x)
    x = self.phi6_act(x)
    # phi7: 1x1 conv to the 28 output channels, batch-norm, no activation
    x = self.phi7_conv(x)
    x = self.phi7_bn(x)
    x = self.phi8_pool(x)
    # NOTE(review): only a local `noTimePooling` is visible in __init__ here;
    # this assumes it is also stored as self.noTimePooling -- confirm.
    if self.noTimePooling:
        # keep the (un-pooled) time axis: one 28-vector per time step
        x = x.permute(0,2,3,1).reshape(-1, 28)
    else:
        x = x.reshape(x.size(0), 28)
    if withStats:
        stats.append(('phi1_conv_w', self.phi1_conv.weight.data))
        stats.append(('phi3_conv_w', self.phi3_conv.weight.data))
        stats.append(('phi5_conv_w', self.phi5_conv.weight.data))
        stats.append(('phi7_conv_w', self.phi7_conv.weight.data))
        return stats, x
    else:
        return x
def forward_with_tensor_stats(self, x):
    """Run the network and also collect the conv-weight statistics.

    Thin convenience wrapper around :meth:`forward` with ``withStats=True``;
    returns the same ``(stats, x)`` tuple.
    """
    return self.forward(x, withStats=True)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,127
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/indiv/stochastic_ops.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import math
# from scipy.stats import norm, uniform
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair, _triple
#from .cuda import init_ffi_lib, UHP_forward, UHP_backward
class UniformHeavisideProcess(torch.autograd.Function):
    """A Stochastic Process composed by step functions.

    This class defines a stochastic process whose elementary events are step
    functions with fixed quantization levels (codominion) and uniform noise on
    the jumps positions.
    """
    @staticmethod
    def forward(ctx, x, t, q, s, training):
        # x: input tensor; t: jump thresholds; q: quantization levels
        # (one more entry than t); s: two-element noise width,
        # s[0] for forward smoothing and s[1] for the backward pass.
        ctx.save_for_backward(x, t, q, s)
        t_shape = [*t.size()] + [1 for _ in range(x.dim())]  # dimensions with size 1 enable broadcasting
        x_minus_t = x - t.reshape(t_shape)
        if training and s[0] != 0.:
            # smooth surrogate: CDF of uniform noise of half-width s[0]
            sf_inv = 1 / s[0]
            cdf = torch.clamp((0.5 * x_minus_t) * sf_inv + 0.5, 0., 1.)
        else:
            # deterministic hard step at each threshold
            cdf = (x_minus_t >= 0.).float()
        d = q[1:] - q[:-1]
        # output = lowest level plus level increments weighted by each step's CDF
        sigma_x = q[0] + torch.sum(d.reshape(t_shape) * cdf, 0)
        return sigma_x
    @staticmethod
    def backward(ctx, grad_incoming):
        # Straight-through-style gradient: the PDF of the uniform noise
        # (a box of half-width s[1]) replaces the true (zero a.e.) derivative.
        x, t, q, s = ctx.saved_tensors
        t_shape = [*t.size()] + [1 for _ in range(x.dim())]  # dimensions with size 1 enable broadcasting
        x_minus_t = x - t.reshape(t_shape)
        if s[1] != 0.:
            sb_inv = 1 / s[1]
            pdf = (torch.abs_(x_minus_t) <= s[1]).float() * (0.5 * sb_inv)
        else:
            # zero backward noise width -> gradient is zero everywhere
            pdf = torch.zeros_like(grad_incoming)
        d = q[1:] - q[:-1]
        local_jacobian = torch.sum(d.reshape(t_shape) * pdf, 0)
        grad_outgoing = grad_incoming * local_jacobian
        # one gradient slot per forward argument; only x gets a gradient
        return grad_outgoing, None, None, None, None
class StochasticActivation(nn.Module):
    """Quantize activations (scores) with a stochastic step process."""

    def __init__(self, process, thresholds, quant_levels):
        super(StochasticActivation, self).__init__()
        self.process = process
        if process == 'uniform':
            self.activate = UniformHeavisideProcess.apply
        # Non-learnable tensors are stored as frozen parameters (not buffers)
        # so they appear in checkpoints exactly as before.
        for pname, pval in (('thresholds', thresholds),
                            ('quant_levels', quant_levels),
                            ('stddev', torch.ones(2))):
            self.register_parameter(
                pname, nn.Parameter(torch.Tensor(pval), requires_grad=False))

    def set_stddev(self, stddev):
        """Overwrite the (forward, backward) noise widths in place."""
        new_sd = torch.Tensor(stddev).to(self.stddev)
        self.stddev.data = new_sd

    def forward(self, x):
        return self.activate(x, self.thresholds, self.quant_levels,
                             self.stddev, self.training)
class StochasticLinear(nn.Module):
    """Affine transform whose weights are stochastically quantized."""

    def __init__(self, process, thresholds, quant_levels, in_features, out_features, bias=True):
        super(StochasticLinear, self).__init__()
        # stochastic quantization state
        self.process = process
        if process == 'uniform':
            self.activate_weight = UniformHeavisideProcess.apply
        # frozen (non-trainable) parameters, kept as parameters rather than
        # buffers so existing checkpoints keep loading unchanged
        for pname, pval in (('thresholds', thresholds),
                            ('quant_levels', quant_levels),
                            ('stddev', torch.ones(2))):
            self.register_parameter(
                pname, nn.Parameter(torch.Tensor(pval), requires_grad=False))
        # plain linear-layer state
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights near random thresholds plus uniform jitter."""
        bound = 1. / math.sqrt(self.weight.size(1))
        w = self.weight.data
        w.random_(to=len(self.thresholds.data))      # random level index per entry
        w = self.thresholds[w.to(torch.long)]        # index -> threshold value
        noise = torch.zeros_like(w).uniform_(-bound, bound)
        self.weight.data = w + noise
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def set_stddev(self, stddev):
        """Overwrite the (forward, backward) noise widths in place."""
        self.stddev.data = torch.Tensor(stddev).to(self.stddev)

    def forward(self, input):
        q_weight = self.activate_weight(self.weight, self.thresholds,
                                        self.quant_levels, self.stddev,
                                        self.training)
        return F.linear(input, q_weight, self.bias)
class _StochasticConvNd(nn.Module):
    """Cross-correlation transform whose weights are stochastically quantized.

    Shared base for the 1d/2d/3d stochastic convolutions; mirrors the
    constructor contract of torch's own ``_ConvNd``.
    """

    def __init__(self, process, thresholds, quant_levels,
                 in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias):
        super(_StochasticConvNd, self).__init__()
        # stochastic quantization state
        self.process = process
        if process == 'uniform':
            self.activate_weight = UniformHeavisideProcess.apply
        # frozen (non-trainable) parameters, registered in the same order as
        # before so state_dict layout is unchanged
        for pname, pval in (('thresholds', thresholds),
                            ('quant_levels', quant_levels),
                            ('stddev', torch.ones(2))):
            self.register_parameter(
                pname, nn.Parameter(torch.Tensor(pval), requires_grad=False))
        # plain convolution state
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if transposed:
            w_shape = (in_channels, out_channels // groups, *kernel_size)
        else:
            w_shape = (out_channels, in_channels // groups, *kernel_size)
        self.weight = nn.Parameter(torch.Tensor(*w_shape))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights near random thresholds plus uniform jitter."""
        fan_in = self.in_channels
        for k_dim in self.kernel_size:
            fan_in *= k_dim
        bound = 1. / math.sqrt(fan_in)
        w = self.weight.data
        w.random_(to=len(self.thresholds.data))      # random level index per entry
        w = self.thresholds[w.to(torch.long)]        # index -> threshold value
        noise = torch.zeros_like(w).uniform_(-bound, bound)
        self.weight.data = w + noise
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def set_stddev(self, stddev):
        """Overwrite the (forward, backward) noise widths in place."""
        self.stddev.data = torch.Tensor(stddev).to(self.stddev)
class StochasticConv1d(_StochasticConvNd):
    """1-D convolution with stochastically quantized weights."""

    def __init__(self, process, thresholds, quant_levels,
                 in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        # normalize scalar hyper-parameters to 1-tuples inline
        super(StochasticConv1d, self).__init__(
            process, thresholds, quant_levels,
            in_channels, out_channels, _single(kernel_size), _single(stride),
            _single(padding), _single(dilation), False, _single(0), groups, bias)

    def forward(self, input):
        q_weight = self.activate_weight(self.weight, self.thresholds,
                                        self.quant_levels, self.stddev,
                                        self.training)
        return F.conv1d(input, q_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class StochasticConv2d(_StochasticConvNd):
    """2-D convolution with stochastically quantized weights."""

    def __init__(self, process, thresholds, quant_levels,
                 in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        # normalize scalar hyper-parameters to 2-tuples inline
        super(StochasticConv2d, self).__init__(
            process, thresholds, quant_levels,
            in_channels, out_channels, _pair(kernel_size), _pair(stride),
            _pair(padding), _pair(dilation), False, _pair(0), groups, bias)

    def forward(self, input):
        q_weight = self.activate_weight(self.weight, self.thresholds,
                                        self.quant_levels, self.stddev,
                                        self.training)
        return F.conv2d(input, q_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class StochasticConv3d(_StochasticConvNd):
    """3-D convolution with stochastically quantized weights."""

    def __init__(self, process, thresholds, quant_levels,
                 in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        # normalize scalar hyper-parameters to 3-tuples inline
        super(StochasticConv3d, self).__init__(
            process, thresholds, quant_levels,
            in_channels, out_channels, _triple(kernel_size), _triple(stride),
            _triple(padding), _triple(dilation), False, _triple(0), groups, bias)

    def forward(self, input):
        q_weight = self.activate_weight(self.weight, self.thresholds,
                                        self.quant_levels, self.stddev,
                                        self.training)
        return F.conv3d(input, q_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,128
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import math
import torch.nn as nn
#from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d
from quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d
#from quantlab.indiv.ste_ops import STEActivation
from quantlab.ImageNet.MobileNetv2.mobilenetv2baseline import MobileNetv2Baseline
class MobileNetv2QuantWeight(MobileNetv2Baseline):
    """MobileNetv2 Convolutional Neural Network.

    Variant of the float baseline whose convolutions can be replaced by
    INQ (incremental network quantization) layers; activations stay float
    (``quantAct`` must be False).
    """
    def __init__(self, capacity=1, expansion=6, quant_schemes=None,
                 quantWeights=True, quantAct=True,
                 weightInqSchedule=None, weightInqLevels=None,
                 weightInqStrategy="magnitude", weightInqQuantInit=None,
                 quantSkipFirstLayer=False, quantSkipLastLayer=False,
                 quantDepthwSep=True, pretrained=False):
        """Build the network.

        Parameters:
            capacity: width multiplier applied to all channel counts.
            expansion: inverted-residual expansion factor.
            quant_schemes: unused here -- kept for interface compatibility.
            quantWeights: replace convs by INQConv2d when True.
            quantAct: must be False (activation quantization unsupported).
            weightInqSchedule / weightInqLevels / weightInqStrategy /
            weightInqQuantInit: INQ hyper-parameters forwarded to the layers.
            quantSkipFirstLayer: keep the stem conv in float when False.
            quantSkipLastLayer: must be True (classifier stays float).
            quantDepthwSep: also quantize depthwise-separable convs when True.
            pretrained: load torchvision's pretrained mobilenet_v2 weights.
        """
        super().__init__(capacity, expansion)
        assert(quantAct == False)
        # channel plan: c* are block output widths, t* the expanded widths
        c0 = 3
        t0 = int(32 * capacity)
        c1 = int(16 * capacity)
        t1 = c1 * expansion
        c2 = int(24 * capacity)
        t2 = c2 * expansion
        c3 = int(32 * capacity)
        t3 = c3 * expansion
        c4 = int(64 * capacity)
        t4 = c4 * expansion
        c5 = int(96 * capacity)
        t5 = c5 * expansion
        c6 = int(160 * capacity)
        t6 = c6 * expansion
        c7 = int(320 * capacity)
        c8 = max(int(1280 * capacity), 1280)
        def conv2d(ni, no, kernel_size=3, stride=1, padding=1, groups=1, bias=False):
            # Factory: INQ-quantized conv when requested, float conv otherwise.
            # A depthwise layer is detected by ni == groups == no.
            if (quantWeights and
                (quantDepthwSep or
                 (ni != groups or ni != no))): # not depthw. sep. layer
                assert(weightInqSchedule != None)
                return INQConv2d(ni, no,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding, groups=groups, bias=bias,
                                 numLevels=weightInqLevels,
                                 strategy=weightInqStrategy,
                                 quantInitMethod=weightInqQuantInit)
            else:
                return nn.Conv2d(ni, no,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding, groups=groups, bias=bias)
        def activ():
            return nn.ReLU6(inplace=True)
        # first block
        if quantSkipFirstLayer:
            self.phi01_conv = conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)
        else:
            self.phi01_conv = nn.Conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)
        self.phi01_bn = nn.BatchNorm2d(t0)
        self.phi01_act = activ()
        self.phi02_conv = conv2d(t0, t0, kernel_size=3, stride=1, padding=1, groups=t0, bias=False)
        self.phi02_bn = nn.BatchNorm2d(t0)
        self.phi02_act = activ()
        self.phi03_conv = conv2d(t0, c1, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi03_bn = nn.BatchNorm2d(c1)
        # second block
        self.phi04_conv = conv2d(c1, t1, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi04_bn = nn.BatchNorm2d(t1)
        self.phi04_act = activ()
        self.phi05_conv = conv2d(t1, t1, kernel_size=3, stride=2, padding=1, groups=t1, bias=False)
        self.phi05_bn = nn.BatchNorm2d(t1)
        self.phi05_act = activ()
        self.phi06_conv = conv2d(t1, c2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi06_bn = nn.BatchNorm2d(c2)
        self.phi06_act = activ()
        self.phi07_conv = conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi07_bn = nn.BatchNorm2d(t2)
        self.phi07_act = activ()
        self.phi08_conv = conv2d(t2, t2, kernel_size=3, stride=1, padding=1, groups=t2, bias=False)
        self.phi08_bn = nn.BatchNorm2d(t2)
        self.phi08_act = activ()
        self.phi09_conv = conv2d(t2, c2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi09_bn = nn.BatchNorm2d(c2)
        # third block
        self.phi10_conv = conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi10_bn = nn.BatchNorm2d(t2)
        self.phi10_act = activ()
        self.phi11_conv = conv2d(t2, t2, kernel_size=3, stride=2, padding=1, groups=t2, bias=False)
        self.phi11_bn = nn.BatchNorm2d(t2)
        self.phi11_act = activ()
        self.phi12_conv = conv2d(t2, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi12_bn = nn.BatchNorm2d(c3)
        self.phi12_act = activ()
        self.phi13_conv = conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi13_bn = nn.BatchNorm2d(t3)
        self.phi13_act = activ()
        self.phi14_conv = conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)
        self.phi14_bn = nn.BatchNorm2d(t3)
        self.phi14_act = activ()
        self.phi15_conv = conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi15_bn = nn.BatchNorm2d(c3)
        self.phi15_act = activ()
        self.phi16_conv = conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi16_bn = nn.BatchNorm2d(t3)
        self.phi16_act = activ()
        self.phi17_conv = conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)
        self.phi17_bn = nn.BatchNorm2d(t3)
        self.phi17_act = activ()
        self.phi18_conv = conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi18_bn = nn.BatchNorm2d(c3)
        # fourth block
        self.phi19_conv = conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi19_bn = nn.BatchNorm2d(t3)
        self.phi19_act = activ()
        self.phi20_conv = conv2d(t3, t3, kernel_size=3, stride=2, padding=1, groups=t3, bias=False)
        self.phi20_bn = nn.BatchNorm2d(t3)
        self.phi20_act = activ()
        self.phi21_conv = conv2d(t3, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi21_bn = nn.BatchNorm2d(c4)
        self.phi21_act = activ()
        self.phi22_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi22_bn = nn.BatchNorm2d(t4)
        self.phi22_act = activ()
        self.phi23_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi23_bn = nn.BatchNorm2d(t4)
        self.phi23_act = activ()
        self.phi24_conv = conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi24_bn = nn.BatchNorm2d(c4)
        self.phi24_act = activ()
        self.phi25_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi25_bn = nn.BatchNorm2d(t4)
        self.phi25_act = activ()
        self.phi26_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi26_bn = nn.BatchNorm2d(t4)
        self.phi26_act = activ()
        self.phi27_conv = conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi27_bn = nn.BatchNorm2d(c4)
        self.phi27_act = activ()
        self.phi28_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi28_bn = nn.BatchNorm2d(t4)
        self.phi28_act = activ()
        self.phi29_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi29_bn = nn.BatchNorm2d(t4)
        self.phi29_act = activ()
        self.phi30_conv = conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi30_bn = nn.BatchNorm2d(c4)
        # fifth block
        self.phi31_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi31_bn = nn.BatchNorm2d(t4)
        self.phi31_act = activ()
        self.phi32_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi32_bn = nn.BatchNorm2d(t4)
        self.phi32_act = activ()
        self.phi33_conv = conv2d(t4, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi33_bn = nn.BatchNorm2d(c5)
        self.phi33_act = activ()
        self.phi34_conv = conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi34_bn = nn.BatchNorm2d(t5)
        self.phi34_act = activ()
        self.phi35_conv = conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)
        self.phi35_bn = nn.BatchNorm2d(t5)
        self.phi35_act = activ()
        self.phi36_conv = conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi36_bn = nn.BatchNorm2d(c5)
        self.phi36_act = activ()
        self.phi37_conv = conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi37_bn = nn.BatchNorm2d(t5)
        self.phi37_act = activ()
        self.phi38_conv = conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)
        self.phi38_bn = nn.BatchNorm2d(t5)
        self.phi38_act = activ()
        self.phi39_conv = conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi39_bn = nn.BatchNorm2d(c5)
        # sixth block
        self.phi40_conv = conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi40_bn = nn.BatchNorm2d(t5)
        self.phi40_act = activ()
        self.phi41_conv = conv2d(t5, t5, kernel_size=3, stride=2, padding=1, groups=t5, bias=False)
        self.phi41_bn = nn.BatchNorm2d(t5)
        self.phi41_act = activ()
        self.phi42_conv = conv2d(t5, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi42_bn = nn.BatchNorm2d(c6)
        self.phi42_act = activ()
        self.phi43_conv = conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi43_bn = nn.BatchNorm2d(t6)
        self.phi43_act = activ()
        self.phi44_conv = conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi44_bn = nn.BatchNorm2d(t6)
        self.phi44_act = activ()
        self.phi45_conv = conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi45_bn = nn.BatchNorm2d(c6)
        self.phi45_act = activ()
        self.phi46_conv = conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi46_bn = nn.BatchNorm2d(t6)
        self.phi46_act = activ()
        self.phi47_conv = conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi47_bn = nn.BatchNorm2d(t6)
        self.phi47_act = activ()
        self.phi48_conv = conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi48_bn = nn.BatchNorm2d(c6)
        # seventh block
        self.phi49_conv = conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi49_bn = nn.BatchNorm2d(t6)
        self.phi49_act = activ()
        self.phi50_conv = conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi50_bn = nn.BatchNorm2d(t6)
        self.phi50_act = activ()
        self.phi51_conv = conv2d(t6, c7, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi51_bn = nn.BatchNorm2d(c7)
        # classifier
        self.phi52_conv = conv2d(c7, c8, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi52_bn = nn.BatchNorm2d(c8)
        self.phi52_act = activ()
        self.phi53_avg = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)
        assert(quantSkipLastLayer)
        self.phi53_fc = nn.Linear(c8, 1000)
        self._initialize_weights()
        if pretrained:
            self.loadPretrainedTorchVision()
        if weightInqSchedule != None:
            # controller drives the incremental freezing of INQ weights
            self.inqController = INQController(INQController.getInqModules(self),
                                               weightInqSchedule,
                                               clearOptimStateOnStep=True)
    def _initialize_weights(self):
        """He-init convs, unit-init batch-norms, small-normal-init linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, INQConv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) or isinstance(m, INQLinear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
    def loadPretrainedTorchVision(self):
        """Load torchvision's pretrained mobilenet_v2 into this layout.

        Maps torchvision's nested `features.*` names onto the flat
        `phiNN_*` names, then loads the remapped state dict strictly,
        tolerating only the INQ-specific extra fields.
        """
        import torchvision as tv
        modelRef = tv.models.mobilenet_v2(pretrained=True)
        stateDictRef = modelRef.state_dict()
        # stem and first inverted residual are irregular -> map explicitly
        remapping = {'features.0.0': 'phi01_conv',
                     'features.0.1': 'phi01_bn',
                     'features.1.conv.0.0': 'phi02_conv',
                     'features.1.conv.0.1': 'phi02_bn',
                     'features.1.conv.1': 'phi03_conv',
                     'features.1.conv.2': 'phi03_bn',
                     }
        # blocks 2..17 each contribute three conv/bn pairs (phi04..phi51)
        for i, layerBlock in enumerate(range(2,17+1)):
            offset = 3*i + 4
            rExt = {'features.%d.conv.0.0' % (layerBlock,) : 'phi%02d_conv' % (offset+0,),
                    'features.%d.conv.0.1' % (layerBlock,) : 'phi%02d_bn' % (offset+0,),
                    'features.%d.conv.1.0' % (layerBlock,) : 'phi%02d_conv' % (offset+1,),
                    'features.%d.conv.1.1' % (layerBlock,) : 'phi%02d_bn' % (offset+1,),
                    'features.%d.conv.2' % (layerBlock,) : 'phi%02d_conv' % (offset+2,),
                    'features.%d.conv.3' % (layerBlock,) : 'phi%02d_bn' % (offset+2,),
                    }
            remapping.update(rExt)
        rExt = {'features.18.0': 'phi52_conv',
                'features.18.1': 'phi52_bn',
                'classifier.1': 'phi53_fc'
                }
        remapping.update(rExt)
        # rename every reference key whose prefix matches a remapping entry;
        # keys with no match (e.g. the dropout) are dropped
        stateDictRefMapped = {ksd.replace(kremap, vremap): vsd
                              for ksd, vsd in stateDictRef.items()
                              for kremap, vremap in remapping.items()
                              if ksd.startswith(kremap)}
        missingFields = {k: v
                         for k,v in self.state_dict().items()
                         if k not in stateDictRefMapped}
        assert(len([k
                    for k in missingFields.keys()
                    if not (k.endswith('.sParam') or
                            k.endswith('.weightFrozen'))
                    ]) == 0) # assert only INQ-specific fields missing
        stateDictRefMapped.update(missingFields)
        self.load_state_dict(stateDictRefMapped, strict=True)
if __name__ == '__main__':
    # Smoke test: build the INQ-quantized MobileNetv2 and pull in the
    # torchvision pretrained weights (requires network access on first run).
    model = MobileNetv2QuantWeight(quantAct=False, quantWeights=True,
                                   weightInqSchedule={},
                                   weightInqLevels=3,
                                   weightInqStrategy="magnitude-SRQ",
                                   weightInqQuantInit='uniform-perCh-l2opt',
                                   quantSkipFirstLayer=True,
                                   quantSkipLastLayer=True,
                                   pretrained=True)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,129
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/AlexNet/alexnetbaseline.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import torch
import torch.nn as nn
# In order for the baselines to be launched with the same logic as quantized
# models, an empty quantization scheme and an empty thermostat schedule need
# to be configured.
# Use the following templates for the `net` and `thermostat` configurations:
#
# "net": {
# "class": "AlexNetBaseline",
# "params": {"capacity": 1},
# "pretrained": null,
# "loss_fn": {
# "class": "CrossEntropyLoss",
# "params": {}
# }
# }
#
# "thermostat": {
# "class": "AlexNetBaseline",
# "params": {
# "noise_schemes": {},
# "bindings": []
# }
# }
class AlexNetBaseline(nn.Module):
"""AlexNet Convolutional Neural Network."""
def __init__(self, capacity):
super().__init__()
c0 = 3
c1 = int(64 * capacity)
c2 = int(64 * 3 * capacity)
c3 = int(64 * 6 * capacity)
c4 = int(64 * 4 * capacity)
c5 = 256
nh = 4096
# convolutional layers
self.phi1_conv = nn.Conv2d(c0, c1, kernel_size=11, stride=4, padding=2, bias=False)
self.phi1_mp = nn.MaxPool2d(kernel_size=3, stride=2)
self.phi1_bn = nn.BatchNorm2d(c1)
self.phi1_act = nn.ReLU6()
self.phi2_conv = nn.Conv2d(c1, c2, kernel_size=5, padding=2, bias=False)
self.phi2_mp = nn.MaxPool2d(kernel_size=3, stride=2)
self.phi2_bn = nn.BatchNorm2d(c2)
self.phi2_act = nn.ReLU6()
self.phi3_conv = nn.Conv2d(c2, c3, kernel_size=3, padding=1, bias=False)
self.phi3_bn = nn.BatchNorm2d(c3)
self.phi3_act = nn.ReLU6()
self.phi4_conv = nn.Conv2d(c3, c4, kernel_size=3, padding=1, bias=False)
self.phi4_bn = nn.BatchNorm2d(c4)
self.phi4_act = nn.ReLU6()
self.phi5_conv = nn.Conv2d(c4, c5, kernel_size=3, padding=1, bias=False)
self.phi5_mp = nn.MaxPool2d(kernel_size=3, stride=2)
self.phi5_bn = nn.BatchNorm2d(c5)
self.phi5_act = nn.ReLU6()
# fully connected layers
self.phi6_fc = nn.Linear(c5 * 6 * 6, nh, bias=False)
self.phi6_bn = nn.BatchNorm1d(nh)
self.phi6_act = nn.ReLU6()
self.phi7_fc = nn.Linear(nh, nh, bias=False)
self.phi7_bn = nn.BatchNorm1d(nh)
self.phi7_act = nn.ReLU6()
self.phi8_fc = nn.Linear(nh, 1000)
def forward(self, x, withStats=False):
x = self.phi1_conv(x)
x = self.phi1_mp(x)
x = self.phi1_bn(x)
x = self.phi1_act(x)
x = self.phi2_conv(x)
x = self.phi2_mp(x)
x = self.phi2_bn(x)
x = self.phi2_act(x)
x = self.phi3_conv(x)
x = self.phi3_bn(x)
x = self.phi3_act(x)
x = self.phi4_conv(x)
x = self.phi4_bn(x)
x = self.phi4_act(x)
x = self.phi5_conv(x)
x = self.phi5_mp(x)
x = self.phi5_bn(x)
x = self.phi5_act(x)
x = x.view(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())
x = self.phi6_fc(x)
x = self.phi6_bn(x)
x = self.phi6_act(x)
x = self.phi7_fc(x)
x = self.phi7_bn(x)
x = self.phi7_act(x)
x = self.phi8_fc(x)
x = self.phi8_bn(x)
if withStats:
stats = []
stats.append(('phi1_conv_w', self.phi1_conv.weight.data))
stats.append(('phi2_conv_w', self.phi2_conv.weight.data))
stats.append(('phi3_conv_w', self.phi3_conv.weight.data))
stats.append(('phi4_conv_w', self.phi4_conv.weight.data))
stats.append(('phi5_conv_w', self.phi5_conv.weight.data))
stats.append(('phi6_fc_w', self.phi6_fc.weight.data))
stats.append(('phi7_fc_w', self.phi7_fc.weight.data))
stats.append(('phi8_fc_w', self.phi8_fc.weight.data))
return stats, x
return x
def forward_with_tensor_stats(self, x):
    """Convenience wrapper: forward pass that always returns (stats, logits)."""
    return self.forward(x, withStats=True)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,130
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/ResNet/postprocess.py
|
../MobileNetv2/postprocess.py
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,131
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import torchvision as tv
import pickle
import os
import numpy as np
import torch
class PickleDictionaryNumpyDataset(tv.datasets.VisionDataset):
    """Dataset backed by a pickled ``{class_name: numpy_array}`` dictionary.

    Looks for a ``train.pickle`` or ``test.pickle`` file within ``root``. The
    file must contain a dictionary mapping class names to numpy arrays whose
    first dimension indexes the individual samples.

    Args:
        root (string): Root directory path.
        train (bool, default=True): load the train set if True, else the test set.
        transform (callable, optional): transform applied to each sample.
        target_transform (callable, optional): transform applied to each target.

    Attributes:
        classes (list): sorted list of the class names.
        class_to_idx (dict): maps class_name -> class_index.
        data (numpy array): all samples, stacked along the first dimension.
        targets (list): class_index for each sample in ``data``.
    """
    def __init__(self, root, train=True, transform=None, target_transform=None):
        super().__init__(root, transform=transform,
                         target_transform=target_transform)
        self.train = train  # training set or test set
        filename = 'train.pickle' if self.train else 'test.pickle'
        with open(os.path.join(root, filename), 'rb') as f:
            dataset = pickle.load(f)
        entries = dataset.items()
        # Assumes the train set contains every class.
        self.classes = sorted(name for name, _ in entries)
        self.class_to_idx = {name: idx for idx, name in enumerate(self.classes)}
        # Stack every individual sample along a new first axis; targets are
        # built in the same (class, sample) iteration order so they line up.
        self.data = np.stack(
            [samples[i] for _, samples in entries for i in range(len(samples))],
            axis=0)
        self.targets = [self.class_to_idx[name]
                        for name, samples in entries
                        for _ in range(len(samples))]
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        sample = self.data[index]
        target = self.targets[index]
        if self.transform is not None:
            # note: dimensionality here is atypical (2 dims, not the usual 3)
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # Scale by 1/2**15 (presumably int16 full-scale — confirm) and add a
        # leading channel dimension.
        tensor = torch.from_numpy(sample).float()
        tensor = tensor.mul(1 / 2 ** 15).unsqueeze(0).contiguous()
        return tensor, target
    def __len__(self):
        return self.data.shape[0]
def _get_transforms(augment):
    """Return the training/validation transforms for this dataset.

    No transforms are needed: scaling of the raw samples happens inside the
    dataset's ``__getitem__`` itself, so both entries are ``None``.

    Args:
        augment (bool): must be False — augmentation is not implemented.

    Returns:
        dict: {'training': None, 'validation': None}
    """
    # Augmentation is not implemented for this dataset; the original strict
    # equality check is preserved (raises AssertionError for any other value).
    assert augment == False
    return {
        'training': None,
        'validation': None
    }
def load_data_sets(dir_data, data_config):
    """Build the train and validation datasets from pickled numpy dictionaries.

    Args:
        dir_data: directory containing train.pickle / test.pickle.
        data_config: dict with an 'augment' flag (must be False).

    Returns:
        (train_set, valid_set, None) — this dataset has no separate test set.
    """
    transforms = _get_transforms(data_config['augment'])
    train_set = PickleDictionaryNumpyDataset(dir_data, train=True,
                                             transform=transforms['training'])
    valid_set = PickleDictionaryNumpyDataset(dir_data, train=False,
                                             transform=transforms['validation'])
    return train_set, valid_set, None
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,132
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/CIFAR-10/VGG/preprocess.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
import torchvision
from torchvision.transforms import RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose
from quantlab.treat.data.split import transform_random_split
_CIFAR10 = {
'Normalize': {
'mean': (0.4914, 0.4822, 0.4465),
'std': (0.2470, 0.2430, 0.2610)
}
}
def get_transforms(augment):
    """Return CIFAR-10 preprocessing transforms.

    Args:
        augment (bool): when True, training uses random crop + horizontal flip;
            otherwise training uses the plain validation pipeline.

    Returns:
        dict with 'training' and 'validation' transform pipelines.
    """
    valid_t = Compose([ToTensor(),
                       Normalize(**_CIFAR10['Normalize'])])
    if augment:
        train_t = Compose([RandomCrop(32, padding=4),
                           RandomHorizontalFlip(),
                           ToTensor(),
                           Normalize(**_CIFAR10['Normalize'])])
    else:
        train_t = valid_t
    return {
        'training': train_t,
        'validation': valid_t
    }
def load_data_sets(dir_data, data_config):
    """Load CIFAR-10 train/validation/test datasets.

    When ``data_config['useTestForVal']`` is exactly True, the full training
    set is used for training and the official test set doubles as the
    validation set; otherwise the training set is randomly split according to
    ``data_config['valid_fraction']``.

    Args:
        dir_data: dataset root directory (downloaded here if missing).
        data_config: dict with 'augment', 'valid_fraction' and optionally
            'useTestForVal'.

    Returns:
        (train_set, valid_set, test_set)
    """
    transforms = get_transforms(data_config['augment'])
    trainvalid_set = torchvision.datasets.CIFAR10(root=dir_data, train=True, download=True)
    # Strict `== True` comparison preserved from the original config contract.
    use_test_for_val = data_config.get('useTestForVal') == True
    if use_test_for_val:
        # Degenerate split keeps RNG consumption identical to a real split.
        len_train = len(trainvalid_set)
    else:
        len_train = int(len(trainvalid_set) * (1.0 - data_config['valid_fraction']))
    train_set, valid_set = transform_random_split(
        trainvalid_set,
        [len_train, len(trainvalid_set) - len_train],
        [transforms['training'], transforms['validation']])
    test_set = torchvision.datasets.CIFAR10(root=dir_data, train=False,
                                            download=True,
                                            transform=transforms['validation'])
    if use_test_for_val:
        valid_set = test_set
        print('using test set for validation.')
    return train_set, valid_set, test_set
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,133
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py
|
# Copyright (c) 2019 Tibor Schneider
import numpy as np
import torch as t
import torch.nn.functional as F
class EEGNetBaseline(t.nn.Module):
    """
    EEGNet
    In order for the baseline to be launched with the same logic as the quantized models, an empty
    quantization scheme and an empty thermostat schedule needs to be configured.
    Use the following templates for the 'net' and 'thermostat' configurations (for the "net" object,
    all params can be omitted to use the default ones):
    "net": {
        "class": "EEGNetBaseline",
        "params": {
            "F1": 8,
            "D": 2,
            "F2": 16,
            "C": 22,
            "T": 1125,
            "N": 4,
            "p_dropout": 0.5,
            "activation": "relu",
            "dropout_type": "TimeDropout2D",
        },
        "pretrained": null,
        "loss_fn": {
            "class": "CrossEntropyLoss",
            "params": {}
        }
    }
    "thermostat": {
        "class": "EEGNetBaseline",
        "params": {
            "noise_schemes": {},
            "bindings": []
        }
    }
    """
    def __init__(self, F1=8, D=2, F2=None, C=22, T=1125, N=4, p_dropout=0.5, activation='relu',
                 dropout_type='TimeDropout2D'):
        """
        F1: Number of spectral filters
        D: Number of spacial filters (per spectral filter), F2 = F1 * D
        F2: Number or None. If None, then F2 = F1 * D
        C: Number of EEG channels
        T: Number of time samples
        N: Number of classes
        p_dropout: Dropout Probability
        activation: string, either 'elu' or 'relu'
        dropout_type: string, either 'dropout', 'SpatialDropout2d' or 'TimeDropout2D'
        """
        super(EEGNetBaseline, self).__init__()
        # prepare network constants
        if F2 is None:
            F2 = F1 * D
        # check the activation input
        activation = activation.lower()
        assert activation in ['elu', 'relu']
        # Prepare Dropout Type (case-insensitive match on the config string)
        if dropout_type.lower() == 'dropout':
            dropout = t.nn.Dropout
        elif dropout_type.lower() == 'spatialdropout2d':
            dropout = t.nn.Dropout2d
        elif dropout_type.lower() == 'timedropout2d':
            dropout = TimeDropout2d
        else:
            raise ValueError("dropout_type must be one of SpatialDropout2d, Dropout or "
                             "WrongDropout2d")
        # store local values
        self.F1, self.D, self.F2, self.C, self.T, self.N = (F1, D, F2, C, T, N)
        self.p_dropout, self.activation = (p_dropout, activation)
        # Number of input neurons to the final fully connected layer
        # (the time axis is divided by 8 twice, once per AvgPool2d((1, 8)) stage)
        n_features = (T // 8) // 8
        # Block 1
        # Asymmetric zero-padding (31 left, 32 right) keeps the time length
        # unchanged after the width-64 temporal convolution below.
        self.conv1_pad = t.nn.ZeroPad2d((31, 32, 0, 0))
        self.conv1 = t.nn.Conv2d(1, F1, (1, 64), bias=False)
        self.batch_norm1 = t.nn.BatchNorm2d(F1, momentum=0.01, eps=0.001)
        # Depthwise spatial convolution over all C channels (groups=F1).
        self.conv2 = t.nn.Conv2d(F1, D * F1, (C, 1), groups=F1, bias=False)
        self.batch_norm2 = t.nn.BatchNorm2d(D * F1, momentum=0.01, eps=0.001)
        self.activation1 = t.nn.ELU(inplace=True) if activation == 'elu' else t.nn.ReLU(inplace=True)
        self.pool1 = t.nn.AvgPool2d((1, 8))
        # self.dropout1 = dropout(p=p_dropout)
        # NOTE(review): block 1 uses plain Dropout instead of the configured
        # dropout_type (the configured variant is commented out above) — confirm
        # this is intentional.
        self.dropout1 = t.nn.Dropout(p=p_dropout)
        # Block 2
        self.sep_conv_pad = t.nn.ZeroPad2d((7, 8, 0, 0))
        # Separable convolution: depthwise (1, 16) followed by pointwise (1, 1).
        self.sep_conv1 = t.nn.Conv2d(D * F1, D * F1, (1, 16), groups=D * F1, bias=False)
        self.sep_conv2 = t.nn.Conv2d(D * F1, F2, (1, 1), bias=False)
        self.batch_norm3 = t.nn.BatchNorm2d(F2, momentum=0.01, eps=0.001)
        self.activation2 = t.nn.ELU(inplace=True) if activation == 'elu' else t.nn.ReLU(inplace=True)
        self.pool2 = t.nn.AvgPool2d((1, 8))
        self.dropout2 = dropout(p=p_dropout)
        # Fully connected layer (classifier)
        self.flatten = Flatten()
        self.fc = t.nn.Linear(F2 * n_features, N, bias=True)
        # initialize weights
        self._initialize_params()
    def forward(self, x, with_stats=False):
        # input dimensions: (s, 1, C, T)
        # Block 1
        x = self.conv1_pad(x)
        x = self.conv1(x)  # output dim: (s, F1, C, T-1)
        x = self.batch_norm1(x)
        x = self.conv2(x)  # output dim: (s, D * F1, 1, T-1)
        x = self.batch_norm2(x)
        x = self.activation1(x)
        x = self.pool1(x)  # output dim: (s, D * F1, 1, T // 8)
        x = self.dropout1(x)
        # Block2
        x = self.sep_conv_pad(x)
        x = self.sep_conv1(x)  # output dim: (s, D * F1, 1, T // 8 - 1)
        x = self.sep_conv2(x)  # output dim: (s, F2, 1, T // 8 - 1)
        x = self.batch_norm3(x)
        x = self.activation2(x)
        x = self.pool2(x)  # output dim: (s, F2, 1, T // 64)
        x = self.dropout2(x)
        # Classification
        x = self.flatten(x)  # output dim: (s, F2 * (T // 64))
        x = self.fc(x)  # output dim: (s, N)
        if with_stats:
            # Expose the raw weight tensors of the learnable layers for logging.
            stats = [('conv1_w', self.conv1.weight.data),
                     ('conv2_w', self.conv2.weight.data),
                     ('sep_conv1_w', self.sep_conv1.weight.data),
                     ('sep_conv2_w', self.sep_conv2.weight.data),
                     ('fc_w', self.fc.weight.data),
                     ('fc_b', self.fc.bias.data)]
            return stats, x
        return x
    def forward_with_tensor_stats(self, x):
        # Wrapper used by the training framework; always returns (stats, x).
        return self.forward(x, with_stats=True)
    def _initialize_params(self, weight_init=t.nn.init.xavier_uniform_, bias_init=t.nn.init.zeros_):
        """
        Initializes all the parameters of the model
        Parameters:
         - weight_init: t.nn.init inplace function
         - bias_init: t.nn.init inplace function
        """
        def init_weight(m):
            # Only conv and linear layers are (re-)initialized; batch-norm
            # layers keep their PyTorch defaults.
            if isinstance(m, t.nn.Conv2d) or isinstance(m, t.nn.Linear):
                weight_init(m.weight)
            if isinstance(m, t.nn.Linear):
                bias_init(m.bias)
        self.apply(init_weight)
class TimeDropout2d(t.nn.Dropout2d):
    """Dropout layer where the last (time) dimension is treated as channels.

    The input is permuted so its last axis becomes the channel axis, 2-D
    channel dropout is applied, and the original layout is restored.
    """
    def __init__(self, p=0.5, inplace=False):
        """See t.nn.Dropout2d for parameters."""
        super(TimeDropout2d, self).__init__(p=p, inplace=inplace)
    def forward(self, input):
        if not self.training:
            # Identity in eval mode, exactly like the stock dropout layers.
            return input
        # Move the time axis into the channel slot so dropout2d zeroes
        # whole time steps, then restore the original axis order.
        shuffled = input.permute(0, 3, 1, 2)
        shuffled = F.dropout2d(shuffled, self.p, True, self.inplace)
        return shuffled.permute(0, 2, 3, 1)
class Flatten(t.nn.Module):
    """Collapse every non-batch dimension of the input into a single one."""
    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,134
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ETHZ-CVL-AED/utils/meter.py
|
../../CIFAR-10/utils/meter.py
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,135
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import math
import torch.nn as nn
from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d
from quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d
from quantlab.indiv.ste_ops import STEActivation
from quantlab.ImageNet.MobileNetv2.mobilenetv2baseline import MobileNetv2Baseline
class MobileNetv2Residuals(MobileNetv2Baseline):
    """MobileNetv2 Convolutional Neural Network.

    Variant of the MobileNetv2 baseline in which selected inverted-residual
    stages are meant to be built from stochastically quantized layers
    (``StochasticActivation`` / ``StochasticConv2d``) while the remaining
    stages stay full-precision (``nn.Conv2d`` + ``ReLU6``).

    NOTE(review): construction always aborts at the ``assert(False)`` marked
    "IMPLEMENTATION INCOMPLETE" below, so this class is not usable as-is.
    Everything after that assert is an unreached construction sketch kept
    for reference.
    """
    def __init__(self, capacity=1, expansion=6, quant_schemes=None,
                 quantAct=True, quantActSTENumLevels=None, quantWeights=True,
                 weightInqSchedule=None, weightInqBits=2, weightInqStrategy="magnitude",
                 quantSkipFirstLayer=False):
        # capacity: width multiplier applied to every channel count.
        # expansion: expansion ratio of the inverted-residual bottlenecks.
        # quant_schemes: mapping from layer name to quantization-scheme args
        #   (presumably as expected by the Stochastic* ops — confirm upstream).
        # quantAct / quantWeights: enable quantized activations / weights.
        # quantActSTENumLevels: if set and > 0, use STE activations instead.
        # weightInqSchedule / weightInqBits / weightInqStrategy: INQ config.
        # quantSkipFirstLayer: accepted but unused in this (incomplete) class.
        super().__init__(capacity, expansion)
        # Per-stage channel counts: c* are stage output widths, t* the
        # corresponding expanded (hidden) widths of the bottlenecks.
        c0 = 3
        t0 = int(32 * capacity) * 1
        c1 = int(16 * capacity)
        t1 = c1 * expansion
        c2 = int(24 * capacity)
        t2 = c2 * expansion
        c3 = int(32 * capacity)
        t3 = c3 * expansion
        c4 = int(64 * capacity)
        t4 = c4 * expansion
        c5 = int(96 * capacity)
        t5 = c5 * expansion
        c6 = int(160 * capacity)
        t6 = c6 * expansion
        c7 = int(320 * capacity)
        c8 = max(int(1280 * capacity), 1280)
        def activ(name, nc):
            # Activation factory: STE or stochastic when quantizing
            # activations, plain ReLU otherwise.
            if quantAct:
                if quantActSTENumLevels != None and quantActSTENumLevels > 0:
                    return STEActivation(startEpoch=0,
                                         numLevels=quantActSTENumLevels)
                else:
                    return StochasticActivation(*quant_schemes[name], nc)
            else:
                assert(quantActSTENumLevels == None or quantActSTENumLevels <= 0)
                return nn.ReLU(inplace=True)
        def conv2d(name, ni, no, kernel_size=3, stride=1, padding=1, bias=False):
            # Convolution factory: stochastic, INQ, or plain nn.Conv2d.
            if quantWeights:
                if weightInqSchedule == None:
                    return StochasticConv2d(*quant_schemes[name], ni, no,
                                            kernel_size=kernel_size, stride=stride,
                                            padding=padding, bias=bias)
                else:
                    return INQConv2d(ni, no,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding, bias=bias,
                                     numBits=weightInqBits, strategy=weightInqStrategy)
            else:
                return nn.Conv2d(ni, no,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding, bias=bias)
        def linear(name, ni, no, bias=False):
            # Fully-connected factory, mirroring conv2d above.
            if quantWeights:
                if weightInqSchedule == None:
                    return StochasticLinear(*quant_schemes[name], ni, no, bias=bias)
                else:
                    return INQLinear(ni, no, bias=bias,
                                     numBits=weightInqBits, strategy=weightInqStrategy)
            else:
                return nn.Linear(ni, no, bias=bias)
        assert(False)  # IMPLEMENTATION INCOMPLETE!!!!
        # --- everything below is unreached (see assert above) ---
        # first block
        self.phi01_conv = nn.Conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)
        self.phi01_bn = nn.BatchNorm2d(t0)
        self.phi01_act = nn.ReLU6(inplace=True)
        self.phi02_conv = nn.Conv2d(t0, t0, kernel_size=3, stride=1, padding=1, groups=t0, bias=False)
        self.phi02_bn = nn.BatchNorm2d(t0)
        self.phi02_act = nn.ReLU6(inplace=True)
        self.phi03_conv = nn.Conv2d(t0, c1, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi03_bn = nn.BatchNorm2d(c1)
        # second block
        self.phi04_conv = nn.Conv2d(c1, t1, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi04_bn = nn.BatchNorm2d(t1)
        self.phi04_act = nn.ReLU6(inplace=True)
        self.phi05_conv = nn.Conv2d(t1, t1, kernel_size=3, stride=2, padding=1, groups=t1, bias=False)
        self.phi05_bn = nn.BatchNorm2d(t1)
        self.phi05_act = nn.ReLU6(inplace=True)
        self.phi06_conv = nn.Conv2d(t1, c2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi06_bn = nn.BatchNorm2d(c2)
        self.phi06_act = StochasticActivation(*quant_schemes['phi06_act'])
        self.phi07_conv = StochasticConv2d(*quant_schemes['phi07_conv'], c2, t2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi07_bn = nn.BatchNorm2d(t2)
        self.phi07_act = StochasticActivation(*quant_schemes['phi07_act'])
        self.phi08_conv = StochasticConv2d(*quant_schemes['phi08_conv'], t2, t2, kernel_size=3, stride=1, padding=1, groups=t2, bias=False)
        self.phi08_bn = nn.BatchNorm2d(t2)
        self.phi08_act = StochasticActivation(*quant_schemes['phi08_act'])
        self.phi09_conv = StochasticConv2d(*quant_schemes['phi09_conv'], t2, c2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi09_bn = nn.BatchNorm2d(c2)
        # third block
        self.phi10_conv = nn.Conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi10_bn = nn.BatchNorm2d(t2)
        self.phi10_act = nn.ReLU6(inplace=True)
        self.phi11_conv = nn.Conv2d(t2, t2, kernel_size=3, stride=2, padding=1, groups=t2, bias=False)
        self.phi11_bn = nn.BatchNorm2d(t2)
        self.phi11_act = nn.ReLU6(inplace=True)
        self.phi12_conv = nn.Conv2d(t2, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi12_bn = nn.BatchNorm2d(c3)
        self.phi12_act = StochasticActivation(*quant_schemes['phi12_act'])
        self.phi13_conv = StochasticConv2d(*quant_schemes['phi13_conv'], c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi13_bn = nn.BatchNorm2d(t3)
        self.phi13_act = StochasticActivation(*quant_schemes['phi13_act'])
        self.phi14_conv = StochasticConv2d(*quant_schemes['phi14_conv'], t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)
        self.phi14_bn = nn.BatchNorm2d(t3)
        self.phi14_act = StochasticActivation(*quant_schemes['phi14_act'])
        self.phi15_conv = StochasticConv2d(*quant_schemes['phi15_conv'], t3, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi15_bn = nn.BatchNorm2d(c3)
        self.phi15_act = StochasticActivation(*quant_schemes['phi15_act'])
        self.phi16_conv = StochasticConv2d(*quant_schemes['phi16_conv'], c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi16_bn = nn.BatchNorm2d(t3)
        self.phi16_act = StochasticActivation(*quant_schemes['phi16_act'])
        self.phi17_conv = StochasticConv2d(*quant_schemes['phi17_conv'], t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)
        self.phi17_bn = nn.BatchNorm2d(t3)
        self.phi17_act = StochasticActivation(*quant_schemes['phi17_act'])
        self.phi18_conv = StochasticConv2d(*quant_schemes['phi18_conv'], t3, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi18_bn = nn.BatchNorm2d(c3)
        # fourth block
        self.phi19_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi19_bn = nn.BatchNorm2d(t3)
        self.phi19_act = nn.ReLU6(inplace=True)
        self.phi20_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=2, padding=1, groups=t3, bias=False)
        self.phi20_bn = nn.BatchNorm2d(t3)
        self.phi20_act = nn.ReLU6(inplace=True)
        self.phi21_conv = nn.Conv2d(t3, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi21_bn = nn.BatchNorm2d(c4)
        self.phi21_act = StochasticActivation(*quant_schemes['phi21_act'])
        self.phi22_conv = StochasticConv2d(*quant_schemes['phi22_conv'], c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi22_bn = nn.BatchNorm2d(t4)
        self.phi22_act = StochasticActivation(*quant_schemes['phi22_act'])
        self.phi23_conv = StochasticConv2d(*quant_schemes['phi23_conv'], t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi23_bn = nn.BatchNorm2d(t4)
        self.phi23_act = StochasticActivation(*quant_schemes['phi23_act'])
        self.phi24_conv = StochasticConv2d(*quant_schemes['phi24_conv'], t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi24_bn = nn.BatchNorm2d(c4)
        self.phi24_act = StochasticActivation(*quant_schemes['phi24_act'])
        self.phi25_conv = StochasticConv2d(*quant_schemes['phi25_conv'], c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi25_bn = nn.BatchNorm2d(t4)
        self.phi25_act = StochasticActivation(*quant_schemes['phi25_act'])
        self.phi26_conv = StochasticConv2d(*quant_schemes['phi26_conv'], t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi26_bn = nn.BatchNorm2d(t4)
        self.phi26_act = StochasticActivation(*quant_schemes['phi26_act'])
        self.phi27_conv = StochasticConv2d(*quant_schemes['phi27_conv'], t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi27_bn = nn.BatchNorm2d(c4)
        self.phi27_act = StochasticActivation(*quant_schemes['phi27_act'])
        self.phi28_conv = StochasticConv2d(*quant_schemes['phi28_conv'], c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi28_bn = nn.BatchNorm2d(t4)
        self.phi28_act = StochasticActivation(*quant_schemes['phi28_act'])
        self.phi29_conv = StochasticConv2d(*quant_schemes['phi29_conv'], t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi29_bn = nn.BatchNorm2d(t4)
        self.phi29_act = StochasticActivation(*quant_schemes['phi29_act'])
        self.phi30_conv = StochasticConv2d(*quant_schemes['phi30_conv'], t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi30_bn = nn.BatchNorm2d(c4)
        # fifth block
        self.phi31_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi31_bn = nn.BatchNorm2d(t4)
        self.phi31_act = nn.ReLU6(inplace=True)
        self.phi32_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi32_bn = nn.BatchNorm2d(t4)
        self.phi32_act = nn.ReLU6(inplace=True)
        self.phi33_conv = nn.Conv2d(t4, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi33_bn = nn.BatchNorm2d(c5)
        self.phi33_act = StochasticActivation(*quant_schemes['phi33_act'])
        self.phi34_conv = StochasticConv2d(*quant_schemes['phi34_conv'], c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi34_bn = nn.BatchNorm2d(t5)
        self.phi34_act = StochasticActivation(*quant_schemes['phi34_act'])
        self.phi35_conv = StochasticConv2d(*quant_schemes['phi35_conv'], t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)
        self.phi35_bn = nn.BatchNorm2d(t5)
        self.phi35_act = StochasticActivation(*quant_schemes['phi35_act'])
        self.phi36_conv = StochasticConv2d(*quant_schemes['phi36_conv'], t5, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi36_bn = nn.BatchNorm2d(c5)
        self.phi36_act = StochasticActivation(*quant_schemes['phi36_act'])
        self.phi37_conv = StochasticConv2d(*quant_schemes['phi37_conv'], c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi37_bn = nn.BatchNorm2d(t5)
        self.phi37_act = StochasticActivation(*quant_schemes['phi37_act'])
        self.phi38_conv = StochasticConv2d(*quant_schemes['phi38_conv'], t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)
        self.phi38_bn = nn.BatchNorm2d(t5)
        self.phi38_act = StochasticActivation(*quant_schemes['phi38_act'])
        self.phi39_conv = StochasticConv2d(*quant_schemes['phi39_conv'], t5, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi39_bn = nn.BatchNorm2d(c5)
        # sixth block
        self.phi40_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi40_bn = nn.BatchNorm2d(t5)
        self.phi40_act = nn.ReLU6(inplace=True)
        self.phi41_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=2, padding=1, groups=t5, bias=False)
        self.phi41_bn = nn.BatchNorm2d(t5)
        self.phi41_act = nn.ReLU6(inplace=True)
        self.phi42_conv = nn.Conv2d(t5, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi42_bn = nn.BatchNorm2d(c6)
        self.phi42_act = StochasticActivation(*quant_schemes['phi42_act'])
        self.phi43_conv = StochasticConv2d(*quant_schemes['phi43_conv'], c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi43_bn = nn.BatchNorm2d(t6)
        self.phi43_act = StochasticActivation(*quant_schemes['phi43_act'])
        self.phi44_conv = StochasticConv2d(*quant_schemes['phi44_conv'], t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi44_bn = nn.BatchNorm2d(t6)
        self.phi44_act = StochasticActivation(*quant_schemes['phi44_act'])
        self.phi45_conv = StochasticConv2d(*quant_schemes['phi45_conv'], t6, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi45_bn = nn.BatchNorm2d(c6)
        self.phi45_act = StochasticActivation(*quant_schemes['phi45_act'])
        self.phi46_conv = StochasticConv2d(*quant_schemes['phi46_conv'], c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi46_bn = nn.BatchNorm2d(t6)
        self.phi46_act = StochasticActivation(*quant_schemes['phi46_act'])
        self.phi47_conv = StochasticConv2d(*quant_schemes['phi47_conv'], t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi47_bn = nn.BatchNorm2d(t6)
        self.phi47_act = StochasticActivation(*quant_schemes['phi47_act'])
        self.phi48_conv = StochasticConv2d(*quant_schemes['phi48_conv'], t6, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi48_bn = nn.BatchNorm2d(c6)
        # seventh block
        self.phi49_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi49_bn = nn.BatchNorm2d(t6)
        self.phi49_act = nn.ReLU6(inplace=True)
        self.phi50_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi50_bn = nn.BatchNorm2d(t6)
        self.phi50_act = nn.ReLU6(inplace=True)
        self.phi51_conv = nn.Conv2d(t6, c7, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi51_bn = nn.BatchNorm2d(c7)
        # classifier
        self.phi52_conv = nn.Conv2d(c7, c8, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi52_bn = nn.BatchNorm2d(c8)
        self.phi52_act = nn.ReLU6(inplace=True)
        self.phi53_avg = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)
        self.phi53_fc = nn.Linear(c8, 1000)
        self._initialize_weights()
    def _initialize_weights(self):
        # He-style normal init for convolutions, unit scale / zero shift for
        # batch norms, small normal for the linear classifier.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,136
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/AlexNet/alexnet.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import torch
import torch.nn as nn
from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d
from quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d
from quantlab.indiv.ste_ops import STEActivation
class AlexNet(nn.Module):
    """Quantized AlexNet (both weights and activations).

    Depending on the constructor flags, layers are built as stochastic
    quantized ops, INQ ops, or plain full-precision ``nn`` modules.
    ``forward`` optionally returns per-layer weight tensors for logging.
    """
    def __init__(self, capacity=1, quant_schemes=None,
                 quantAct=True, quantActSTENumLevels=None, quantWeights=True,
                 weightInqSchedule=None, weightInqBits=None, weightInqLevels=None,
                 weightInqStrategy="magnitude",
                 quantSkipFirstLayer=False, quantSkipLastLayer=False,
                 withDropout=False, alternateSizes=False, weightInqQuantInit=None):
        # capacity: width multiplier for the convolutional channel counts.
        # quant_schemes: mapping from layer name to quantization-scheme args.
        # quantAct / quantWeights: enable quantized activations / weights.
        # weightInq*: INQ configuration; weightInqBits is deprecated in favor
        #   of weightInqLevels (see translation below).
        # quantSkipFirstLayer / quantSkipLastLayer: keep those layers FP.
        # alternateSizes: use the LQ-net channel configuration instead.
        super().__init__()
        # At most one of the two INQ size specifications may be given.
        assert(weightInqBits == None or weightInqLevels == None)
        if weightInqBits != None:
            # Backward-compatibility shim: translate bit-width into the
            # equivalent number of quantization levels.
            print('warning: weightInqBits deprecated')
            if weightInqBits == 1:
                weightInqLevels = 2
            elif weightInqBits >= 2:
                weightInqLevels = 2**weightInqBits
            else:
                assert(False)
        def activ(name, nc):
            # Activation factory: STE or stochastic when quantizing
            # activations, plain ReLU otherwise.
            if quantAct:
                if quantActSTENumLevels != None and quantActSTENumLevels > 0:
                    return STEActivation(startEpoch=0,
                                         numLevels=quantActSTENumLevels)
                else:
                    return StochasticActivation(*quant_schemes[name], nc)
            else:
                assert(quantActSTENumLevels == None or quantActSTENumLevels <= 0)
                return nn.ReLU(inplace=True)
        def conv2d(name, ni, no, kernel_size=3, stride=1, padding=1, bias=False):
            # Convolution factory: stochastic, INQ, or plain nn.Conv2d.
            if quantWeights:
                if weightInqSchedule == None:
                    return StochasticConv2d(*quant_schemes[name], ni, no,
                                            kernel_size=kernel_size, stride=stride,
                                            padding=padding, bias=bias)
                else:
                    return INQConv2d(ni, no,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding, bias=bias,
                                     numLevels=weightInqLevels,
                                     strategy=weightInqStrategy,
                                     quantInitMethod=weightInqQuantInit)
            else:
                return nn.Conv2d(ni, no,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding, bias=bias)
        def linear(name, ni, no, bias=False):
            # Fully-connected factory, mirroring conv2d above.
            if quantWeights:
                if weightInqSchedule == None:
                    return StochasticLinear(*quant_schemes[name], ni, no, bias=bias)
                else:
                    return INQLinear(ni, no, bias=bias,
                                     numLevels=weightInqLevels,
                                     strategy=weightInqStrategy,
                                     quantInitMethod=weightInqQuantInit)
            else:
                return nn.Linear(ni, no, bias=bias)
        def dropout(p=0.5):
            # Dropout is optional; Identity keeps the layer indices stable.
            if withDropout:
                return nn.Dropout(p)
            else:
                return nn.Identity()
        if alternateSizes:
            #following LQ-net
            c0 = 3
            c1 = int(96 * capacity)
            c2 = int(256 * capacity)
            c3 = int(384 * capacity)
            c4 = int(384 * capacity)
            c5 = 256
            nh = 4096
        else:
            c0 = 3
            c1 = int(64 * capacity)
            c2 = int(192 * capacity)
            c3 = int(384 * capacity)
            c4 = int(256 * capacity)
            c5 = 256
            nh = 4096
        # convolutional layers
        if quantSkipFirstLayer:
            # Keep the input layer full-precision (common quantization practice).
            self.phi1_conv = nn.Conv2d(c0, c1, kernel_size=11,
                                       stride=4, padding=2, bias=False)
        else:
            self.phi1_conv = conv2d('phi1_conv', c0, c1, kernel_size=11,
                                    stride=4, padding=2, bias=False)
        self.phi1_mp = nn.MaxPool2d(kernel_size=3, stride=2)
        self.phi1_bn = nn.BatchNorm2d(c1)
        self.phi1_act = activ('phi1_act', c1)
        self.phi2_conv = conv2d('phi2_conv', c1, c2, kernel_size=5, padding=2, bias=False)
        self.phi2_mp = nn.MaxPool2d(kernel_size=3, stride=2)
        self.phi2_bn = nn.BatchNorm2d(c2)
        self.phi2_act = activ('phi2_act', c2)
        self.phi3_conv = conv2d('phi3_conv', c2, c3, kernel_size=3, padding=1, bias=False)
        self.phi3_bn = nn.BatchNorm2d(c3)
        self.phi3_act = activ('phi3_act', c3)
        self.phi4_conv = conv2d('phi4_conv', c3, c4, kernel_size=3, padding=1, bias=False)
        self.phi4_bn = nn.BatchNorm2d(c4)
        self.phi4_act = activ('phi4_act', c4)
        self.phi5_conv = conv2d('phi5_conv', c4, c5, kernel_size=3, padding=1, bias=False)
        self.phi5_mp = nn.MaxPool2d(kernel_size=3, stride=2)
        self.phi5_bn = nn.BatchNorm2d(c5)
        self.phi5_act = activ('phi5_act', c5)
        # fully connected layers
        self.phi6_do = dropout()
        self.phi6_fc = linear('phi6_fc', c5*6*6, nh, bias=False)
        self.phi6_bn = nn.BatchNorm1d(nh)
        self.phi6_act = activ('phi6_act', nh)
        self.phi7_do = dropout()
        self.phi7_fc = linear('phi7_fc', nh, nh, bias=False)
        self.phi7_bn = nn.BatchNorm1d(nh)
        self.phi7_act = activ('phi7_act', nh)
        if quantSkipLastLayer:
            # Keep the classifier full-precision.
            self.phi8_fc = nn.Linear(nh, 1000, bias=False)
        else:
            self.phi8_fc = linear('phi8_fc', nh, 1000, bias=False)
        self.phi8_bn = nn.BatchNorm1d(1000)
        if weightInqSchedule != None:
            # Controller that steps INQ freezing according to the schedule.
            self.inqController = INQController(INQController.getInqModules(self),
                                               weightInqSchedule,
                                               clearOptimStateOnStep=True)
    def forward(self, x, withStats=False):
        # Standard AlexNet pipeline: 5 conv stages, flatten, 3 FC stages.
        # When withStats is True, also returns (name, weight-tensor) pairs.
        x = self.phi1_conv(x)
        x = self.phi1_mp(x)
        x = self.phi1_bn(x)
        x = self.phi1_act(x)
        x = self.phi2_conv(x)
        x = self.phi2_mp(x)
        x = self.phi2_bn(x)
        x = self.phi2_act(x)
        x = self.phi3_conv(x)
        x = self.phi3_bn(x)
        x = self.phi3_act(x)
        x = self.phi4_conv(x)
        x = self.phi4_bn(x)
        x = self.phi4_act(x)
        x = self.phi5_conv(x)
        x = self.phi5_mp(x)
        x = self.phi5_bn(x)
        x = self.phi5_act(x)
        # Flatten all feature dimensions (C*H*W) into one.
        x = x.view(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())
        x = self.phi6_do(x)
        x = self.phi6_fc(x)
        x = self.phi6_bn(x)
        x = self.phi6_act(x)
        x = self.phi7_do(x)
        x = self.phi7_fc(x)
        x = self.phi7_bn(x)
        x = self.phi7_act(x)
        x = self.phi8_fc(x)
        x = self.phi8_bn(x)
        if withStats:
            stats = []
            stats.append(('phi1_conv_w', self.phi1_conv.weight.data))
            stats.append(('phi2_conv_w', self.phi2_conv.weight.data))
            stats.append(('phi3_conv_w', self.phi3_conv.weight.data))
            stats.append(('phi4_conv_w', self.phi4_conv.weight.data))
            stats.append(('phi5_conv_w', self.phi5_conv.weight.data))
            stats.append(('phi6_fc_w', self.phi6_fc.weight.data))
            stats.append(('phi7_fc_w', self.phi7_fc.weight.data))
            stats.append(('phi8_fc_w', self.phi8_fc.weight.data))
            return stats, x
        return x
    def forward_with_tensor_stats(self, x):
        # Convenience wrapper used by the logging infrastructure.
        stats, x = self.forward(x, withStats=True)
        return stats, x
if __name__ == '__main__':
    # Ad-hoc smoke test: build an INQ-quantized AlexNet (full-precision
    # activations, FP first layer) and fetch the torchvision reference model
    # to compare parameter sets against. Downloads pretrained weights.
    model = AlexNet(quantAct=False, quantWeights=True,
                    weightInqSchedule={}, weightInqBits=2,
                    weightInqStrategy="magnitude-SRQ",
                    quantSkipFirstLayer=True)
    import torchvision as tv
    modelRef = tv.models.alexnet(pretrained=True)
    stateDictRef = modelRef.state_dict()
    # batch normalization not in original model...?!
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,137
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/GoogLeNet/__init__.py
|
from .preprocess import load_data_sets
from .postprocess import postprocess_pr, postprocess_gt
from .googlenet import GoogLeNet
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,138
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/GoogLeNet/googlenet.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
# large parts of the code taken or adapted from torchvision
import warnings
from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
#from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d
from quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d
#from quantlab.indiv.ste_ops import STEActivation
# Download URLs for pretrained parameter sets, keyed by architecture name.
model_urls = {
    # GoogLeNet ported from TensorFlow
    'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}
class BasicConv2d(nn.Module):
    """Convolution -> BatchNorm -> ReLU building block.

    The convolution is INQ-quantized by default; pass ``quantized=False``
    for a plain full-precision ``nn.Conv2d``. Extra keyword arguments
    (kernel_size, stride, padding, INQ settings, ...) are forwarded to the
    chosen convolution class unchanged.
    """
    def __init__(self, in_channels, out_channels, quantized=True, **kwargs):
        super(BasicConv2d, self).__init__()
        # No conv bias: the batch norm that follows provides the shift.
        conv_cls = INQConv2d if quantized else nn.Conv2d
        self.conv = conv_cls(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)
class Inception(nn.Module):
    """GoogLeNet Inception module with four parallel branches.

    Branch outputs are concatenated along the channel dimension, so the
    module emits ``ch1x1 + ch3x3 + ch5x5 + pool_proj`` channels. All
    convolutions share the same INQ quantization settings.
    """
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj,
                 numLevels=3, strategy="magnitude", quantInitMethod=None):
        super(Inception, self).__init__()
        # Quantization settings shared by every convolution in this module.
        inq = dict(numLevels=numLevels, strategy=strategy,
                   quantInitMethod=quantInitMethod)
        # Branch 1: single 1x1 convolution.
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1, **inq)
        # Branch 2: 1x1 reduction then 3x3 convolution.
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1, **inq),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1, **inq),
        )
        # Branch 3: 1x1 reduction then a second 3x3 convolution (the
        # historically "5x5" branch; this port uses 3x3 here).
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1, **inq),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1, **inq),
        )
        # Branch 4: 3x3 max-pool then 1x1 projection.
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            BasicConv2d(in_channels, pool_proj, kernel_size=1, **inq),
        )
    def forward(self, x):
        # Evaluate the branches independently, then stack their feature
        # maps along the channel axis.
        return torch.cat([self.branch1(x), self.branch2(x),
                          self.branch3(x), self.branch4(x)], 1)
class GoogLeNet(nn.Module):
def __init__(self, num_classes=1000, quant_schemes=None,
quantWeights=True, quantAct=True,
weightInqSchedule=None, weightInqLevels=None,
weightInqStrategy="magnitude", weightInqQuantInit=None,
quantSkipFirstLayer=False, quantSkipLastLayer=False, pretrained=False):
super().__init__()
assert(quantAct == False)
assert(quantSkipFirstLayer)
assert(quantSkipLastLayer)
self.conv1 = BasicConv2d(3, 64, quantized=False,
kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = BasicConv2d(64, 64, kernel_size=1,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128,
numLevels=weightInqLevels,
strategy=weightInqStrategy,
quantInitMethod=weightInqQuantInit)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(1024, num_classes)
self._initialize_weights()
if pretrained:
from torch.hub import load_state_dict_from_url
state_dict = load_state_dict_from_url(model_urls['googlenet'])
missing_keys, unexpected_keys = self.load_state_dict(state_dict, strict=False)
#filter out expected mismatches
#(missing auxiliary outputs in model, missing INQ params in pretrained data)
missing_keys_nonInq = [s for s in missing_keys
if not (s.endswith('.sParam') or
s.endswith('.weightFrozen'))]
unexpected_keys_nonAux = [s for s in unexpected_keys
if not s.startswith('aux')]
assert(len(unexpected_keys_nonAux) == 0)
assert(len(missing_keys_nonInq) == 0)
if weightInqSchedule != None:
self.inqController = INQController(INQController.getInqModules(self),
weightInqSchedule,
clearOptimStateOnStep=True)
def _initialize_weights(self):
for m in self.modules():
if (isinstance(m, nn.Conv2d) or
isinstance(m, INQConv2d) or
isinstance(m, nn.Linear)):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x, withStats=False):
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
if withStats:
stats = []
return stats, x
return x
def forward_with_tensor_stats(self, x):
    """Forward pass that also returns the (currently empty) list of tensor statistics."""
    return self.forward(x, withStats=True)
if __name__ == "__main__":
    # Manual experimentation script: build an INQ-quantized GoogLeNet
    # (first and last layers kept full-precision) from pretrained weights.
    model = GoogLeNet(quantAct=False, weightInqSchedule={},
                      quantSkipFirstLayer=True, quantSkipLastLayer=True,
                      pretrained=True)
    loadModel = False
    if loadModel:
        # Optionally restore a trained checkpoint and plot weight
        # histograms of a few selected layers.
        # path = '../../../ImageNet/logs/exp038/saves/best-backup.ckpt' # BWN
        # path = '../../../ImageNet/logs/exp043/saves/best.ckpt' # TWN
        path = '../../../ImageNet/logs/exp054/saves/best.ckpt' # BWN
        fullState = torch.load(path, map_location='cpu')
        netState = fullState['indiv']['net']
        model.load_state_dict(netState)
        import matplotlib.pyplot as plt
        layerNames = list(netState.keys())
        # NOTE(review): these names look ResNet-style ('layer4.0.conv1');
        # confirm they actually exist in this GoogLeNet state dict.
        selectedLayers = ['layer4.0.conv1',
                          'layer2.1.conv2',
                          'layer1.0.conv2']
        # selectedLayers = [l + '.weight' for l in selectedLayers]
        selectedLayers = [l + '.weightFrozen' for l in selectedLayers]
        _, axarr = plt.subplots(len(selectedLayers))
        for ax, layerName in zip(axarr, selectedLayers):
            plt.sca(ax)
            plt.hist(netState[layerName].flatten(),
                     bins=201, range=(-3,3))
            plt.xlim(-3,3)
            plt.title(layerName)
    exportONNX = False
    if exportONNX:
        # Export a full-precision variant to ONNX for inspection/deployment.
        modelFullPrec = GoogLeNet(quantAct=False, quantWeights=False,
                                  weightInqSchedule={},
                                  quantSkipFirstLayer=True,
                                  quantSkipLastLayer=True,
                                  pretrained=True)
        dummyInput = torch.randn(1, 3, 224, 224)
        pbuf = torch.onnx.export(modelFullPrec, dummyInput,
                                 "export.onnx", verbose=True,
                                 input_names=['input'],
                                 output_names=['output'])
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,139
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
import torch
def postprocess_pr(pr_outs):
    """Convert network outputs (N x C class scores) to a list of predicted class indices."""
    predictions = pr_outs.detach().cpu().argmax(dim=1)
    return [int(p) for p in predictions]
def postprocess_gt(gt_labels):
    """Convert a 1-D tensor of ground-truth labels to a plain Python list."""
    labels = gt_labels.detach().cpu()
    return [label.item() for label in labels]
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,140
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/MobileNetv2/preprocess.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import os
import torch
import torchvision
from torchvision.transforms import RandomResizedCrop, RandomHorizontalFlip, Resize, RandomCrop, CenterCrop, ToTensor, Normalize, Compose
# ImageNet preprocessing constants:
#  - 'Normalize': per-channel mean/std used to standardize input images
#  - 'PCA': eigenvalues/eigenvectors used by the AlexNet-style PCA
#    lighting augmentation (see Lighting below)
_ImageNet = {
    'Normalize': {
        'mean': (0.485, 0.456, 0.406),
        'std': (0.229, 0.224, 0.225)
    },
    'PCA': {
        'eigvals': torch.Tensor([0.2175, 0.0188, 0.0045]),
        'eigvecs': torch.Tensor([[-0.5675, 0.7192, 0.4009],
                                 [-0.5808, -0.0045, -0.8140],
                                 [-0.5836, -0.6948, 0.4203]])
    }
}
class Grayscale(object):
    """Convert an RGB tensor image (3 x H x W) to grayscale.

    Uses the Rec. 601 luma coefficients; all three output channels
    receive the same luma value. The input tensor is left unmodified
    (a clone is returned).
    """

    def __init__(self):
        self._Rec601 = {
            'red': 0.299,
            'green': 0.587,
            'blue': 0.114
        }

    def __call__(self, img):
        # uses the Recommendation 601 (Rec. 601) RGB-to-YCbCr conversion:
        #   luma = 0.299 R + 0.587 G + 0.114 B
        gs = img.clone()
        # Bug fix: the legacy `add_(scalar, tensor)` overload was removed
        # from PyTorch; the scaling factor must be passed as `alpha=`.
        gs[0].mul_(self._Rec601['red'])
        gs[0].add_(gs[1], alpha=self._Rec601['green'])
        gs[0].add_(gs[2], alpha=self._Rec601['blue'])
        gs[1].copy_(gs[0])
        gs[2].copy_(gs[0])
        return gs
class Brightness(object):
    """Randomly darken an image: blend towards black by a factor drawn from [0, alphamax]."""

    def __init__(self, alphamax):
        self.alphamax = alphamax

    def __call__(self, img):
        # alpha = 0 leaves the image unchanged;
        # alpha = alphamax (<= 1.) blends fully towards black.
        alpha = self.alphamax * torch.rand(1).item()
        black = torch.zeros_like(img)
        return torch.lerp(img, black, alpha)
class Contrast(object):
    """Randomly reduce contrast: blend towards the mean gray level by a random factor."""

    def __init__(self, alphamax):
        self.alphamax = alphamax
        self.grayscale = Grayscale()

    def __call__(self, img):
        # alpha = 0 leaves the image unchanged; alpha = alphamax (<= 1.)
        # replaces it with the uniform mean of its grayscale version.
        target = self.grayscale(img)
        target.fill_(target.mean())
        alpha = self.alphamax * torch.rand(1).item()
        return torch.lerp(img, target, alpha)
class Saturation(object):
    """Randomly desaturate: blend towards the grayscale image by a random factor."""

    def __init__(self, alphamax):
        self.alphamax = alphamax
        self.grayscale = Grayscale()

    def __call__(self, img):
        # alpha = 0 leaves the image unchanged; alpha = alphamax (<= 1.)
        # replaces it with its grayscale version.
        target = self.grayscale(img)
        alpha = self.alphamax * torch.rand(1).item()
        return torch.lerp(img, target, alpha)
class ColorJitter(object):
    """Apply Brightness/Contrast/Saturation jitter in a random order.

    A transform whose alphamax is 0 is not instantiated at all, so it is
    skipped entirely.
    """

    def __init__(self, brightness_amax=0.4, contrast_amax=0.4, saturation_amax=0.4):
        self.transforms = []
        if brightness_amax != 0.:
            self.transforms.append(Brightness(alphamax=brightness_amax))
        if contrast_amax != 0.:
            self.transforms.append(Contrast(alphamax=contrast_amax))
        if saturation_amax != 0.:
            self.transforms.append(Saturation(alphamax=saturation_amax))

    def __call__(self, img):
        if self.transforms is not None:
            # shuffle the application order on every call
            for idx in torch.randperm(len(self.transforms)):
                img = self.transforms[idx](img)
        return img
class Lighting(object):
    """AlexNet-style, PCA-based lighting noise."""

    def __init__(self, pcaparams, alphastd=0.1):
        self.eigvals = pcaparams['eigvals']
        self.eigvecs = pcaparams['eigvecs']
        self.alphastd = alphastd

    def __call__(self, img):
        # Add to every pixel the random RGB offset
        #   \sum_j V^{(j)} (\alpha_j \Lambda_j)
        # where V^{(j)} are the principal components (columns of eigvecs),
        # \Lambda_j the eigenvalues and \alpha_j ~ N(0, alphastd).
        if self.alphastd == 0.:
            return img
        alpha = img.new_tensor(0).resize_(3).normal_(0, self.alphastd)
        scaled = torch.mul(alpha.view(1, 3), self.eigvals.view(1, 3))
        rgb_noise = torch.mul(self.eigvecs.type_as(img).clone(), scaled).sum(1)
        return torch.add(img, rgb_noise.view(3, 1, 1).expand_as(img))
def get_transforms(augment):
    """Build the training/validation transform pipelines.

    augment:
      False          -> training reuses the validation pipeline
      True           -> random crop/flip + color jitter + PCA lighting
      "torchvision"  -> random crop/flip only
      "torchvision2" -> resize + random crop + flip

    Returns a dict with 'training' and 'validation' Compose pipelines.
    Raises ValueError for any other value of `augment`.
    """
    valid_t = Compose([Resize(256),
                       CenterCrop(224),
                       ToTensor(),
                       Normalize(**_ImageNet['Normalize'])])
    if augment == False:
        train_t = valid_t
    elif augment == True:
        train_t = Compose([RandomResizedCrop(224),
                           RandomHorizontalFlip(),
                           ToTensor(),
                           ColorJitter(),
                           Lighting(_ImageNet['PCA']),
                           Normalize(**_ImageNet['Normalize'])])
    elif augment == "torchvision":
        train_t = Compose([RandomResizedCrop(224),
                           RandomHorizontalFlip(),
                           ToTensor(),
                           Normalize(**_ImageNet['Normalize'])])
    elif augment == "torchvision2":
        train_t = Compose([Resize(256),
                           RandomCrop(224),
                           RandomHorizontalFlip(),
                           ToTensor(),
                           Normalize(**_ImageNet['Normalize'])])
    else:
        # was `assert(False)`: assertions are stripped under `python -O`,
        # so fail loudly with an informative error instead
        raise ValueError("unknown augmentation mode: {!r}".format(augment))
    transforms = {
        'training': train_t,
        'validation': valid_t
    }
    return transforms
def load_data_sets(dir_data, data_config):
    """Create the ImageNet train/valid/test ImageFolder datasets under `dir_data`."""
    transforms = get_transforms(data_config['augment'])
    train_set = torchvision.datasets.ImageFolder(
        os.path.join(dir_data, 'train'), transforms['training'])
    valid_set = torchvision.datasets.ImageFolder(
        os.path.join(os.path.realpath(dir_data), 'val'), transforms['validation'])
    # there is no separate test split: validation doubles as test
    return train_set, valid_set, valid_set
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,141
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/MobileNetv2/__init__.py
|
from .preprocess import load_data_sets
from .postprocess import postprocess_pr, postprocess_gt
from .mobilenetv2baseline import MobileNetv2Baseline
from .mobilenetv2residuals import MobileNetv2Residuals
from .mobilenetv2quantWeight import MobileNetv2QuantWeight
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,142
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/indiv/daemon.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
import torch
import torch.nn as nn
from .transfer import load_pretrained
def get_topo(logbook):
    """Return a network for the experiment and the loss function for training."""
    # instantiate the network topology declared in the experiment config
    net_config = logbook.config['indiv']['net']
    net_class = net_config['class']
    if net_class not in logbook.module.__dict__:
        raise ValueError('Network topology {} is not defined for problem {}'.format(net_class, logbook.problem))
    net = getattr(logbook.module, net_class)(**net_config['params'])
    # restore weights: an experiment checkpoint takes precedence over
    # pretrained initialization
    if logbook.ckpt:
        net.load_state_dict(logbook.ckpt['indiv']['net'])
    elif net_config['pretrained']:
        load_pretrained(logbook, net)
    # move to GPU when available; wrap in DataParallel on multi-GPU hosts
    if torch.cuda.is_available():
        device = torch.cuda.current_device()
    else:
        device = torch.device('cpu')
    net = net.to(device)
    net_maybe_par = nn.DataParallel(net) if torch.cuda.device_count() > 1 else net
    # build the loss function: it may come from torch.nn or be
    # problem-specific (defined in the problem module)
    loss_fn_config = logbook.config['indiv']['loss_function']
    loss_fn_dict = {**nn.__dict__, **logbook.module.__dict__}
    if loss_fn_config['class'] not in loss_fn_dict:
        raise ValueError('Loss function {} is not defined.'.format(loss_fn_config['class']))
    loss_fn_class = loss_fn_dict[loss_fn_config['class']]
    # some custom losses take the network itself as first argument
    if 'net' in loss_fn_class.__init__.__code__.co_varnames:
        loss_fn = loss_fn_class(net, **loss_fn_config['params'])
    else:
        loss_fn = loss_fn_class(**loss_fn_config['params'])
    return net, net_maybe_par, device, loss_fn
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,143
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/indiv/ste_ops.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import torch
import quantlab.indiv as indiv
class ClampWithGradInwards(torch.autograd.Function):
    """Clamp the input to [low, high].

    Gradients pass through for inputs strictly inside the range; at the
    boundaries they pass only when they point inward (would move the
    value back into the range); outside the range they are zeroed.
    """

    @staticmethod
    def forward(ctx, x, low, high):
        ctx.save_for_backward(x, low, high)
        return x.clamp(low.item(), high.item())

    @staticmethod
    def backward(ctx, grad_incoming):
        x, low, high = ctx.saved_tensors
        grad_outgoing = grad_incoming.clone()
        # zero the gradient outside the range ...
        grad_outgoing[x > high] = 0
        grad_outgoing[x < low] = 0
        # ... and on the boundary when it points outward
        grad_outgoing[(x == high) & (grad_incoming < 0)] = 0
        grad_outgoing[(x == low) & (grad_incoming > 0)] = 0
        return grad_outgoing, None, None
def clampWithGrad(x, low, high):
    """Clamp x to [low, high] in the forward pass; pass gradients straight through (STE)."""
    clamped = x.clamp(low, high)
    return x + (clamped - x).detach()
def clampWithGradInwards(x, low, high):
    """Functional wrapper: clamp to [low, high] with inward-only boundary gradients."""
    # Call `apply` on the class itself: instantiating a torch.autograd.Function
    # is a legacy (pre-0.4) pattern that modern PyTorch no longer supports.
    return ClampWithGradInwards.apply(x, x.new([low]), x.new([high]))
def STERoundFunctional(x):
    """Round x in the forward pass; pass gradients straight through (STE)."""
    rounded = x.round()
    return x + (rounded - x).detach()
def STEFloorFunctional(x):
    """Floor x towards zero in the forward pass; pass gradients straight through (STE).

    Despite the name this is truncation-like: floor(x) + 1 for negative x.
    NOTE(review): for exactly-integer negative inputs (e.g. -2.0) this
    yields -1.0 rather than -2.0 — confirm that this is the intended
    quantization behavior.
    """
    is_negative = (x < 0).to(dtype=torch.float)
    shifted = x.floor() + is_negative
    return x + (shifted - x).detach()
class STEController(indiv.Controller):
    """Drives STEActivation modules: notifies them of epoch transitions."""

    def __init__(self, modules, clearOptimStateOnStart=False):
        super().__init__()
        self.modules = modules
        self.clearOptimStateOnStart = clearOptimStateOnStart

    def step(self, epoch, optimizer=None, tensorboardWriter=None):
        """Advance every controlled STE module to `epoch`."""
        for module in self.modules:
            module.step(epoch, self.clearOptimStateOnStart, optimizer)

    @staticmethod
    def getSteModules(net):
        """Collect all STEActivation submodules of `net`."""
        return [m for m in net.modules() if isinstance(m, STEActivation)]
class STEActivation(torch.nn.Module):
    """Quantizes activations according to the straight-through estimator (STE).

    Needs a STEController, if startEpoch > 0.

    monitorEpoch: In this epoch, keep track of the maximal activation value
        (absolute value). Then (at epoch >= startEpoch), clamp the values to
        [-max, max], and then do quantization. If monitorEpoch is None,
        max = 1 is used.
    numLevels: number of discrete quantization levels (>= 2).
    passGradsWhenClamped: if True, clamped values still pass inward-pointing
        gradients instead of blocking them entirely.
    floorToZero: quantize by flooring towards zero instead of rounding.
    """
    def __init__(self, startEpoch=0, numLevels=3, passGradsWhenClamped=False, monitorEpoch=None, floorToZero=False):
        super().__init__()
        self.startEpoch = startEpoch
        self.started = startEpoch <= 0
        self.monitorEpoch = monitorEpoch
        self.monitoring = False
        if monitorEpoch is not None:
            self.monitoring = monitorEpoch == 1  # because the epoch starts at epoch 1
            assert(startEpoch > monitorEpoch)
        self.floorToZero = floorToZero
        assert(numLevels >= 2)
        self.numLevels = numLevels
        self.passGradsWhenClamped = passGradsWhenClamped
        # running |activation| maximum; a non-trainable Parameter so it is
        # saved and restored together with the model state
        self.absMaxValue = torch.nn.Parameter(torch.ones(1),
                                              requires_grad=False)
    def forward(self, x):
        if self.monitoring:
            # track the largest absolute activation seen so far this epoch
            self.absMaxValue.data[0] = max(x.abs().max(), self.absMaxValue.item())
        if self.started:
            x = x / self.absMaxValue.item()  # map from [-max, max] to [-1, 1]
            if self.passGradsWhenClamped:
                xclamp = clampWithGradInwards(x, -1, 1)
            else:
                xclamp = x.clamp(-1, 1)
            y = xclamp
            if self.floorToZero:
                # quantize by flooring towards zero on the [-1, 1] scale
                y = STEFloorFunctional(y*((self.numLevels - 1)/2))/((self.numLevels - 1)/2)
            else:
                y = (y + 1)/2  # map from [-1,1] to [0,1]
                # round to numLevels evenly spaced levels on [0, 1]
                y = STERoundFunctional(y*(self.numLevels - 1))/(self.numLevels - 1)
                y = 2*y - 1
            y = y * self.absMaxValue.item()  # map from [-1, 1] to [-max, max]
        else:
            # quantization has not started yet: pass activations through
            y = x
        return y
    def step(self, epoch, clearOptimStateOnStart, optimizer):
        """Epoch-transition hook, called by STEController."""
        if clearOptimStateOnStart and epoch == self.startEpoch:
            # drop stale optimizer statistics (e.g. momentum) accumulated
            # on the not-yet-quantized network
            optimizer.state.clear()
        if epoch >= self.startEpoch:
            self.started = True
        if self.monitorEpoch is not None and epoch == self.monitorEpoch:
            self.monitoring = True
            self.absMaxValue.data[0] = 0.0  # restart the running maximum
        else:
            self.monitoring = False
if __name__ == "__main__":
    # Smoke test: quantize a scaled random vector with a binary STE and
    # check that gradients flow back to the leaf tensor.
    leaf = torch.randn(10, requires_grad=True)
    scaled = leaf * 2
    quantized = STEActivation(numLevels=2)(scaled)
    loss = quantized.norm(2)  # pull activations towards 0
    loss.backward()
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,144
|
xiaywang/QuantLab
|
refs/heads/master
|
/eegnet_run.py
|
import os
import shutil
import json
import sys
import numpy as np
from contextlib import redirect_stdout, redirect_stderr
import progress
from tqdm import tqdm
import pickle
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from main import main as quantlab_main
# Experiment identifiers and file-layout constants.
PROBLEM = "BCI-CompIV-2a"
TOPOLOGY = "EEGNet"
EXP_FOLDER = "logs/exp{}"
# measurement id: selects the measurement config and the export file names
MEAS_ID = 12
INQ_CONFIG = f"measurement/M{MEAS_ID:02}.json"
BAK_CONFIG = ".config_backup.json"
MAIN_CONFIG = "config.json"
# experiments of measurement M are numbered M*100 + subject
EXP_BASE = MEAS_ID * 100
EXPORT_FILE = f"logs/measurement_{MEAS_ID:02}" + "_{}.npz"
EXPORT_GRID_FILE = 'logs/grid_{}.npz'
# which of the two measurement modes to run
BENCHMARK = True
GRID_MEASUREMENT = False
# number of repeated training runs per subject
N_ITER = 15
def single_iter(bar=None, silent=False, n_weights=None, n_activ=None):
    """Train all 9 subjects once; return a (9, 4) array of loss/accuracy stats."""
    iter_stats = np.zeros((9, 4))
    with TestEnvironment():
        for idx in range(9):
            subject = idx + 1
            stats = _do_subject(subject, bar, silent, n_weights=n_weights, n_activ=n_activ)
            if not silent:
                print(f"Subject {subject}: quantized accuracy: {stats['valid_acc']:.4f} ")
            iter_stats[idx] = np.array([stats['train_loss'],
                                        stats['train_acc'],
                                        stats['valid_loss'],
                                        stats['valid_acc']])
    if not silent:
        print(f"Average quantized accuracy = {iter_stats.mean(axis=0)[3]}")
    return iter_stats
def grid_measurement():
    """Sweep weight/activation quantization levels; pickle all collected stats."""
    # full cartesian grid over the quantization levels, weights-major order
    levels = (255, 127, 63, 31, 15)
    cases = [(w, a) for w in levels for a in levels]
    stats = {}
    with tqdm(desc=f'Grid Searching on measurement {MEAS_ID:02}', total=N_ITER * 9 * len(cases),
              ascii=True) as bar:
        for n_weights, n_activ in cases:
            case_stats = np.zeros((N_ITER, 9, 4))
            for it in range(N_ITER):
                case_stats[it, :, :] = single_iter(bar=bar, silent=True,
                                                   n_weights=n_weights, n_activ=n_activ)
            stats[(n_weights, n_activ)] = case_stats
    legend = ["train_loss", "train_acc", "valid_loss", "valid_acc"]
    # persist the raw results
    filename = os.path.join(PROBLEM, 'grid_results.pkl')
    with open(filename, 'wb') as _f:
        pickle.dump({"stats": stats, "legend": legend}, _f)
def benchmark():
    """Repeat the full 9-subject measurement N_ITER times and print summary statistics."""
    stats = np.zeros((N_ITER, 9, 4))
    with tqdm(desc=f'Benchmarking Measurement {MEAS_ID:02}', total=N_ITER * 9, ascii=True) as bar:
        for i in range(N_ITER):
            stats[i, :, :] = single_iter(bar=bar, silent=True)
            # checkpoint after every iteration so a crash does not lose data
            # NOTE(review): this overwrites the same file with only the current
            # iteration's slice — confirm keeping just the last one is intended
            np.savez(file=os.path.join(PROBLEM, EXPORT_FILE.format("runs")),
                     train_loss=stats[i, :, 0],
                     train_acc=stats[i, :, 1],
                     valid_loss=stats[i, :, 2],
                     valid_acc=stats[i, :, 3])
    # mean/std over iterations, per subject
    avg_stats = stats.mean(axis=0)
    std_stats = stats.std(axis=0)
    # overall score: average the per-subject means (and per-subject stds)
    mean_avg_stats = avg_stats.mean(axis=0)
    mean_std_stats = std_stats.mean(axis=0)
    print(f"Total Average Accuracy: {mean_avg_stats[3]:.4f} +- {mean_std_stats[3]:.4f}\n")
    for i in range(9):
        print(f"subject {i+1}: quantized model = {avg_stats[i,3]:.4f} +- {std_stats[i,3]:.4f}")
def _do_subject(subject, bar=None, silent=False, n_weights=None, n_activ=None):
    """Train the quantized model for one subject and return its formatted stats."""
    exp_id = EXP_BASE + subject
    if not silent:
        print(f"Subject {subject}: training quantized model (exp{exp_id})...\r", end='',
              flush=True)
    # patch the measurement config for this subject / quantization setting
    modification = {'treat.data.subject': subject}
    if n_weights is not None:
        modification['indiv.net.params.weightInqNumLevels'] = n_weights
        modification["indiv.net.params.first_layer_only"] = True
    if n_activ is not None:
        modification['indiv.net.params.actSTENumLevels'] = n_activ
    valid_stats, train_stats = _execute_quantlab(INQ_CONFIG, exp_id, modification)
    if bar is not None:
        bar.update()
    # archive the logs of this run
    if BENCHMARK or GRID_MEASUREMENT:
        _just_store_anything(subject, exp_id, n_weights=n_weights, n_activ=n_activ)
    return _format_all_stats(train_stats, valid_stats)
def _execute_quantlab(config_file, exp_id, modify_keys=None):
    """Run one quantlab training experiment and return (valid_stats, train_stats).

    config_file: measurement config (relative to PROBLEM) used as template.
    exp_id: experiment id; its previous log folder is wiped first.
    modify_keys: optional dict mapping dotted config paths to new values.
    """
    # remove all the logs of the previous quantized training experiment
    log_folder = os.path.join(PROBLEM, EXP_FOLDER.format(exp_id))
    if os.path.exists(log_folder):
        shutil.rmtree(log_folder)
    # load configuration
    with open(os.path.join(PROBLEM, config_file)) as _fp:
        config = json.load(_fp)
    # apply the requested overrides (bug fix: tolerate the default
    # modify_keys=None, which previously crashed on `None.items()`)
    if modify_keys:
        for path, value in modify_keys.items():
            _set_dict_value(config, path, value)
    # store the configuration back as config.json
    # (opening with "w" truncates, so removing the old file first is not needed)
    with open(os.path.join(PROBLEM, MAIN_CONFIG), "w") as _fp:
        json.dump(config, _fp)
    # execute quantlab with stdout/stderr suppressed
    with open(os.devnull, 'w') as devnull, redirect_stderr(devnull), redirect_stdout(devnull):
        train_stats, stats = quantlab_main(PROBLEM, TOPOLOGY, exp_id, 'best', 'train', 10, 1, False,
                                           True)
    return stats, train_stats
def _format_all_stats(train_stats, valid_stats):
stats = {}
for key, value in train_stats.items():
if key.endswith("loss"):
stats['train_loss'] = value
if key.endswith("metric"):
stats['train_acc'] = value
for key, value in valid_stats.items():
if key.endswith("loss"):
stats['valid_loss'] = value
if key.endswith("metric"):
stats['valid_acc'] = value
return stats
def _format_stats(ref_stats, quant_stats=None):
stats = {}
if quant_stats is None:
for key, value in ref_stats.items():
if key.endswith("loss"):
stats['loss'] = value
if key.endswith("metric"):
stats['acc'] = value
else:
for key, value in ref_stats.items():
if key.endswith("loss"):
stats['float_loss'] = value
if key.endswith("metric"):
stats['float_acc'] = value
for key, value in quant_stats.items():
if key.endswith("loss"):
stats['quant_loss'] = value
if key.endswith("metric"):
stats['quant_acc'] = value
return stats
def _set_dict_value(d, path, value):
keys = path.split('.')
d_working = d
for key in keys[:-1]:
d_working = d_working[key]
d_working[keys[-1]] = value
def _just_store_anything(subject, exp_id, n_weights=None, n_activ=None):
    """Append this run's full tensorboard scalar curves to a per-subject .npz.

    Unlike _accumulate_logs (which keeps a running mean), this stacks each
    run's curves along a new leading trial axis, preserving every trial.
    The npz file also carries a 'num_trials' counter.
    """
    # extract name of logfile
    stats_folder = os.path.join(PROBLEM, EXP_FOLDER.format(exp_id), "stats")
    log_files = os.listdir(stats_folder)
    # exactly one tensorboard event file is expected per experiment
    assert(len(log_files) == 1)
    log_file = os.path.join(stats_folder, log_files[0])
    # get eventaccumulator
    ea = EventAccumulator(log_file)
    ea.Reload()
    # load data file
    # grid runs are keyed by weight/activation level counts as well as subject
    if GRID_MEASUREMENT:
        name_addon = f"data_W{n_weights}_A{n_activ}_S{subject:02}"
    else:
        name_addon = f"data_S{subject:02}"
    data_file = os.path.join(PROBLEM, EXPORT_FILE.format(name_addon))
    if os.path.exists(data_file):
        with np.load(data_file) as data_loader:
            data = dict(data_loader)
    else:
        # first trial for this configuration: start a fresh accumulator
        data = {'num_trials': 0}
    # update the data dictionary to keep the mean value
    num_trials = data['num_trials']
    for key in ea.Tags()['scalars']:
        new_arr = _prepare_scalar_array_from_tensorboard(ea, key)
        # add a leading trial axis so runs can be stacked
        new_arr = np.array([new_arr])
        if num_trials == 0:
            # just add the data
            data[key] = new_arr
        else:
            # later trials must report the same scalar tags as the first
            assert(key in data)
            data[key] = np.concatenate((data[key], new_arr), axis=0)
    data['num_trials'] += 1
    # store data back into the same file
    np.savez(data_file, **data)
def _accumulate_logs(subject, exp_id):
    """Fold this run's tensorboard scalar curves into a per-subject running mean.

    Loads (or creates) the subject's .npz data file and updates each scalar
    curve as an incremental mean over 'num_trials' runs.
    """
    # extract name of logfile
    stats_folder = os.path.join(PROBLEM, EXP_FOLDER.format(exp_id), "stats")
    log_files = os.listdir(stats_folder)
    # exactly one tensorboard event file is expected per experiment
    assert(len(log_files) == 1)
    log_file = os.path.join(stats_folder, log_files[0])
    # get eventaccumulator
    ea = EventAccumulator(log_file)
    ea.Reload()
    # load data file
    name_addon = f"data_S{subject:02}"
    data_file = os.path.join(PROBLEM, EXPORT_FILE.format(name_addon))
    if os.path.exists(data_file):
        with np.load(data_file) as data_loader:
            data = dict(data_loader)
    else:
        data = {'num_trials': 0}
    # update the data dictionary to keep the mean value
    num_trials = data['num_trials']
    for key in ea.Tags()['scalars']:
        new_arr = _prepare_scalar_array_from_tensorboard(ea, key)
        if num_trials == 0:
            # just add the data
            data[key] = new_arr
        else:
            assert(key in data)
            # incremental mean: old_mean * n + new, divided by n + 1
            # NOTE(review): assumes every run yields curves of identical
            # length — confirm runs always train the same number of epochs
            data[key] = (data[key] * num_trials + new_arr) / (num_trials + 1)
    data['num_trials'] += 1
    # store data back into the same file
    np.savez(data_file, **data)
def _prepare_scalar_array_from_tensorboard(ea, key, start_step=1):
    """Return the scalar series for `key` as a dense per-step numpy array.

    If one event was logged per step, the values are returned directly.
    Otherwise the sparse events are expanded: each logged value is held
    constant until the next event's step (steps before the first event are
    left at zero).
    """
    # fast path: contiguous logging, one event per step
    if ea.Scalars(key)[-1].step == len(ea.Scalars(key)):
        return np.array([x.value for x in ea.Scalars(key)])
    else:
        arr = np.zeros(ea.most_recent_step)
        entries = ea.Scalars(key)
        # we assume the value is zero at the beginning
        for i_entry in range(len(entries)):
            # steps are offset by start_step to become 0-based array indices
            # NOTE(review): the final end_idx adds +1 while the non-final
            # branch does not subtract start_step from the next entry's step
            # — confirm this asymmetry against actual event step numbering
            start_idx = entries[i_entry].step - start_step
            end_idx = entries[i_entry + 1].step if i_entry + 1 < len(entries) else \
                ea.most_recent_step - start_step + 1
            arr[start_idx:end_idx] = entries[i_entry].value
        return arr
class TestEnvironment():
    """Context manager that sandboxes a quantlab run.

    On entry, backs up any pre-existing config.json and silences `progress`
    bar output; on exit, removes the generated config.json, restores the
    backup, and re-enables progress output.
    """
    def __enter__(self):
        # backup config.json if it exists
        if os.path.exists(os.path.join(PROBLEM, MAIN_CONFIG)):
            os.rename(os.path.join(PROBLEM, MAIN_CONFIG),
                      os.path.join(PROBLEM, BAK_CONFIG))
        # hide progress default output
        self.devnull = open(os.devnull, 'w')
        progress.Infinite.file = self.devnull
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # remove the created config.json file
        if os.path.exists(os.path.join(PROBLEM, MAIN_CONFIG)):
            os.remove(os.path.join(PROBLEM, MAIN_CONFIG))
        # move backup back
        if os.path.exists(os.path.join(PROBLEM, BAK_CONFIG)):
            os.rename(os.path.join(PROBLEM, BAK_CONFIG),
                      os.path.join(PROBLEM, MAIN_CONFIG))
        # reenable default progress
        progress.Infinite.file = sys.stderr
        # fix: close the devnull handle opened in __enter__ (was leaked)
        self.devnull.close()
if __name__ == '__main__':
    # Entry point: dispatch on the module-level mode flags.
    # NOTE(review): when GRID_MEASUREMENT is set and BENCHMARK is not, both
    # grid_measurement() AND single_iter() run — confirm whether the second
    # `if` was meant to be `elif`.
    if GRID_MEASUREMENT:
        grid_measurement()
    if BENCHMARK:
        benchmark()
    else:
        single_iter()
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,145
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py
|
# Copyright (c) 2019 Tibor Schneider
import numpy as np
import torch as t
import torch.nn.functional as F
from quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d
from quantlab.indiv.ste_ops import STEActivation, STEController
class EEGNet(t.nn.Module):
    """
    Quantized EEGNet for 4-class motor-imagery classification.

    Weight quantization uses INQ (incremental network quantization) layers;
    activation quantization uses STE (straight-through estimator) layers.
    Either can be disabled, and the first layer can be given its own level
    counts or be the only quantized layer (first_layer_only).
    """
    def __init__(self, F1=8, D=2, F2=None, C=22, T=1125, N=4, p_dropout=0.5,
                 dropout_type='TimeDropout2d', quantWeight=True, quantAct=True,
                 weightInqSchedule=None, weightInqNumLevels=255, weightInqStrategy="matnitude",
                 weightInqInitMethod="uniform", actSTENumLevels=255, actSTEStartEpoch=2,
                 floorToZero=False, actFirstLayerNumLevels=None, weightFirstLayerNumLevels=None,
                 first_layer_only=False):
        """
        F1: Number of spectral filters
        D: Number of spacial filters (per spectral filter), F2 = F1 * D
        F2: Number or None. If None, then F2 = F1 * D
        C: Number of EEG channels
        T: Number of time samples
        N: Number of classes
        p_dropout: Dropout Probability
        dropout_type: string, either 'dropout', 'SpatialDropout2d' or 'TimeDropout2D'
        floorToZero: STE rounding is done by floor towards zero
        quantWeight / quantAct: enable INQ weight / STE activation quantization
        weightInqSchedule: required {epoch: fraction} dict driving INQ steps
        weightInqNumLevels / actSTENumLevels: quantization level counts
        actFirstLayerNumLevels / weightFirstLayerNumLevels: optional overrides
            for the first layer (default to the global level counts)
        first_layer_only: quantize only the first layer

        NOTE(review): the default weightInqStrategy="matnitude" looks like a
        typo for "magnitude" — confirm against the strategy names accepted by
        quantlab.indiv.inq_ops.
        """
        super(EEGNet, self).__init__()
        if weightInqSchedule is None:
            raise TypeError("Parameter weightInqSchedule is not set")
        # first-layer level counts fall back to the global settings
        if weightFirstLayerNumLevels is None:
            weightFirstLayerNumLevels = weightInqNumLevels
        if actFirstLayerNumLevels is None:
            actFirstLayerNumLevels = actSTENumLevels
        # JSON configs deliver epoch keys as strings; INQ wants ints
        weightInqSchedule = {int(k): v for k, v in weightInqSchedule.items()}
        # prepare network constants
        if F2 is None:
            F2 = F1 * D
        # Prepare Dropout Type
        if dropout_type.lower() == 'dropout':
            dropout = t.nn.Dropout
        elif dropout_type.lower() == 'spatialdropout2d':
            dropout = t.nn.Dropout2d
        elif dropout_type.lower() == 'timedropout2d':
            dropout = TimeDropout2d
        else:
            raise ValueError("dropout_type must be one of SpatialDropout2d, Dropout or "
                             "WrongDropout2d")
        # store local values
        self.F1, self.D, self.F2, self.C, self.T, self.N = (F1, D, F2, C, T, N)
        self.p_dropout = p_dropout
        # Number of input neurons to the final fully connected layer
        # (two successive (1, 8) average-poolings shrink T by a factor of 64)
        n_features = (T // 8) // 8
        # prepare helper functions to easily declare activation, convolution and linear unit
        def activ():
            return t.nn.ReLU(inplace=True)
        def quantize(numLevels=None, first=False):
            # STE activation quantizer; falls back to Identity when disabled.
            # In first_layer_only mode, non-first quantizers use the global
            # level count regardless of the numLevels argument.
            start = actSTEStartEpoch
            monitor = start - 1
            if numLevels is None or (not first and first_layer_only):
                numLevels = actSTENumLevels
            if quantAct:
                return STEActivation(startEpoch=start, monitorEpoch=monitor,
                                     numLevels=numLevels, floorToZero=floorToZero)
            else:
                return t.nn.Identity()
        def linear(name, n_in, n_out, bias=True, first=False):
            # INQ-quantized linear layer, or plain nn.Linear when weight
            # quantization is off (or this is not the first layer in
            # first_layer_only mode).
            if quantWeight and not (not first and first_layer_only):
                return INQLinear(n_in, n_out, bias=bias, numLevels=weightInqNumLevels,
                                 strategy=weightInqStrategy, quantInitMethod=weightInqInitMethod)
            else:
                return t.nn.Linear(n_in, n_out, bias=bias)
        def conv2d(name, in_channels, out_channels, kernel_size, numLevels=None, first=False, **argv):
            # INQ-quantized conv layer, with the same first-layer gating as
            # linear(); plain nn.Conv2d otherwise.
            if quantWeight and not (not first and first_layer_only):
                if numLevels is None:
                    numLevels = weightInqNumLevels
                return INQConv2d(in_channels, out_channels, kernel_size,
                                 numLevels=numLevels, strategy=weightInqStrategy,
                                 quantInitMethod=weightInqInitMethod, **argv)
            else:
                return t.nn.Conv2d(in_channels, out_channels, kernel_size, **argv)
        # Block 1
        self.quant1 = quantize(actFirstLayerNumLevels, first=True)
        # asymmetric pad keeps the time length unchanged by the (1, 64) conv
        self.conv1_pad = t.nn.ZeroPad2d((31, 32, 0, 0))
        self.conv1 = conv2d("conv1", 1, F1, (1, 64), bias=False,
                            numLevels=weightFirstLayerNumLevels, first=True)
        self.batch_norm1 = t.nn.BatchNorm2d(F1, momentum=0.01, eps=0.001)
        self.quant2 = quantize()
        # depthwise spatial conv over all C EEG channels
        self.conv2 = conv2d("conv2", F1, D * F1, (C, 1), groups=F1, bias=False)
        self.batch_norm2 = t.nn.BatchNorm2d(D * F1, momentum=0.01, eps=0.001)
        self.activation1 = activ()
        self.pool1 = t.nn.AvgPool2d((1, 8))
        self.quant3 = quantize()
        # self.dropout1 = dropout(p=p_dropout)
        self.dropout1 = t.nn.Dropout(p=p_dropout)
        # Block 2
        self.sep_conv_pad = t.nn.ZeroPad2d((7, 8, 0, 0))
        # separable conv = depthwise (1, 16) followed by pointwise (1, 1)
        self.sep_conv1 = conv2d("sep_conv1", D * F1, D * F1, (1, 16), groups=D * F1, bias=False)
        self.quant4 = quantize()
        self.sep_conv2 = conv2d("sep_conv2", D * F1, F2, (1, 1), bias=False)
        self.batch_norm3 = t.nn.BatchNorm2d(F2, momentum=0.01, eps=0.001)
        self.activation2 = activ()
        self.pool2 = t.nn.AvgPool2d((1, 8))
        self.quant5 = quantize()
        self.dropout2 = dropout(p=p_dropout)
        # Fully connected layer (classifier)
        self.flatten = Flatten()
        self.fc = linear("fc", F2 * n_features, N, bias=True)
        self.quant6 = quantize(255)
        # controllers step the INQ/STE modules over the training epochs
        self.inqController = INQController(INQController.getInqModules(self), weightInqSchedule,
                                           clearOptimStateOnStep=True)
        self.steController = STEController(STEController.getSteModules(self),
                                           clearOptimStateOnStart=True)
        # initialize weights
        # self._initialize_params()
    def forward(self, x, with_stats=False):
        """Forward pass; optionally also return named weight tensors.

        Returns x of shape (s, N), or (stats, x) when with_stats is True.
        """
        # input dimensions: (s, 1, C, T)
        x = self.quant1(x)
        # Block 1
        x = self.conv1_pad(x)
        x = self.conv1(x)  # output dim: (s, F1, C, T-1)
        x = self.batch_norm1(x)
        x = self.quant2(x)
        x = self.conv2(x)  # output dim: (s, D * F1, 1, T-1)
        x = self.batch_norm2(x)
        x = self.activation1(x)
        x = self.pool1(x)  # output dim: (s, D * F1, 1, T // 8)
        x = self.quant3(x)
        x = self.dropout1(x)
        # Block2
        x = self.sep_conv_pad(x)
        x = self.sep_conv1(x)  # output dim: (s, D * F1, 1, T // 8 - 1)
        x = self.quant4(x)
        x = self.sep_conv2(x)  # output dim: (s, F2, 1, T // 8 - 1)
        x = self.batch_norm3(x)
        x = self.activation2(x)
        x = self.pool2(x)  # output dim: (s, F2, 1, T // 64)
        x = self.quant5(x)
        x = self.dropout2(x)
        # Classification
        x = self.flatten(x)  # output dim: (s, F2 * (T // 64))
        x = self.fc(x)  # output dim: (s, N)
        x = self.quant6(x)
        if with_stats:
            stats = [('conv1_w', self.conv1.weight.data),
                     ('conv2_w', self.conv2.weight.data),
                     ('sep_conv1_w', self.sep_conv1.weight.data),
                     ('sep_conv2_w', self.sep_conv2.weight.data),
                     ('fc_w', self.fc.weight.data),
                     ('fc_b', self.fc.bias.data)]
            return stats, x
        return x
    def forward_with_tensor_stats(self, x):
        """Convenience wrapper: forward pass returning (stats, output)."""
        return self.forward(x, with_stats=True)
    def _initialize_params(self, weight_init=t.nn.init.xavier_uniform_, bias_init=t.nn.init.zeros_):
        """
        Initializes all the parameters of the model

        Parameters:
         - weight_init: t.nn.init inplace function
         - bias_init: t.nn.init inplace function
        """
        def init_weight(m):
            # biases exist only on Linear layers here (convs use bias=False)
            if isinstance(m, t.nn.Conv2d) or isinstance(m, t.nn.Linear):
                weight_init(m.weight)
            if isinstance(m, t.nn.Linear):
                bias_init(m.bias)
        self.apply(init_weight)
class Flatten(t.nn.Module):
    """Collapse every non-batch dimension into a single feature axis."""
    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
class TimeDropout2d(t.nn.Dropout2d):
    """
    Dropout2d variant that treats the last (time) dimension as channels.
    """
    def __init__(self, p=0.5, inplace=False):
        """
        See t.nn.Dropout2d for parameters
        """
        super(TimeDropout2d, self).__init__(p=p, inplace=inplace)
    def forward(self, input):
        # Identity in evaluation mode; dropout is only applied while training.
        if not self.training:
            return input
        # Rotate the time axis into the channel position, drop whole time
        # steps, then rotate back to the original layout.
        rotated = input.permute(0, 3, 1, 2)
        dropped = F.dropout2d(rotated, self.p, True, self.inplace)
        return dropped.permute(0, 2, 3, 1)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,146
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/MNIST/MLP/mlp.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import math
import torch
import torch.nn as nn
from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear
from quantlab.indiv.inq_ops import INQController, INQLinear
class MLP(nn.Module):
    """Quantized Multi-Layer Perceptron (both weights and activations).

    Parameters:
     - capacity:          width multiplier; hidden width is int(2048 * capacity)
     - quant_schemes:     per-layer quantization scheme lookup, indexed by layer
                          name (only consulted when the matching quant* flag is
                          set and no INQ schedule is given)
     - quantAct:          quantize activations with StochasticActivation
     - quantWeights:      quantize weights (INQ when weightInqSchedule is given,
                          otherwise StochasticLinear)
     - weightInqSchedule: optional {epoch: fraction} dict driving INQ steps
    """
    def __init__(self, capacity, quant_schemes,
                 quantAct=True, quantWeights=True,
                 weightInqSchedule=None):
        super().__init__()
        nh = int(2048 * capacity)
        if weightInqSchedule is not None:
            # JSON configs deliver epoch keys as strings; INQ needs ints.
            # fix: original iterated the dict directly (missing .items()),
            # which raised at unpacking time
            weightInqSchedule = {int(k): v for k, v in weightInqSchedule.items()}

        def activ(name, nc):
            if quantAct:
                # fix: was `quant_scheme` — a NameError; the parameter is
                # named quant_schemes
                return StochasticActivation(*quant_schemes[name], nc)
            else:
                return nn.ReLU()

        def linear(name, ni, no, bias=False):
            if quantWeights:
                if weightInqSchedule is not None:
                    return INQLinear(ni, no, bias=bias, numBits=2)
                else:
                    return StochasticLinear(*quant_schemes[name], ni, no, bias=bias)
            else:
                return nn.Linear(ni, no, bias=bias)

        # four FC+BN stages; nonlinearities only between hidden layers
        self.phi1_fc = linear('phi1_fc', 28*28, nh, bias=False)
        self.phi1_bn = nn.BatchNorm1d(nh)
        self.phi1_act = activ('phi1_act', nh)
        self.phi2_fc = linear('phi2_fc', nh, nh, bias=False)
        self.phi2_bn = nn.BatchNorm1d(nh)
        self.phi2_act = activ('phi2_act', nh)
        self.phi3_fc = linear('phi3_fc', nh, nh, bias=False)
        self.phi3_bn = nn.BatchNorm1d(nh)
        self.phi3_act = activ('phi3_act', nh)
        self.phi4_fc = linear('phi4_fc', nh, 10, bias=False)
        self.phi4_bn = nn.BatchNorm1d(10)
        # example schedule:
        # weightInqSchedule={15: 0.5, 22: 0.75, 30: 0.875, 37: 0.9375, 44: 1.0}
        if weightInqSchedule is not None:
            self.inqController = INQController(INQController.getInqModules(self),
                                               weightInqSchedule)

    def forward(self, x, withStats=False):
        """Classify flattened 28x28 inputs; optionally also return weights.

        Returns logits of shape (s, 10), or (stats, logits) when withStats.
        """
        stats = []
        x = x.view(-1, 28*28)
        x = self.phi1_fc(x)
        x = self.phi1_bn(x)
        x = self.phi1_act(x)
        x = self.phi2_fc(x)
        x = self.phi2_bn(x)
        x = self.phi2_act(x)
        x = self.phi3_fc(x)
        x = self.phi3_bn(x)
        x = self.phi3_act(x)
        x = self.phi4_fc(x)
        x = self.phi4_bn(x)
        if withStats:
            stats.append(('phi1_fc_w', self.phi1_fc.weight.data))
            stats.append(('phi2_fc_w', self.phi2_fc.weight.data))
            stats.append(('phi3_fc_w', self.phi3_fc.weight.data))
            stats.append(('phi4_fc_w', self.phi4_fc.weight.data))
            return stats, x
        else:
            return x

    def forward_with_tensor_stats(self, x):
        """Convenience wrapper returning (stats, output)."""
        stats, x = self.forward(x, withStats=True)
        return stats, x
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,147
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/GoogLeNet/preprocess.py
|
../MobileNetv2/preprocess.py
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,148
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py
|
from .preprocess import load_data_sets
from .postprocess import postprocess_pr, postprocess_gt
from .meyernet import MeyerNet
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,149
|
xiaywang/QuantLab
|
refs/heads/master
|
/export_net_data.py
|
import os
import numpy as np
import argparse
import json
import torch
import shutil
from main import main as quantlab_main
# Export script: optionally (re)trains an EEGNet experiment, then dumps the
# network weights, sample data, and per-layer intermediate activations to
# .npz files under export/ for offline verification.
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exp_id', help='experiment identification', type=int, default=999)
parser.add_argument('-s', '--sample', help='index of the sample', type=int, default=0)
parser.add_argument('--train', help='Train network', action='store_true')
parser.add_argument('-a', '--all', help='Export all samples', action='store_true')
args = parser.parse_args()
exp_folder = f'BCI-CompIV-2a/logs/exp{args.exp_id:03}'
output_file = 'export/{}.npz'
output_config_file = "export/config.json"
# train the network
if args.train:
    # delete the exp folder
    # NOTE(review): the bare except also swallows permission errors, not
    # just a missing folder — consider catching FileNotFoundError
    try:
        shutil.rmtree(exp_folder)
        print('exp folder was deleted!')
    except:
        print('exp folder does not exist, skipping deletion')
    quantlab_main('BCI-CompIV-2a', 'EEGNet', exp_id=args.exp_id, ckpt_every=1, num_workers=1,
                  do_validPreTrain=False, use_single_gpu=True)
# import the EEGnet folder
# NOTE(review): exec() is used here to pull load_data_sets and EEGNet into
# this module's namespace; a regular import would be safer if the package
# layout allows it
exec(open('quantlab/BCI-CompIV-2a/EEGNet/preprocess.py').read())
exec(open('quantlab/BCI-CompIV-2a/EEGNet/eegnet.py').read())
# (redundant: exp_folder was already set identically above)
exp_folder = f'BCI-CompIV-2a/logs/exp{args.exp_id:03}'
# load the configuration file
with open(f'{exp_folder}/config.json') as _f:
    config = json.load(_f)
# get data loader
_, _, dataset = load_data_sets('BCI-CompIV-2a/data', config['treat']['data'])
# load the model
ckpts = os.listdir(f'{exp_folder}/saves')
ckpts = [x for x in ckpts if "epoch" in x]
ckpts.sort()
# last_epoch is parsed from the newest checkpoint name (currently unused)
last_epoch = int(ckpts[-1].replace('epoch', '').replace('.ckpt', ''))
ckpt = torch.load(f'{exp_folder}/saves/{ckpts[-1]}')
model = EEGNet(**config['indiv']['net']['params'])
model.load_state_dict(ckpt['indiv']['net'])
# force all STE quantizers on, so exported activations are quantized
for module in model.steController.modules:
    module.started = True
model.train(False)
# export all weights
weights = {key: value.cpu().detach().numpy() for key, value in ckpt['indiv']['net'].items()}
np.savez(output_file.format("net"), **weights)
if args.all:
    # export every dataset sample with its label and model prediction
    samples = []
    labels = []
    predictions = []
    n_samples = len(dataset)
    for sample in range(n_samples):
        x = dataset[sample][0]
        x = x.reshape(1, 1, 22, 1125)
        label = dataset[sample][1]
        prediction = model(x)
        samples.append(x.numpy())
        labels.append(label.numpy())
        predictions.append(prediction.detach().numpy())
    np.savez(output_file.format("benchmark"), samples=samples, labels=labels, predictions=predictions)
# save input data
np.savez(output_file.format("input"), input=dataset[args.sample][0].numpy())
# prepare verification data
verification = {}
# do forward pass and compute the result of the network
# (manually replays EEGNet.forward layer by layer, capturing every
# intermediate tensor — keep this in sync with EEGNet.forward)
with torch.no_grad():
    x = dataset[args.sample][0]
    verification['input'] = x.numpy()
    x = x.reshape(1, 1, 22, 1125)
    x = model.quant1(x)
    verification['input_quant'] = x.numpy()
    x = model.conv1_pad(x)
    x = model.conv1(x)
    verification['layer1_conv_out'] = x.numpy()
    x = model.batch_norm1(x)
    verification['layer1_bn_out'] = x.numpy()
    x = model.quant2(x)
    verification['layer1_activ'] = x.numpy()
    x = model.conv2(x)
    verification['layer2_conv_out'] = x.numpy()
    x = model.batch_norm2(x)
    verification['layer2_bn_out'] = x.numpy()
    x = model.activation1(x)
    verification['layer2_relu_out'] = x.numpy()
    x = model.pool1(x)
    verification['layer2_pool_out'] = x.numpy()
    x = model.quant3(x)
    verification['layer2_activ'] = x.numpy()
    x = model.sep_conv_pad(x)
    x = model.sep_conv1(x)
    verification['layer3_conv_out'] = x.numpy()
    x = model.quant4(x)
    verification['layer3_activ'] = x.numpy()
    x = model.sep_conv2(x)
    verification['layer4_conv_out'] = x.numpy()
    x = model.batch_norm3(x)
    verification['layer4_bn_out'] = x.numpy()
    x = model.activation2(x)
    verification['layer4_relu_out'] = x.numpy()
    x = model.pool2(x)
    verification['layer4_pool_out'] = x.numpy()
    x = model.quant5(x)
    verification['layer4_activ'] = x.numpy()
    x = model.flatten(x)
    x = model.fc(x)
    verification['output'] = x.numpy()
    x = model.quant6(x)
    verification['output_quant'] = x.numpy()
np.savez(output_file.format("verification"), **verification)
# copy the configuration file to the export folder
shutil.copyfile(f'{exp_folder}/config.json', output_config_file)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,150
|
xiaywang/QuantLab
|
refs/heads/master
|
/main.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import argparse
from quantlab.protocol.logbook import Logbook
from quantlab.indiv.daemon import get_topo
from quantlab.treat.daemon import get_algo, get_data
from quantlab.protocol.rooms import train, test
import quantlab.indiv as indiv
def main(problem, topology, exp_id=None, load='best', mode='train', ckpt_every=10, num_workers=10,
         do_validPreTrain=True, use_single_gpu=False):
    """Launch or resume a QuantLab experiment.

    Args:
        problem: data set / problem name (e.g. 'MNIST', 'CIFAR-10', 'ImageNet').
        topology: network topology name within the problem package.
        exp_id: experiment to launch or resume; None creates a new one.
        load: checkpoint to restore ('best'/'last'/epoch index).
        mode: 'train' runs the training loop; 'test' runs a single evaluation.
        ckpt_every: store a (non-best) checkpoint every this many epochs.
        num_workers: number of DataLoader worker processes.
        do_validPreTrain: if True, also evaluate on the validation set before
            each training epoch (after the controllers' pre-training steps).
        use_single_gpu: bypass the possibly data-parallel wrapper and train on
            the plain network.

    Returns:
        (train_stats, valid_stats) of the last epoch in 'train' mode,
        test_stats in 'test' mode; implicitly None for any other mode.
    """
    # create/retrieve experiment logbook
    logbook = Logbook(problem, topology, exp_id, load)
    # create/retrieve network and treatment
    net, net_maybe_par, device, loss_fn = get_topo(logbook)
    thr, opt, lr_sched = get_algo(logbook, net)
    train_l, valid_l, test_l = get_data(logbook, num_workers=num_workers)
    if use_single_gpu:
        # drop the (possibly nn.DataParallel-wrapped) copy and train the bare net
        net_maybe_par = net
    # run experiment
    if mode == 'train':
        # resume from the epoch after the loaded checkpoint
        for _ in range(logbook.i_epoch + 1, logbook.config['treat']['max_epoch'] + 1):
            logbook.start_epoch()
            thr.step()
            # prepare training network
            net.train()
            for ctrlr in indiv.Controller.getControllers(net):
                # call controllers for e.g. LR, annealing, ... adjustments
                ctrlr.step_preTraining(logbook.i_epoch, opt, tensorboardWriter=logbook.writer)
            # validate pre-training network
            validPreTrain_stats = {}
            if do_validPreTrain:
                validPreTrain_stats = test(logbook, net, device, loss_fn, valid_l, valid=True, prefix='validPreTrain')
            # train
            train_stats = train(logbook, net_maybe_par, device, loss_fn, opt, train_l)
            # prepare validation network
            net.eval()
            for ctrlr in indiv.Controller.getControllers(net):
                ctrlr.step_preValidation(logbook.i_epoch, tensorboardWriter=logbook.writer)
            # validate (re-)trained network
            valid_stats = test(logbook, net, device, loss_fn, valid_l, valid=True)
            stats = {**train_stats, **valid_stats, **validPreTrain_stats}
            # update learning rate; schedulers whose step() takes a 'metrics'
            # argument (e.g. ReduceLROnPlateau) get the configured metric
            if 'metrics' in lr_sched.step.__code__.co_varnames:
                lr_sched_metric = stats[logbook.config['treat']['lr_scheduler']['step_metric']]
                lr_sched.step(lr_sched_metric)
            else:
                lr_sched.step()
            # save model if update metric has improved...
            if logbook.is_better(stats):
                ckpt = {'indiv': {'net': net.state_dict()},
                        'treat': {
                            'thermostat': thr.state_dict(),
                            'optimizer': opt.state_dict(),
                            'lr_scheduler': lr_sched.state_dict(),
                            'i_epoch': logbook.i_epoch
                        },
                        'protocol': {'metrics': logbook.metrics}}
                logbook.store_checkpoint(ckpt, is_best=True)
            # ...and/or if checkpoint epoch
            is_ckpt_epoch = (logbook.i_epoch % int(ckpt_every)) == 0
            if is_ckpt_epoch:
                ckpt = {'indiv': {'net': net.state_dict()},
                        'treat': {
                            'thermostat': thr.state_dict(),
                            'optimizer': opt.state_dict(),
                            'lr_scheduler': lr_sched.state_dict(),
                            'i_epoch': logbook.i_epoch
                        },
                        'protocol': {'metrics': logbook.metrics}}
                logbook.store_checkpoint(ckpt)
        # return the last validation stats
        return train_stats, valid_stats
    elif mode == 'test':
        # test
        net.eval()
        test_stats = test(logbook, net, device, loss_fn, test_l)
        return test_stats
if __name__ == "__main__":
    # Command Line Interface: declare every option once as (flag, options) and
    # register them in a single pass.
    cli = argparse.ArgumentParser(description='QuantLab')
    for flag, options in (
            ('--problem', dict(help='MNIST/CIFAR-10/ImageNet/COCO')),
            ('--topology', dict(help='Network topology')),
            ('--exp_id', dict(help='Experiment to launch/resume', default=None)),
            ('--load', dict(help='Checkpoint to load: best/last/i_epoch', default='best')),
            ('--mode', dict(help='Experiment mode: train/test', default='train')),
            ('--ckpt_every', dict(help='Frequency of checkpoints (in epochs)', default=10, type=int)),
            ('--num_workers', dict(help='Number of workers for DataLoader', default=10, type=int)),
            ('--skip_validPreTrain', dict(help='Skip validation before training', action='store_true')),
            ('--use_single_gpu', dict(help='Use a single GPU', action='store_true'))):
        cli.add_argument(flag, **options)
    opts = cli.parse_args()
    # the CLI exposes the negated flag; main() expects do_validPreTrain
    main(opts.problem, opts.topology, opts.exp_id, opts.load, opts.mode, opts.ckpt_every,
         opts.num_workers, not opts.skip_validPreTrain, opts.use_single_gpu)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,151
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/MNIST/MLP/mlpbaseline.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
import torch.nn as nn
# In order for the baselines to be launched with the same logic as quantized
# models, an empty quantization scheme and an empty thermostat schedule need
# to be configured.
# Use the following templates for the `net` and `thermostat` configurations:
#
# "net": {
# "class": "MLPBaseline",
# "params": {"capacity": 1},
# "pretrained": null,
# "loss_function": {
# "class": "HingeLoss",
# "params": {"num_classes": 10}
# }
# }
#
# "thermostat": {
# "class": "MLPBaseline",
# "params": {
# "noise_schemes": {},
# "bindings": []
# }
# }
class MLPBaseline(nn.Module):
    """Multi-Layer Perceptron (full-precision baseline).

    Three hidden Linear->BatchNorm->ReLU6 stages of width ``int(2048*capacity)``
    followed by a 10-way linear classifier. Attribute names (phi1_fc, phi1_bn,
    ...) and registration order match the quantized MLP so checkpoints and
    stats stay compatible.
    """

    def __init__(self, capacity):
        super(MLPBaseline, self).__init__()
        nh = int(2048 * capacity)
        # hidden stages share one shape pattern; register them in a loop so the
        # per-stage (fc, bn, act) ordering is uniform
        for stage, (n_in, n_out) in enumerate([(28 * 28, nh), (nh, nh), (nh, nh)], start=1):
            setattr(self, 'phi{}_fc'.format(stage), nn.Linear(n_in, n_out, bias=False))
            setattr(self, 'phi{}_bn'.format(stage), nn.BatchNorm1d(n_out))
            setattr(self, 'phi{}_act'.format(stage), nn.ReLU6())
        # classifier head (with bias, no BN/activation)
        self.phi4_fc = nn.Linear(nh, 10)

    def forward(self, x, withStats=False):
        """Run the MLP; with ``withStats`` also return named weight tensors."""
        out = x.view(-1, 28 * 28)
        for stage in (1, 2, 3):
            out = getattr(self, 'phi{}_fc'.format(stage))(out)
            out = getattr(self, 'phi{}_bn'.format(stage))(out)
            out = getattr(self, 'phi{}_act'.format(stage))(out)
        out = self.phi4_fc(out)
        if withStats:
            stats = [('phi{}_fc_w'.format(stage), getattr(self, 'phi{}_fc'.format(stage)).weight.data)
                     for stage in (1, 2, 3, 4)]
            return stats, out
        return out

    def forward_with_tensor_stats(self, x):
        """Convenience wrapper: forward pass that always returns the stats."""
        stats, x = self.forward(x, withStats=True)
        return stats, x
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,152
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/BCI-CompIV-2a/EEGNet/__init__.py
|
from .preprocess import load_data_sets
from .postprocess import postprocess_pr, postprocess_gt
from .eegnet import EEGNet
from .eegnetbaseline import EEGNetBaseline
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,153
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/CIFAR-10/VGG/vgg.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import torch
import torch.nn as nn
from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d
from quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d
from quantlab.indiv.ste_ops import STEActivation
class VGG(nn.Module):
    """Quantizable VGG.

    Six 3x3 conv stages (with max-pooling after stages 2, 4, 6) followed by a
    dense head. Each conv/linear/activation is built by a local factory that
    chooses between a quantized layer (stochastic or INQ) and a plain float
    layer, depending on the constructor flags.
    """
    def __init__(self, capacity=1, quant_schemes=None,
                 quantAct=True, quantActSTENumLevels=None, quantWeights=True,
                 weightInqSchedule=None, weightInqBits=None, weightInqLevels=None,
                 weightInqStrategy="magnitude",
                 quantSkipFirstLayer=False, quantSkipLastLayer=False,
                 stepEveryEpoch=False, weightInit=None,
                 rescaleWeights=False, variant=None, weightInqQuantInit=None):
        super().__init__()
        # weightInqBits is the deprecated way of specifying weightInqLevels;
        # at most one of the two may be given.
        assert(weightInqBits == None or weightInqLevels == None)
        if weightInqBits != None:
            print('warning: weightInqBits deprecated')
            if weightInqBits == 1:
                weightInqLevels = 2
            elif weightInqBits >= 2:
                weightInqLevels = 2**weightInqBits
            else:
                assert(False)

        # --- layer factories: pick quantized vs. float variants ------------
        def activ(name, nc):
            # STE quantized activation, stochastic activation, or plain ReLU
            if quantAct:
                if quantActSTENumLevels != None and quantActSTENumLevels > 0:
                    return STEActivation(startEpoch=0,
                                         numLevels=quantActSTENumLevels)
                else:
                    return StochasticActivation(*quant_schemes[name], nc)
            else:
                # STE level count only makes sense together with quantAct
                assert(quantActSTENumLevels == None or quantActSTENumLevels <= 0)
                return nn.ReLU(inplace=True)

        def conv2d(name, ni, no, kernel_size=3, stride=1, padding=1, bias=False):
            # stochastic conv (no INQ schedule), INQ conv, or plain conv
            if quantWeights:
                if weightInqSchedule == None:
                    return StochasticConv2d(*quant_schemes[name], ni, no,
                                            kernel_size=kernel_size, stride=stride,
                                            padding=padding, bias=bias)
                else:
                    return INQConv2d(ni, no,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding, bias=bias,
                                     numLevels=weightInqLevels,
                                     strategy=weightInqStrategy,
                                     quantInitMethod=weightInqQuantInit)
            else:
                return nn.Conv2d(ni, no,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding, bias=bias)

        def linear(name, ni, no, bias=False):
            # same selection logic as conv2d, for dense layers
            if quantWeights:
                if weightInqSchedule == None:
                    return StochasticLinear(*quant_schemes[name], ni, no, bias=bias)
                else:
                    return INQLinear(ni, no, bias=bias,
                                     numLevels=weightInqLevels,
                                     strategy=weightInqStrategy,
                                     quantInitMethod=weightInqQuantInit)
            else:
                return nn.Linear(ni, no, bias=bias)

        # channel widths scale with `capacity`
        c0 = 3
        c1 = int(128 * capacity)
        c2 = int(256 * capacity)
        c3 = int(512 * capacity)
        nh = 1024
        # convolutional layers
        if quantSkipFirstLayer:
            # keep the input layer at full precision
            self.phi1_conv = nn.Conv2d(c0, c1, kernel_size=3, padding=1, bias=False)
        else:
            self.phi1_conv = conv2d('phi1_conv', c0, c1)
        self.phi1_bn = nn.BatchNorm2d(c1)
        self.phi1_act = activ('phi1_act', c1)
        self.phi2_conv = conv2d('phi2_conv', c1, c1)
        self.phi2_mp = nn.MaxPool2d(kernel_size=2, stride=2)
        self.phi2_bn = nn.BatchNorm2d(c1)
        self.phi2_act = activ('phi2_act', c1)
        self.phi3_conv = conv2d('phi3_conv', c1, c2)
        self.phi3_bn = nn.BatchNorm2d(c2)
        self.phi3_act = activ('phi3_act', c2)
        self.phi4_conv = conv2d('phi4_conv', c2, c2)
        self.phi4_mp = nn.MaxPool2d(kernel_size=2, stride=2)
        self.phi4_bn = nn.BatchNorm2d(c2)
        self.phi4_act = activ('phi4_act', c2)
        self.phi5_conv = conv2d('phi5_conv', c2, c3)
        self.phi5_bn = nn.BatchNorm2d(c3)
        self.phi5_act = activ('phi5_act', c3)
        self.phi6_conv = conv2d('phi6_conv', c3, c3)
        self.phi6_mp = nn.MaxPool2d(kernel_size=2, stride=2)
        self.phi6_bn = nn.BatchNorm2d(c3)
        self.phi6_act = activ('phi6_act', c3)
        # dense layers
        if variant == None:
            # c3*4*4 assumes 32x32 inputs reduced by three 2x2 poolings
            self.phi7_fc = linear('phi7_fc', c3*4*4, nh)
            self.phi7_bn = nn.BatchNorm1d(nh)
            self.phi7_act = activ('phi7_act', nh)
            self.phi8_fc = linear('phi8_fc', nh, nh)
            self.phi8_bn = nn.BatchNorm1d(nh)
            self.phi8_act = activ('phi8_act', nh)
            if quantSkipLastLayer:
                # keep the classifier at full precision
                self.phi9_fc = nn.Linear(nh, 10, bias=False)
                self.phi9_bn = nn.BatchNorm1d(10)
            else:
                self.phi9_fc = linear('phi9_fc', nh, 10)
                self.phi9_bn = nn.BatchNorm1d(10)
        elif variant == 'VGG-Small':
            # VGG-Small: no hidden dense layers, direct float classifier
            assert(quantSkipLastLayer)
            self.phi7_fc = nn.Identity()
            self.phi7_bn = nn.Identity()
            self.phi7_act = nn.Identity()
            self.phi8_fc = nn.Identity()
            self.phi8_bn = nn.Identity()
            self.phi8_act = nn.Identity()
            self.phi9_fc = nn.Linear(c3*4*4, 10, bias=True)
            self.phi9_bn = nn.Identity()
        else:
            assert(False)

        # weight init for quantized nets, see:
        # https://unify.id/wp-content/uploads/2018/03/weight_init_BNN.pdf
        def initWeightFunc(m):
            if (isinstance(m, nn.Conv2d) or
                    isinstance(m, INQConv2d) or
                    isinstance(m, StochasticConv2d)):
                w = m.weight.data
                # not initializing bias here...
                if weightInit == None:
                    pass
                elif weightInit == "He":
                    nn.init.kaiming_normal_(w, mode='fan_in', nonlinearity='relu')
                elif weightInit == "orthogonal":
                    torch.nn.init.orthogonal_(w, gain=1)
                else:
                    assert(False)
        self.apply(initWeightFunc)

        if weightInqSchedule != None:
            # one controller drives all INQ layers of this network
            self.inqController = INQController(INQController.getInqModules(self),
                                               weightInqSchedule,
                                               clearOptimStateOnStep=True,
                                               stepEveryEpoch=stepEveryEpoch,
                                               rescaleWeights=rescaleWeights)

    def forward(self, x, withStats=False):
        """Forward pass; with ``withStats`` also return named conv weights."""
        x = self.phi1_conv(x)
        x = self.phi1_bn(x)
        x = self.phi1_act(x)
        x = self.phi2_conv(x)
        x = self.phi2_mp(x)
        x = self.phi2_bn(x)
        x = self.phi2_act(x)
        x = self.phi3_conv(x)
        x = self.phi3_bn(x)
        x = self.phi3_act(x)
        x = self.phi4_conv(x)
        x = self.phi4_mp(x)
        x = self.phi4_bn(x)
        x = self.phi4_act(x)
        x = self.phi5_conv(x)
        x = self.phi5_bn(x)
        x = self.phi5_act(x)
        x = self.phi6_conv(x)
        x = self.phi6_mp(x)
        x = self.phi6_bn(x)
        x = self.phi6_act(x)
        # flatten the conv feature map for the dense head
        # x = x.reshape(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())
        x = x.reshape(x.size(0), -1)
        x = self.phi7_fc(x)
        x = self.phi7_bn(x)
        x = self.phi7_act(x)
        x = self.phi8_fc(x)
        x = self.phi8_bn(x)
        x = self.phi9_fc(x)
        x = self.phi9_bn(x)
        if withStats:
            stats = []
            stats.append(('phi1_conv_w', self.phi1_conv.weight.data))
            stats.append(('phi3_conv_w', self.phi3_conv.weight.data))
            stats.append(('phi5_conv_w', self.phi5_conv.weight.data))
            # stats.append(('phi7_fc_w', self.phi7_fc.weight.data))
            # stats.append(('phi8_fc_w', self.phi8_fc.weight.data))
            # stats.append(('phi9_fc_w', self.phi9_fc.weight.data))
            return stats, x
        return x

    def forward_with_tensor_stats(self, x):
        """Convenience wrapper: forward pass that always returns the stats."""
        stats, x = self.forward(x, withStats=True)
        return stats, x
# LOAD NETWORK
if __name__ == '__main__':
    # Ad-hoc inspection script: rebuild an INQ-quantized VGG, load a trained
    # checkpoint, and report how many weights of one layer are still at full
    # precision (NaN entries in weightFrozen mark not-yet-quantized weights).
    model = VGG(quantAct=False, quantWeights=True,
                weightInqSchedule={'1': 1.0}, quantSkipFirstLayer=True)
    # path = '../../../CIFAR-10/logs/exp048/saves/epoch1050.ckpt'
    # path = '../../../CIFAR-10/logs/exp057/saves/epoch0900.ckpt'
    # path = '../../../CIFAR-10/logs/exp066/saves/epoch1150.ckpt'
    # path = '../../../CIFAR-10/logs/exp069/saves/epoch0100.ckpt'
    # path = '../../../CIFAR-10/logs/exp308/saves/best.ckpt' # TWN with rescaling
    # path = '../../../CIFAR-10/logs/exp071/saves/best.ckpt' # TWN slow latest
    # path = '../../../CIFAR-10/logs/exp273/saves/best.ckpt' # TWN fast latest
    path = '../../../CIFAR-10/logs/exp032/saves/best.ckpt' # TNN
    # path = '../../../CIFAR-10/logs/exp293/saves/best.ckpt' # BNN
    state_dicts = torch.load(path, map_location='cpu')
    model.load_state_dict(state_dicts['indiv']['net'])
    print('non-quant values, layer 3: %8d' % (
        torch.isnan(model.phi3_conv.weightFrozen).sum(dtype=torch.long).item()))
    print('total values, layer 3: %8d' % (model.phi3_conv.weightFrozen.numel()))
    # plot the frozen (quantized) vs. live (full-precision) weight histograms
    import matplotlib.pyplot as plt
    plt.hist(model.phi3_conv.weightFrozen.flatten(),
             bins=201)
    plt.hist(model.phi3_conv.weight.detach().flatten(),
             bins=201)
    #########################################################
    # verification: no information in non-quantized weights
    #########################################################
    # manual switch: when enabled, zero all live weights and save a copy of the
    # checkpoint; evaluating it should match the original if all information
    # resides in the frozen (quantized) weights
    verification = False
    if verification:
        quantModules = INQController.getInqModules(model)
        # check proper quantization levels
        from matplotlib import pyplot as plt
        plt.hist(quantModules[4].weightFrozen.detach().flatten().numpy(), bins=30)
        # remove non-quantized information for test run
        for m in quantModules:
            m.weight.data.zero_()
        state_dicts['indiv']['net'] = model.state_dict()
        torch.save(state_dicts, path.replace('.ckpt', '_verify.ckpt'))
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,154
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/indiv/inq_ops.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import math
import itertools
import torch
import torch.nn as nn
import quantlab.indiv as indiv
class INQController(indiv.Controller):
    """Drives INQ quantization for a whole network.

    Instantiate typically once per network, hand it the list of INQ modules to
    control together with an INQ schedule (epoch -> quantized fraction), and
    call the step function once per epoch.
    """

    def __init__(self, modules, schedule, clearOptimStateOnStep=False,
                 stepEveryEpoch=False, rescaleWeights=False):
        super().__init__()
        self.modules = modules
        # config files deliver epoch keys as strings; index by int epoch
        self.schedule = {int(epoch): frac for epoch, frac in schedule.items()}
        self.fraction = 0.0
        self.clearOptimStateOnStep = clearOptimStateOnStep
        self.stepEveryEpoch = stepEveryEpoch
        self.rescaleWeights = rescaleWeights

    def step_preTraining(self, epoch, optimizer=None, tensorboardWriter=None):
        """Advance the quantized fraction (per schedule) and step all modules."""
        scheduled = epoch in self.schedule
        # do nothing on unscheduled epochs unless stepping every epoch
        if not (scheduled or self.stepEveryEpoch):
            return
        if scheduled:
            self.fraction = self.schedule[epoch]
        # log to tensorboard
        if tensorboardWriter is not None:
            tensorboardWriter.add_scalar('INQ/fraction',
                                         self.fraction, global_step=epoch)
        # step each INQ module
        for module in self.modules:
            module.step(self.fraction)
        # clear optimizer state (e.g. Adam's momentum)
        if self.clearOptimStateOnStep and optimizer is not None:
            optimizer.state.clear()

    def step_postOptimStep(self, *args, **kwargs):
        """Optionally rescale the live weights after each optimizer step."""
        if not self.rescaleWeights:
            return
        for module in self.modules:
            module.weightInqCtrl.rescaleWeights()

    @staticmethod
    def getInqModules(net):
        """Collect all INQ layers contained in `net`."""
        inqLayerTypes = (INQLinear, INQConv1d, INQConv2d)
        return [m for m in net.modules() if isinstance(m, inqLayerTypes)]
class INQParameterController:
    """Implements INQ functionality for one parameter of a custom layer
    (e.g. the 'weight' of an INQConv2d).

    Registers two companion parameters on the host module:

    * ``weightFrozen`` -- same shape as the controlled parameter; entries that
      have been quantized ("frozen") hold their quantized value, entries still
      trained at full precision hold NaN;
    * ``sParam`` -- 1-element tensor holding the quantization scale ``s``
      (NaN until initialized by the first ``inqStep`` call).

    Args:
        module: host nn.Module owning the parameter.
        parameterName: name of the controlled parameter on `module`.
        numLevels: number of quantization levels.
        strategy: "magnitude", "random", or "magnitude-SRQ"/"RPR".
        backCompat: use the legacy attribute names 'weightFrozen'/'sParam'
            (restricts parameterName to 'weight').
        quantInitMethod: None (power-of-two levels), 'uniform', or one of the
            'uniform-*opt' variants that fit the scale via scipy.
    """
    def __init__(self, module, parameterName, numLevels=3,
                 strategy="magnitude", backCompat=True,
                 quantInitMethod=None):  # e.g. 'uniform-l1opt'
        self.module = module
        self.parameterName = parameterName
        self.backCompat = backCompat
        self.numLevels = numLevels
        self.strategy = strategy  # "magnitude" or "random" or "magnitude-SRQ"/"RPR"
        self.fraction = 0.0
        self.quantInitMethod = quantInitMethod
        if self.backCompat:
            assert(parameterName == 'weight')
            assert(not hasattr(module, 'weightFrozen'))
            assert(not hasattr(module, 'sParam'))
            self.pnameFrozen = 'weightFrozen'
            self.pnameS = 'sParam'
        else:
            # more structured; adds support for multiple indep. INQ parameters
            self.pnameFrozen = parameterName + '_inqFrozen'
            self.pnameS = parameterName + '_inqS'
        # NaN marks "not yet frozen" entries / "not yet initialized" scale
        module.__setattr__(self.pnameFrozen,
                           nn.Parameter(torch.full_like(self.weight, float('NaN')),
                                        requires_grad=False))
        module.__setattr__(self.pnameS,
                           nn.Parameter(torch.full((1,), float('NaN')).to(self.weight),
                                        requires_grad=False))

    def getWeightParams(self, module):
        """Look up (live weight, frozen weight) on `module` (not self.module;
        with nn.DataParallel the module is replicated)."""
        weight = module.__getattr__(self.parameterName)
        weightFrozen = module.__getattr__(self.pnameFrozen)
        return weight, weightFrozen

    @property
    def weight(self):
        # live, full-precision parameter
        return self.module.__getattr__(self.parameterName)

    @property
    def weightFrozen(self):
        # quantized values where frozen, NaN elsewhere
        return self.module.__getattr__(self.pnameFrozen)

    @property
    def sParam(self):
        return self.module.__getattr__(self.pnameS)

    @property
    def s(self):
        # quantization scale as a Python float
        return self.sParam.item()

    @s.setter
    def s(self, value):
        self.sParam[0] = value

    @staticmethod
    def inqQuantize(weight, quantLevels):
        """Quantize a weight tensor to the nearest of the given levels."""
        bestQuantLevel = torch.zeros_like(weight)
        minQuantError = torch.full_like(weight, float('inf'))
        for ql in quantLevels:
            qerr = (weight-ql).abs()
            mask = qerr < minQuantError
            bestQuantLevel[mask] = ql
            minQuantError[mask] = qerr[mask]
        quantizedWeight = bestQuantLevel
        return quantizedWeight

    def inqStep(self, fraction):
        """Freeze weights so that `fraction` of them hold quantized values.

        On the first call (fraction was 0.0 and s is NaN) the scale `s` is
        initialized according to `quantInitMethod`; afterwards the levels are
        recomputed from the stored `s` on every call.
        """
        if self.quantInitMethod == None:
            # update s
            if self.fraction == 0.0 and math.isnan(self.s):
                self.s = torch.max(torch.abs(self.weight.data)).item()
            # compute power-of-two quantization levels (INQ paper scheme)
            n_1 = math.floor(math.log((4*self.s)/3, 2))
            n_2 = int(n_1 + 2 - (self.numLevels // 2))
            if self.numLevels >= 3:
                quantLevelsPos = (2**i for i in range(n_2, n_1+1))
                quantLevelsNeg = (-2**i for i in range(n_2, n_1+1))
                quantLevels = itertools.chain(quantLevelsPos, [0], quantLevelsNeg)
            else:
                assert(self.numLevels == 2)
                quantLevels = [self.s/2, -self.s/2]  # [2**n_2, -2**n_2]
        elif self.quantInitMethod == 'uniform':
            # update s
            if self.fraction == 0.0 and math.isnan(self.s):
                self.s = torch.max(torch.abs(self.weight.data)).item()
            # compute uniformly spaced quantization levels in [-s, s]
            quantLevels = torch.linspace(-self.s, self.s, steps=self.numLevels)
        elif self.quantInitMethod in ['uniform-l1opt',
                                      'uniform-l2opt',
                                      'uniform-perCh-l2opt',
                                      'uniform-linfopt']:
            getQLs = lambda s: torch.linspace(-s, s, steps=self.numLevels)
            if self.fraction == 0.0 and math.isnan(self.s):
                import scipy.optimize

                def optimWeight(weight):
                    # brute-force the scale minimizing the chosen norm of the
                    # quantization error, then fold it into the weights
                    # (weights are rescaled in place; effective s becomes 1)
                    def loss(s):
                        s = s.item()
                        qls = getQLs(s)
                        for i, ql in enumerate(qls):
                            tmp = (weight-ql).abs()
                            if i == 0:
                                minQuantErr = tmp
                            else:
                                minQuantErr = torch.min(minQuantErr, tmp)
                        if self.quantInitMethod == 'uniform-l1opt':
                            return minQuantErr.norm(p=1).item()
                        elif self.quantInitMethod in ['uniform-l2opt', 'uniform-perCh-l2opt']:
                            return minQuantErr.norm(p=2).item()
                        elif self.quantInitMethod == 'uniform-linfopt':
                            return minQuantErr.norm(p=float('inf')).item()
                        else:
                            assert(False)
                    bounds = (1e-6, weight.abs().max().item())
                    optRes = scipy.optimize.brute(loss, ranges=(bounds,),
                                                  Ns=1000, disp=True,
                                                  finish=scipy.optimize.fmin)
                    s = optRes[0]
                    weight.mul_(1/s)
                    s = 1
                    return s

                if self.quantInitMethod in ['uniform-l1opt',
                                            'uniform-l2opt',
                                            'uniform-linfopt']:
                    self.s = optimWeight(self.weight.data.flatten().detach())
                elif self.quantInitMethod in ['uniform-perCh-l2opt']:
                    # per-output-channel rescaling; shared scale stays 1
                    self.s = 1
                    for c in range(self.weight.size(0)):
                        optimWeight(self.weight.data[c].flatten().detach())
            quantLevels = getQLs(self.s)
        else:
            assert(False)

        self.fraction = fraction
        if self.strategy == "magnitude-SRQ" or self.strategy == "RPR":
            if self.fraction == None:
                return
            # quantize everything, then randomly un-freeze (1-fraction) of it
            self.weightFrozen.data.copy_(self.inqQuantize(self.weight.data, quantLevels))
            numUnFreeze = int((1-self.fraction)*self.weight.numel())
            idxsUnFreeze = torch.randperm(self.weight.numel())[:numUnFreeze]
            self.weightFrozen.data.flatten()[idxsUnFreeze] = float('NaN')
        else:
            # number of weights frozen so far and after this step
            prevCount = self.weightFrozen.numel() - torch.isnan(self.weightFrozen.data).sum(dtype=torch.long).item()
            newCount = int(self.fraction*self.weightFrozen.numel())
            # find indexes of weights to quant
            if self.strategy == "magnitude":
                # zero the already-frozen weights so they sort last and are not
                # selected again. BUGFIX: boolean-mask indexing returns a copy,
                # so the previous `...[mask].fill_(0)` was a silent no-op and
                # already-frozen indices could be re-selected, wasting the
                # freeze budget; index assignment modifies in place.
                self.weight.data[~torch.isnan(self.weightFrozen.data)] = 0
                _, idxsSorted = self.weight.data.flatten().abs().sort(descending=True)
            elif self.strategy == "random":
                idxsSorted = torch.randperm(self.weight.numel())
            else:
                assert(False)
            idxsFreeze = idxsSorted[:newCount-prevCount]
            # quantize the weights at these indexes
            self.weightFrozen.data.flatten()[idxsFreeze] = self.inqQuantize(self.weight.data.flatten()[idxsFreeze], quantLevels)

    def inqAssembleWeight(self, module=None):
        """Combine frozen (quantized) and live (full-precision) weights.

        Frozen entries contribute their quantized value; NaN entries pass the
        live weight through (keeping it in the autograd graph).
        """
        # with nn.DataParallel, the module is copied, so self.module cannot be used
        weight, weightFrozen = self.getWeightParams(module)
        weightFrozen = weightFrozen.detach()
        frozen = ~torch.isnan(weightFrozen)
        weightAssembled = torch.zeros_like(weightFrozen)
        weightAssembled[frozen] = weightFrozen[frozen]
        fullPrecSelector = torch.isnan(weightFrozen).float()
        tmp = fullPrecSelector*weight
        weightAssembled = weightAssembled + tmp
        return weightAssembled

    def rescaleWeights(self):
        """Rescale live weights so their mean magnitude is s/2."""
        self.weight.data.mul_((self.s/2)/self.weight.data.abs().mean().item())
class INQLinear(nn.Linear):
    """nn.Linear whose weight is progressively quantized via INQ."""

    def __init__(self, in_features, out_features, bias=True,
                 numLevels=3, strategy="magnitude", quantInitMethod=None):
        super().__init__(in_features, out_features, bias)
        # attach the INQ machinery to this module's 'weight' parameter
        self.weightInqCtrl = INQParameterController(self, 'weight',
                                                    numLevels, strategy,
                                                    quantInitMethod=quantInitMethod)

    def step(self, fraction):
        """Freeze the next slice of weights to quantized values."""
        self.weightInqCtrl.inqStep(fraction)

    def forward(self, input):
        # mix frozen (quantized) and live weights, then apply the affine map
        return nn.functional.linear(input,
                                    self.weightInqCtrl.inqAssembleWeight(self),
                                    self.bias)
class INQConv1d(nn.Conv1d):
    """nn.Conv1d whose weight is progressively quantized via INQ."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros',
                 numLevels=3, strategy="magnitude", quantInitMethod=None):
        super().__init__(in_channels, out_channels, kernel_size,
                         stride, padding, dilation, groups,
                         bias, padding_mode)
        # attach the INQ machinery to this module's 'weight' parameter
        self.weightInqCtrl = INQParameterController(self, 'weight',
                                                    numLevels, strategy,
                                                    quantInitMethod=quantInitMethod)

    def step(self, fraction):
        """Freeze the next slice of weights to quantized values."""
        self.weightInqCtrl.inqStep(fraction)

    def forward(self, input):
        w = self.weightInqCtrl.inqAssembleWeight(self)
        if self.padding_mode != 'circular':
            return nn.functional.conv1d(input, w, self.bias, self.stride,
                                        self.padding, self.dilation, self.groups)
        # circular mode: pad explicitly, then convolve with no implicit padding
        # (mirrors torch.nn.Conv1d's own handling)
        wrapPad = ((self.padding[0] + 1) // 2, self.padding[0] // 2)
        return nn.functional.conv1d(
            nn.functional.pad(input, wrapPad, mode='circular'),
            w, self.bias, self.stride,
            (0,), self.dilation, self.groups)
class INQConv2d(nn.Conv2d):
    """2d convolution whose weights are incrementally quantized (INQ)."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros',
                 numLevels=3, strategy="magnitude", quantInitMethod=None):
        super().__init__(in_channels, out_channels, kernel_size,
                         stride, padding, dilation, groups,
                         bias, padding_mode)
        # the controller maintains the frozen/full-precision split of self.weight
        self.weightInqCtrl = INQParameterController(self, 'weight',
                                                   numLevels, strategy,
                                                   quantInitMethod=quantInitMethod)

    def step(self, fraction):
        """Freeze (quantize) the given fraction of the weights."""
        self.weightInqCtrl.inqStep(fraction)

    def forward(self, input):
        w = self.weightInqCtrl.inqAssembleWeight(self)
        if self.padding_mode != 'circular':
            return nn.functional.conv2d(input, w, self.bias, self.stride,
                                        self.padding, self.dilation, self.groups)
        # circular mode: pad the input manually (width then height), then
        # convolve without additional padding
        expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
                            (self.padding[0] + 1) // 2, self.padding[0] // 2)
        padded = nn.functional.pad(input, expanded_padding, mode='circular')
        return nn.functional.conv2d(padded, w, self.bias, self.stride,
                                    (0,), self.dilation, self.groups)
if __name__ == '__main__':
    # Smoke test / demo: visualize the INQ quantization grid on a ramp of
    # values, then run one freezing step on a tiny INQLinear layer and check
    # that a forward/backward pass still works.
    x = torch.linspace(-2,2,100)
    numLevels = 3
    # s: largest magnitude of the data; n_1/n_2 delimit the exponent range of
    # the power-of-two quantization levels (presumably the INQ level grid —
    # confirm against the paper / inqQuantize implementation)
    s = torch.max(torch.abs(x)).item()
    n_1 = math.floor(math.log((4*s)/3, 2))
    n_2 = int(n_1 + 2 - (numLevels//2))
    quantLevelsPos = (2**i for i in range(n_2, n_1+1))
    quantLevelsNeg = (-2**i for i in range(n_2, n_1+1))
    # NOTE(review): quantLevels is a one-shot generator — it is consumed by
    # the inqQuantize call below and cannot be reused afterwards.
    quantLevels = itertools.chain(quantLevelsPos, [0], quantLevelsNeg)
    x_q = INQParameterController.inqQuantize(x, quantLevels)
    import matplotlib.pyplot as plt
    plt.clf()
    # original ramp vs. its quantized counterpart
    plt.plot(x.numpy())
    plt.plot(x_q.numpy())
    # NOTE(review): strategy "RPR" is not one of the strategies handled in
    # inqStep's visible branches ("magnitude"/"random") — verify it is valid.
    model = INQLinear(2, 3, bias=False,
                      numLevels=numLevels, strategy="RPR")
    print(model.weight)
    print(model.weightFrozen)
    # freeze half of the weights, then inspect the split again
    model.step(0.5)
    print(model.weight)
    print(model.weightFrozen)
    x = torch.randn(4,2)
    y = model(x)
    L = y.norm(p=2)
    L.backward()
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,155
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/protocol/rooms.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
from progress.bar import FillingSquaresBar
import torch
import quantlab.indiv as indiv
def train(logbook, net, device, loss_fn, opt, train_l):
    """Run one epoch of the training experiment.

    For every batch: forward pass, statistics update, backward pass and
    optimizer step, followed by the controllers' post-optimizer hooks.
    Logs loss/metric and the learning rate to the logbook's writer and
    returns {'train_loss': ..., 'train_metric': ...}.
    """
    logbook.meter.reset()
    bar = FillingSquaresBar('Training \t', max=len(train_l))
    # controllers (e.g. quantization schedules) attached to the net's modules
    controllers = indiv.Controller.getControllers(net)
    for i_batch, data in enumerate(train_l):
        # load data onto device
        inputs, gt_labels = data
        inputs = inputs.to(device)
        gt_labels = gt_labels.to(device)
        # forprop
        pr_outs = net(inputs)
        loss = loss_fn(pr_outs, gt_labels)
        # update statistics (done before backprop so the bar shows this batch)
        logbook.meter.update(pr_outs, gt_labels, loss.item(), track_metric=logbook.track_metric)
        bar.suffix = 'Total: {total:} | ETA: {eta:} | Epoch: {epoch:4d} | ({batch:5d}/{num_batches:5d})'.format(
            total=bar.elapsed_td,
            eta=bar.eta_td,
            epoch=logbook.i_epoch,
            batch=i_batch + 1,
            num_batches=len(train_l))
        bar.suffix = bar.suffix + logbook.meter.bar()
        bar.next()
        # backprop
        opt.zero_grad()
        loss.backward()
        opt.step()
        # let controllers react to the completed optimizer step
        for ctrl in controllers:
            ctrl.step_postOptimStep()
    bar.finish()
    stats = {
        'train_loss': logbook.meter.avg_loss,
        'train_metric': logbook.meter.avg_metric
    }
    # only log values that are set (falsy averages are skipped)
    for k, v in stats.items():
        if v:
            logbook.writer.add_scalar(k, v, global_step=logbook.i_epoch)
    logbook.writer.add_scalar('learning_rate', opt.param_groups[0]['lr'], global_step=logbook.i_epoch)
    return stats
def test(logbook, net, device, loss_fn, test_l, valid=False, prefix=None):
    """Run a validation (valid=True) or test epoch without gradient tracking.

    Returns {prefix+'_loss': ..., prefix+'_metric': ...}; prefix defaults to
    'valid' or 'test' depending on the valid flag. During validation the
    scalars and per-tensor histograms are also written to tensorboard.
    """
    logbook.meter.reset()
    bar_title = 'Validation \t' if valid else 'Test \t'
    bar = FillingSquaresBar(bar_title, max=len(test_l))
    with torch.no_grad():
        for i_batch, data in enumerate(test_l):
            # load data onto device
            inputs, gt_labels = data
            inputs = inputs.to(device)
            gt_labels = gt_labels.to(device)
            # forprop (also collects per-tensor statistics for tensorboard)
            tensor_stats, pr_outs = net.forward_with_tensor_stats(inputs)
            loss = loss_fn(pr_outs, gt_labels)
            # update statistics
            logbook.meter.update(pr_outs, gt_labels, loss.item(), track_metric=True)
            bar.suffix = 'Total: {total:} | ETA: {eta:} | Epoch: {epoch:4d} | ({batch:5d}/{num_batches:5d})'.format(
                total=bar.elapsed_td,
                eta=bar.eta_td,
                epoch=logbook.i_epoch,
                batch=i_batch + 1,
                num_batches=len(test_l))
            bar.suffix = bar.suffix + logbook.meter.bar()
            bar.next()
    bar.finish()
    # bugfix: compare against None with 'is', not '==' (PEP 8 E711)
    if prefix is None:
        prefix = 'valid' if valid else 'test'
    stats = {
        prefix+'_loss': logbook.meter.avg_loss,
        prefix+'_metric': logbook.meter.avg_metric
    }
    if valid:
        for k, v in stats.items():
            if v:
                logbook.writer.add_scalar(k, v, global_step=logbook.i_epoch)
        # NOTE(review): tensor_stats holds the stats of the LAST batch only,
        # and raises NameError if test_l is empty — confirm loaders are never
        # empty before relying on this.
        for name, tensor in tensor_stats:
            logbook.writer.add_histogram(name, tensor, global_step=logbook.i_epoch)
    return stats
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,156
|
xiaywang/QuantLab
|
refs/heads/master
|
/plot_npz_tb.py
|
import os
import numpy as np
import argparse
import matplotlib.pyplot as plt
def plot_npz(filename, export=None, act_quant_line=None):
    """Load scalar curves from a .npz file and hand them to plot_data."""
    data = dict(np.load(filename))
    # 'num_trials' is metadata, not a curve to plot
    data.pop('num_trials', None)
    plot_data(data, export, act_quant_line)
def plot_tb(filename, export=None, act_quant_line=None):
    """Load scalar curves from a tensorboard event file and plot them."""
    # local imports: tensorboard is only needed for this entry point
    from eegnet_run import _prepare_scalar_array_from_tensorboard as prepare_tb_array
    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
    ea = EventAccumulator(filename)
    ea.Reload()
    scalar_tags = ea.Tags()['scalars']
    data = {tag: prepare_tb_array(ea, tag) for tag in scalar_tags}
    plot_data(data, export, act_quant_line)
def plot_data(data, export=None, act_quant_line=None):
    """Sort the named curves into loss/accuracy subplots and render them.

    Curves whose name does not identify them are assigned interactively.
    """
    loss_plot = {}
    acc_plot = {}
    n_epochs = None
    for name, array in data.items():
        # every curve must span the same number of epochs
        if n_epochs is None:
            n_epochs = len(array)
        else:
            assert len(array) == n_epochs, f"{name} has length {len(array)} but should be {n_epochs}"
        l_name = name.lower()
        if any(tag in l_name for tag in ('metric', 'acc', 'accuracy')):
            acc_plot[name] = array
        elif 'loss' in l_name:
            loss_plot[name] = array
        elif l_name == 'learning_rate':
            # deliberately ignored
            pass
        else:
            # unknown curve: let the user decide where it belongs
            choice = input(f"Where to put {name}? [b]oth, [l]oss, [a]ccuracy, [N]one? > ")
            choice = choice.lower() if choice else 'n'
            assert choice in ['b', 'l', 'a', 'n']
            if choice in ('b', 'l'):
                loss_plot[name] = array
            if choice in ('b', 'a'):
                acc_plot[name] = array
    generate_figure(loss_plot, acc_plot, n_epochs, export, act_quant_line)
def generate_figure(loss_plot, acc_plot, n_epochs, export=None, act_quant_line=None):
    """Render loss and accuracy curves side by side; show or export the figure.

    If export is None the figure is shown interactively, otherwise it is saved
    to the given path.
    """
    # make sure that the environment variables are set (to hide the unnecessary output)
    if "XDG_RUNTIME_DIR" not in os.environ:
        tmp_dir = "/tmp/runtime-eegnet"
        os.environ["XDG_RUNTIME_DIR"] = tmp_dir
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)
            # bugfix: the mode must be octal; chmod(..., 700) set mode 0o1274
            os.chmod(tmp_dir, 0o700)
    # prepare data: epochs are 1-based on the x axis
    x = np.array(range(1, n_epochs + 1))
    # prepare the plot
    fig = plt.figure(figsize=(20, 10))
    # do loss figure
    loss_subfig = fig.add_subplot(121)
    add_subplot(loss_plot, x, loss_subfig, "Loss", "upper center", act_quant_line)
    # do accuracy figure
    acc_subfig = fig.add_subplot(122)
    add_subplot(acc_plot, x, acc_subfig, "Accuracy", "lower center", act_quant_line)
    # save the image
    if export is None:
        plt.show()
    else:
        fig.savefig(export, bbox_inches='tight')
    # close
    plt.close('all')
def add_subplot(data, x, subfig, title, legend_pos=None, act_quant_line=None):
    """Plot the given curves onto subfig.

    train_/valid_ curves share the left y-axis; any other curve gets its own
    twin y-axis on the right. Returns the number of extra axes created.
    """
    # consistency fix: draw on the target axes, not the implicit current axes
    subfig.grid()
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    additional_axis = []
    lines = []
    if act_quant_line is not None:
        # vertical marker for the epoch where activation quantization starts
        lines.append(subfig.axvline(x=act_quant_line, label='Activation Quantization', color=colors[2]))
    for i, key in enumerate(data.keys()):
        if key.startswith('train_'):
            new_lines = subfig.plot(x, data[key], label=key, color=colors[0])
        elif key.startswith('valid_'):
            new_lines = subfig.plot(x, data[key], label=key, color=colors[1])
        else:
            # extra curves (e.g. learning rate) get a twin y-axis on the right
            tmp_axis = subfig.twinx()
            tmp_axis.set_ylabel(key)
            new_lines = tmp_axis.plot(x, data[key], label=key, color=colors[i+3])
            additional_axis.append(tmp_axis)
        lines += new_lines
    # stagger the additional axes so their labels don't overlap
    for i, axis in enumerate(additional_axis):
        axis.spines['right'].set_position(('axes', 1 + i * 0.15))
        if i > 0:
            axis.set_frame_on(True)
            axis.patch.set_visible(False)
    subfig.set_title(title)
    subfig.set_xlabel("Epoch")
    labels = [l.get_label() for l in lines]
    # put the legend on the topmost axes so it is not drawn over
    last_ax = additional_axis[-1] if additional_axis else subfig
    last_ax.legend(lines, labels, frameon=True, framealpha=1, facecolor='white', loc=legend_pos)
    return len(additional_axis)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help='filename of the data', nargs=1)
    parser.add_argument('-t', '--tensorboard', help='Data is of tensorboard format',
                        action='store_true')
    parser.add_argument('-n', '--numpy', help='Data is of numpy npz format',
                        action='store_true')
    parser.add_argument('-e', '--export', help='export plot to specified file', type=str)
    parser.add_argument('--act_quant_line', help='position of vertical line', type=int)
    args = parser.parse_args()
    # if both tensorboard and numpy are not set, infer the type by the file ending
    filename = args.file[0]
    if not args.tensorboard and not args.numpy:
        if 'events.out.tfevents' in filename:
            args.tensorboard = True
        elif filename.endswith('.npz'):
            args.numpy = True
        else:
            # bugfix: report the filename itself, not the single-element list
            raise RuntimeError(f'Cannot automatically detect type of the file: {filename}')
    if args.tensorboard:
        plot_tb(filename, args.export, args.act_quant_line)
    elif args.numpy:
        plot_npz(filename, args.export, args.act_quant_line)
    else:
        # unreachable: one of the two flags is guaranteed to be set above
        raise RuntimeError()
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,157
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/indiv/__init__.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
class Controller(object):
    """Base class for training-time controllers attached to network modules.

    Subclasses override step(); the step_* hooks are called at fixed points
    of the training loop and default to no-ops (except step_preTraining,
    which delegates to step()).
    """

    def __init__(self):
        pass

    def step(self, epoch, optimizer=None, tensorboardWriter=None):
        """Advance the controller's schedule; no-op in the base class."""
        pass

    def step_preTraining(self, *args, **kwargs):
        # by default the pre-training hook simply delegates to step()
        self.step(*args, **kwargs)

    def step_preValidation(self, *args, **kwargs):
        pass

    def step_postOptimStep(self, *args, **kwargs):
        pass

    @staticmethod
    def getControllers(net):
        """Collect every Controller instance attached to any module of net."""
        found = []
        for module in net.modules():
            for attr in module.__dict__.values():
                if isinstance(attr, Controller):
                    found.append(attr)
        return found
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,158
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/indiv/transfer.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import os
import torch
from quantlab.protocol.logbook import _exp_align_, _ckpt_align_
def load_pretrained(logbook, net):
    """Load (a subset of) pretrained parameters into net.

    The checkpoint location comes from logbook.config['indiv']['net']
    ['pretrained']: a str is resolved against the 'pretrained' folder (or
    used as a direct path), a dict selects an experiment/epoch checkpoint.
    If 'parameters' is present, only the listed parameter groups are loaded.
    Loading is non-strict; missing/unexpected keys are printed as warnings.
    """
    # get path to pretrained network
    pre_config = logbook.config['indiv']['net']['pretrained']
    if isinstance(pre_config['file'], str):
        ckpt_file = os.path.join(os.path.dirname(logbook.dir_logs), logbook.topology, 'pretrained', pre_config['file'])
        if not os.path.exists(ckpt_file):
            # fall back to interpreting the entry as a direct path
            ckpt_file = pre_config['file']
    elif isinstance(pre_config['file'], dict):
        dir_exp = 'exp' + str(pre_config['file']['exp_id']).rjust(_exp_align_, '0')
        epoch_str = str(pre_config['file']['epoch'])
        if epoch_str.isnumeric():
            ckpt_id = epoch_str.rjust(_ckpt_align_, '0')
            ckpt_name = 'epoch' + ckpt_id + '.ckpt'
        else:
            # e.g. for 'best', 'last'
            ckpt_name = epoch_str + '.ckpt'
        ckpt_file = os.path.join(logbook.dir_logs, dir_exp, 'saves', ckpt_name)
    else:
        # bugfix: previously fell through with ckpt_file undefined (NameError)
        raise ValueError("pretrained 'file' must be a str or dict, got %s"
                         % type(pre_config['file']).__name__)
    if logbook.verbose:
        print('Loading checkpoint: {}'.format(ckpt_file))
    # load network params
    net_dict = net.state_dict()
    pretrained_dict = torch.load(ckpt_file)['indiv']['net']
    if 'parameters' in pre_config.keys():
        # load selected parameters only (batch-norm counters are skipped)
        parameters = []
        for group_name in pre_config['parameters']:
            parameters += [k for k in pretrained_dict.keys() if k.startswith(group_name) and not k.endswith('num_batches_tracked')]
        net_dict.update({k: v for k, v in pretrained_dict.items() if k in parameters})
    else:
        # load all parameters if not specified
        net_dict = pretrained_dict
    missing_keys, unexpected_keys = net.load_state_dict(net_dict, strict=False)
    # report differences
    if len(missing_keys) > 0:
        print('WARNING: missing keys in pretrained net!')
        for k in missing_keys:
            print('key: %s' % k)
    if len(unexpected_keys) > 0:
        print('WARNING: unexpected keys in pretrained net!')
        for k in unexpected_keys:
            print('key: %s' % k)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,159
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ETHZ-CVL-AED/MeyerNet/acousticEventDetDatasetConvert.py
|
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import numpy as np
import re
import os
import pickle
def readSingleFile(fname):
    """Parse one CSV dump of numpy array literals into a single spectrogram.

    The file contains several 'array([...])' literals; each is reshaped to
    (64, n_t) and the segments are concatenated along time, dropping the
    20-column overlap at the start of every segment after the first.
    """
    with open(fname) as f:
        fileCont = f.read()
    # bugfix: raw string — '\(' / '\[' are invalid escape sequences in a
    # normal string literal (DeprecationWarning, SyntaxWarning in 3.12+)
    arrs = re.findall(r'array\(\[(.*)\]\)', fileCont)
    arrs = [np.fromstring(a, sep=',', dtype=np.int16) for a in arrs]
    # print('fname: %s' % fname)
    # print([t.shape for t in arrs])
    arrs = [t.reshape(64, -1) for t in arrs]  # shape: 64 x n_t
    # sum of lengths: 8*60+48+52 = 580
    # 'normal' size: 400 --> overlap of 10 on both sides (or 20 on one)
    arrsConcat = [arrs[0]] + [t[:, 20:] for t in arrs[1:]]
    spectrogram = np.concatenate(arrsConcat, axis=1)
    return spectrogram  # 64 x 25600 for the full dataset files
def getClasses(rootDir):
    """Return the unique class names found in rootDir.

    Filenames follow the format {className}_{someNum}_{randomString}.csv.
    """
    filelist = os.listdir(rootDir)
    # bugfix: raw string ('\_' is an invalid escape sequence) and an escaped
    # dot so that only real .csv files match
    classes = (re.findall(r'^(.*)_\d*_.*\.csv$', fname) for fname in filelist)
    classes = filter(lambda s: len(s) >= 1, classes)
    classes = (s[0] for s in classes)
    classes = list(set(classes))  # uniquify
    return classes
def readClassSpectrograms(cl, rootDir):
    """Load every spectrogram belonging to class cl from rootDir.

    NOTE(review): rootDir is concatenated directly with the filename, so it
    must end with a path separator (callers pass e.g. './train/') — confirm.
    """
    filelist = os.listdir(rootDir)
    pattern = '^(%s_.*.csv)$' % cl
    matches = (re.findall(pattern, fname) for fname in filelist)
    matches = (m for m in matches if len(m) >= 1)
    fnames = (rootDir + m[0] for m in matches)
    return [readSingleFile(fname) for fname in fnames]
#readSingleFile('./test/car_172_offset25.csv')
#readSingleFile('./test/car_172_offset50.csv')
# Convert the raw CSV dumps under ./train/ and ./test/ into two pickled
# dicts mapping class name -> list of spectrograms.
# NOTE(review): this runs at import time (no __main__ guard) and expects
# ./train/ and ./test/ relative to the CWD — confirm before importing.
classes = getClasses('./train/')
print('classes: %s' % str(classes))
datasetTrain = {cl: readClassSpectrograms(cl, './train/') for cl in classes}
datasetTest = {cl: readClassSpectrograms(cl, './test/') for cl in classes}
fname = './train.pickle'
with open(fname, 'wb') as f:
    pickle.dump(datasetTrain, f)
fname = './test.pickle'
with open(fname, 'wb') as f:
    pickle.dump(datasetTest, f)
#import matplotlib.pyplot as plt
#spectrogram = datasetTrain['acoustic_guitar'][3]
#plt.imshow(spectrogram)
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,160
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani, Tibor Schneider
from os import path
import numpy as np
import scipy.io as sio
from scipy.signal import butter, sosfilt
import numpy as np
import torch as t
from torchvision.transforms import ToTensor, Normalize, Compose
from quantlab.treat.data.split import transform_random_split
"""
In order to use this preprocessing module, use the following 'data' configuration
"data": {
"subject": 1
"fs": 250,
"f1_fraction": 1.5,
"f2_fraction": 6.0,
"filter": {
# SEE BELOW
}
"valid_fraction": 0.1,
"bs_train": 32,
"bs_valid": 32,
"use_test_as_valid": false
}
For using no filter, you can leave out the "data"."filter" object, or set the "data"."filter"."type"
to "none".
For using highpass, use the following filter
"filter": {
"type": "highpass",
"fc": 4.0,
"order": 4
}
For using bandpass, use the following filter
"filter": {
"type": "bandpass",
"fc_low": 4.0,
"fc_high": 40.0,
"order": 5
}
"""
class BCI_CompIV_2a(t.utils.data.Dataset):
    """BCI Competition IV-2a motor-imagery dataset for a single subject.

    Loads trials from the subject's .mat file ('T' = training session,
    'E' = evaluation session) and exposes them as (sample, label) pairs.
    """

    def __init__(self, root, train, subject, transform=None):
        # root: directory holding A0<subject>T.mat / A0<subject>E.mat
        # train: True selects the 'T' session, False the 'E' session
        # transform: optional callable applied to each sample in __getitem__
        self.subject = subject
        self.root = root
        self.train = train
        self.transform = transform
        self.samples, self.labels = self._load_data()

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        sample = self.samples[idx, :, :]
        label = self.labels[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, label

    def _load_data(self):
        """Read the .mat file and return (samples, labels) tensors.

        Trials flagged as artifacts are dropped; labels are shifted to be
        0-based. samples has shape (n_trials, 22, 1750).
        """
        NO_channels = 22
        NO_tests = 6 * 48
        Window_Length = 7 * 250  # 7 s at fs = 250 Hz (see module docstring)
        class_return = np.zeros(NO_tests, dtype=np.float32)
        data_return = np.zeros((NO_tests, NO_channels, Window_Length), dtype=np.float32)
        n_valid_trials = 0
        if self.train:
            a = sio.loadmat(path.join(self.root, 'A0' + str(self.subject) + 'T.mat'))
        else:
            a = sio.loadmat(path.join(self.root, 'A0' + str(self.subject) + 'E.mat'))
        a_data = a['data']
        for ii in range(0, a_data.size):
            # unwrap the nested MATLAB struct for run ii
            a_data1 = a_data[0, ii]
            a_data2 = [a_data1[0, 0]]
            a_data3 = a_data2[0]
            a_X = a_data3[0]          # signal matrix, time x channels
            a_trial = a_data3[1]      # trial start indices into a_X
            a_y = a_data3[2]          # class labels (1-based)
            a_fs = a_data3[3]         # sampling frequency (unused here)
            # a_classes = a_data3[4]
            a_artifacts = a_data3[5]  # per-trial artifact flags (0 = clean)
            # a_gender = a_data3[6]
            # a_age = a_data3[7]
            for trial in range(0, a_trial.size):
                if a_artifacts[trial] == 0:
                    # keep clean trials only; first 22 columns are EEG channels
                    range_a = int(a_trial[trial])
                    range_b = range_a + Window_Length
                    data_return[n_valid_trials, :, :] = np.transpose(a_X[range_a:range_b, :22])
                    class_return[n_valid_trials] = int(a_y[trial])
                    n_valid_trials += 1
        # trim preallocated buffers to the valid trials, shift labels to 0-based
        data_return = data_return[0:n_valid_trials, :, :]
        class_return = class_return[0:n_valid_trials]
        class_return = class_return - 1
        data_return = t.Tensor(data_return).to(dtype=t.float)
        class_return = t.Tensor(class_return).to(dtype=t.long)
        return data_return, class_return
class HighpassFilter(object):
    """Channel-wise Butterworth highpass filter transform.

    Args:
        fs: sampling frequency in Hz.
        fc: cutoff frequency in Hz.
        order: Butterworth filter order.
    """

    def __init__(self, fs, fc, order):
        nyq = 0.5 * fs
        norm_fc = fc / nyq  # normalize the cutoff to the Nyquist frequency
        # Second-order sections are numerically more stable than (b, a).
        self.sos = butter(order, norm_fc, btype='highpass', output='sos')

    def __call__(self, sample):
        """Filters each channel of `sample` (shape (channels, time)) in place.

        Bug fix: the original looped `for ch in sample.shape[0]`, which raises
        TypeError ('int' object is not iterable); it must be range(...).
        """
        for ch in range(sample.shape[0]):
            sample[ch, :] = sosfilt(self.sos, sample[ch, :])
        return sample
class BandpassFilter(object):
    """Channel-wise Butterworth bandpass filter transform.

    Args:
        fs: sampling frequency in Hz.
        fc_low: lower cutoff frequency in Hz.
        fc_high: upper cutoff frequency in Hz.
        order: Butterworth filter order.
    """

    def __init__(self, fs, fc_low, fc_high, order):
        nyq = 0.5 * fs
        norm_fc_low = fc_low / nyq    # normalize cutoffs to the Nyquist frequency
        norm_fc_high = fc_high / nyq
        # Second-order sections are numerically more stable than (b, a).
        self.sos = butter(order, [norm_fc_low, norm_fc_high], btype='bandpass', output='sos')

    def __call__(self, sample):
        """Filters each channel of `sample` (shape (channels, time)) in place.

        Bug fix: the original looped `for ch in sample.shape[0]`, which raises
        TypeError ('int' object is not iterable); it must be range(...).
        """
        for ch in range(sample.shape[0]):
            sample[ch, :] = sosfilt(self.sos, sample[ch, :])
        return sample
class Identity(object):
    """No-op transform used when no filtering is configured."""

    def __call__(self, sample):
        # Pass the sample through untouched.
        return sample
class TimeWindowPostCue(object):
    """Crops the time axis to the window [t1_factor*fs, t2_factor*fs).

    Expects samples with the time axis last, e.g. (1, channels, time)
    as produced by ReshapeTensor.
    """

    def __init__(self, fs, t1_factor, t2_factor):
        # Convert the window bounds from seconds (factors of fs) to samples.
        self.t1 = int(t1_factor * fs)
        self.t2 = int(t2_factor * fs)

    def __call__(self, sample):
        window = slice(self.t1, self.t2)
        return sample[:, :, window]
class ReshapeTensor(object):
    """Adds a leading singleton dimension: (C, T) -> (1, C, T)."""

    def __call__(self, sample):
        n_ch = sample.shape[0]
        n_time = sample.shape[1]
        return sample.view(1, n_ch, n_time)
def get_transform(fs, t1_factor, t2_factor, filter_config):
    """Builds the sample pipeline: optional filter, reshape, post-cue window.

    Args:
        fs: sampling frequency in Hz.
        t1_factor, t2_factor: window bounds in seconds relative to trial start.
        filter_config: dict with a 'type' key ('highpass', 'bandpass', or
            anything else / missing / None for no filtering), plus the
            cutoff/order keys required by the chosen type.
    """
    # Normalize a missing configuration so the lookups below are safe.
    if filter_config is None:
        filter_config = {'type': None}
    else:
        filter_config.setdefault('type', 'none')

    filter_type = filter_config['type']
    if filter_type == 'highpass':
        filter_transform = HighpassFilter(fs, filter_config['fc'],
                                          filter_config['order'])
    elif filter_type == 'bandpass':
        filter_transform = BandpassFilter(fs, filter_config['fc_low'],
                                          filter_config['fc_high'],
                                          filter_config['order'])
    else:
        # No (or unknown) filter type: pass samples through unchanged.
        filter_transform = Identity()

    pipeline = [filter_transform,
                ReshapeTensor(),
                TimeWindowPostCue(fs, t1_factor, t2_factor)]
    return Compose(pipeline)
def load_data_sets(dir_data, data_config):
    """Prepares (train, valid, test) datasets for one BCI IV 2a subject.

    When data_config['use_test_as_valid'] is truthy, the full training
    session is used for training and the evaluation session serves as both
    validation and test set. Otherwise a 'valid_fraction' of the training
    session is held out for validation.
    """
    subject = data_config['subject']
    transform = get_transform(data_config['fs'], data_config['t1_factor'],
                              data_config['t2_factor'], data_config['filter'])
    trainvalid_set = BCI_CompIV_2a(root=dir_data, train=True, subject=subject)
    if data_config.get("use_test_as_valid", False):
        # Train on everything; validate on the (separate) evaluation session.
        train_set = trainvalid_set
        train_set.transform = transform
        valid_set = BCI_CompIV_2a(root=dir_data, train=False, subject=subject,
                                  transform=transform)
    else:
        # Hold out a fraction of the training session for validation.
        n_train = int(len(trainvalid_set) * (1.0 - data_config['valid_fraction']))
        n_valid = len(trainvalid_set) - n_train
        train_set, valid_set = transform_random_split(trainvalid_set,
                                                      [n_train, n_valid],
                                                      [transform, transform])
    test_set = BCI_CompIV_2a(root=dir_data, train=False, subject=subject,
                             transform=transform)
    return train_set, valid_set, test_set
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,161
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/ResNet/__init__.py
|
from .preprocess import load_data_sets
from .postprocess import postprocess_pr, postprocess_gt
from .resnet import ResNet
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,162
|
xiaywang/QuantLab
|
refs/heads/master
|
/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py
|
# Copyright (c) 2019 UniMoRe, Matteo Spallanzani
# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli
import math
import torch.nn as nn
# In order for the baselines to be launched with the same logic as quantized
# models, an empty quantization scheme and an empty thermostat schedule need
# to be configured.
# Use the following templates for the `net` and `thermostat` configurations:
#
# "net": {
# "class": "MobileNetv2Baseline",
# "params": {"capacity": 1, "expansion": 6},
# "pretrained": null,
# "loss_fn": {
# "class": "CrossEntropyLoss",
# "params": {}
# }
# }
#
# "thermostat": {
# "class": "MobileNetv2Baseline",
# "params": {
# "noise_schemes": {},
# "bindings": []
# }
# }
class MobileNetv2Baseline(nn.Module):
    """MobileNetv2 Convolutional Neural Network (full-precision baseline).

    Layers are laid out flat (phiNN_*) so quantized variants can mirror the
    naming one-to-one. `capacity` scales the channel widths; `expansion` is
    the inverted-residual expansion factor.

    Bug fix: phi16_act was constructed as `nn.ReLU6(t3)`, passing a channel
    count where ReLU6 expects the boolean `inplace` flag; it is now
    `nn.ReLU6(inplace=True)`, consistent with every other activation.
    """
    def __init__(self, capacity=1, expansion=6):
        super().__init__()
        c0 = 3
        t0 = int(32 * capacity) * 1
        c1 = int(16 * capacity)
        t1 = c1 * expansion
        c2 = int(24 * capacity)
        t2 = c2 * expansion
        c3 = int(32 * capacity)
        t3 = c3 * expansion
        c4 = int(64 * capacity)
        t4 = c4 * expansion
        c5 = int(96 * capacity)
        t5 = c5 * expansion
        c6 = int(160 * capacity)
        t6 = c6 * expansion
        c7 = int(320 * capacity)
        c8 = max(int(1280 * capacity), 1280)
        # first block
        self.phi01_conv = nn.Conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)
        self.phi01_bn = nn.BatchNorm2d(t0)
        self.phi01_act = nn.ReLU6(inplace=True)
        self.phi02_conv = nn.Conv2d(t0, t0, kernel_size=3, stride=1, padding=1, groups=t0, bias=False)
        self.phi02_bn = nn.BatchNorm2d(t0)
        self.phi02_act = nn.ReLU6(inplace=True)
        self.phi03_conv = nn.Conv2d(t0, c1, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi03_bn = nn.BatchNorm2d(c1)
        # second block
        self.phi04_conv = nn.Conv2d(c1, t1, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi04_bn = nn.BatchNorm2d(t1)
        self.phi04_act = nn.ReLU6(inplace=True)
        self.phi05_conv = nn.Conv2d(t1, t1, kernel_size=3, stride=2, padding=1, groups=t1, bias=False)
        self.phi05_bn = nn.BatchNorm2d(t1)
        self.phi05_act = nn.ReLU6(inplace=True)
        self.phi06_conv = nn.Conv2d(t1, c2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi06_bn = nn.BatchNorm2d(c2)
        self.phi07_conv = nn.Conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi07_bn = nn.BatchNorm2d(t2)
        self.phi07_act = nn.ReLU6(inplace=True)
        self.phi08_conv = nn.Conv2d(t2, t2, kernel_size=3, stride=1, padding=1, groups=t2, bias=False)
        self.phi08_bn = nn.BatchNorm2d(t2)
        self.phi08_act = nn.ReLU6(inplace=True)
        self.phi09_conv = nn.Conv2d(t2, c2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi09_bn = nn.BatchNorm2d(c2)
        # third block
        self.phi10_conv = nn.Conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi10_bn = nn.BatchNorm2d(t2)
        self.phi10_act = nn.ReLU6(inplace=True)
        self.phi11_conv = nn.Conv2d(t2, t2, kernel_size=3, stride=2, padding=1, groups=t2, bias=False)
        self.phi11_bn = nn.BatchNorm2d(t2)
        self.phi11_act = nn.ReLU6(inplace=True)
        self.phi12_conv = nn.Conv2d(t2, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi12_bn = nn.BatchNorm2d(c3)
        self.phi13_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi13_bn = nn.BatchNorm2d(t3)
        self.phi13_act = nn.ReLU6(inplace=True)
        self.phi14_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)
        self.phi14_bn = nn.BatchNorm2d(t3)
        self.phi14_act = nn.ReLU6(inplace=True)
        self.phi15_conv = nn.Conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi15_bn = nn.BatchNorm2d(c3)
        self.phi16_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi16_bn = nn.BatchNorm2d(t3)
        # Fixed: was nn.ReLU6(t3) — t3 would be (mis)read as the inplace flag.
        self.phi16_act = nn.ReLU6(inplace=True)
        self.phi17_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)
        self.phi17_bn = nn.BatchNorm2d(t3)
        self.phi17_act = nn.ReLU6(inplace=True)
        self.phi18_conv = nn.Conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi18_bn = nn.BatchNorm2d(c3)
        # fourth block
        self.phi19_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi19_bn = nn.BatchNorm2d(t3)
        self.phi19_act = nn.ReLU6(inplace=True)
        self.phi20_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=2, padding=1, groups=t3, bias=False)
        self.phi20_bn = nn.BatchNorm2d(t3)
        self.phi20_act = nn.ReLU6(inplace=True)
        self.phi21_conv = nn.Conv2d(t3, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi21_bn = nn.BatchNorm2d(c4)
        self.phi22_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi22_bn = nn.BatchNorm2d(t4)
        self.phi22_act = nn.ReLU6(inplace=True)
        self.phi23_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi23_bn = nn.BatchNorm2d(t4)
        self.phi23_act = nn.ReLU6(inplace=True)
        self.phi24_conv = nn.Conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi24_bn = nn.BatchNorm2d(c4)
        self.phi25_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi25_bn = nn.BatchNorm2d(t4)
        self.phi25_act = nn.ReLU6(inplace=True)
        self.phi26_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi26_bn = nn.BatchNorm2d(t4)
        self.phi26_act = nn.ReLU6(inplace=True)
        self.phi27_conv = nn.Conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi27_bn = nn.BatchNorm2d(c4)
        self.phi28_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi28_bn = nn.BatchNorm2d(t4)
        self.phi28_act = nn.ReLU6(inplace=True)
        self.phi29_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi29_bn = nn.BatchNorm2d(t4)
        self.phi29_act = nn.ReLU6(inplace=True)
        self.phi30_conv = nn.Conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi30_bn = nn.BatchNorm2d(c4)
        # fifth block
        self.phi31_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi31_bn = nn.BatchNorm2d(t4)
        self.phi31_act = nn.ReLU6(inplace=True)
        self.phi32_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)
        self.phi32_bn = nn.BatchNorm2d(t4)
        self.phi32_act = nn.ReLU6(inplace=True)
        self.phi33_conv = nn.Conv2d(t4, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi33_bn = nn.BatchNorm2d(c5)
        self.phi34_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi34_bn = nn.BatchNorm2d(t5)
        self.phi34_act = nn.ReLU6(inplace=True)
        self.phi35_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)
        self.phi35_bn = nn.BatchNorm2d(t5)
        self.phi35_act = nn.ReLU6(inplace=True)
        self.phi36_conv = nn.Conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi36_bn = nn.BatchNorm2d(c5)
        self.phi37_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi37_bn = nn.BatchNorm2d(t5)
        self.phi37_act = nn.ReLU6(inplace=True)
        self.phi38_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)
        self.phi38_bn = nn.BatchNorm2d(t5)
        self.phi38_act = nn.ReLU6(inplace=True)
        self.phi39_conv = nn.Conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi39_bn = nn.BatchNorm2d(c5)
        # sixth block
        self.phi40_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi40_bn = nn.BatchNorm2d(t5)
        self.phi40_act = nn.ReLU6(inplace=True)
        self.phi41_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=2, padding=1, groups=t5, bias=False)
        self.phi41_bn = nn.BatchNorm2d(t5)
        self.phi41_act = nn.ReLU6(inplace=True)
        self.phi42_conv = nn.Conv2d(t5, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi42_bn = nn.BatchNorm2d(c6)
        self.phi43_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi43_bn = nn.BatchNorm2d(t6)
        self.phi43_act = nn.ReLU6(inplace=True)
        self.phi44_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi44_bn = nn.BatchNorm2d(t6)
        self.phi44_act = nn.ReLU6(inplace=True)
        self.phi45_conv = nn.Conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi45_bn = nn.BatchNorm2d(c6)
        self.phi46_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi46_bn = nn.BatchNorm2d(t6)
        self.phi46_act = nn.ReLU6(inplace=True)
        self.phi47_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi47_bn = nn.BatchNorm2d(t6)
        self.phi47_act = nn.ReLU6(inplace=True)
        self.phi48_conv = nn.Conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi48_bn = nn.BatchNorm2d(c6)
        # seventh block
        self.phi49_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi49_bn = nn.BatchNorm2d(t6)
        self.phi49_act = nn.ReLU6(inplace=True)
        self.phi50_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)
        self.phi50_bn = nn.BatchNorm2d(t6)
        self.phi50_act = nn.ReLU6(inplace=True)
        self.phi51_conv = nn.Conv2d(t6, c7, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi51_bn = nn.BatchNorm2d(c7)
        # classifier
        self.phi52_conv = nn.Conv2d(c7, c8, kernel_size=1, stride=1, padding=0, bias=False)
        self.phi52_bn = nn.BatchNorm2d(c8)
        self.phi52_act = nn.ReLU6(inplace=True)
        self.phi53_avg = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)
        self.phi53_fc = nn.Linear(c8, 1000)
        self._initialize_weights()

    def _initialize_weights(self):
        """He-style init for convs, unit-gain for BN, small normal for FC."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x, withStats=False):
        """Runs the network; with withStats=True also returns an (empty) stats list."""
        # first block
        x = self.phi01_conv(x)
        x = self.phi01_bn(x)
        x = self.phi01_act(x)
        x = self.phi02_conv(x)
        x = self.phi02_bn(x)
        x = self.phi02_act(x)
        x = self.phi03_conv(x)
        x = self.phi03_bn(x)
        # second block
        x = self.phi04_conv(x)
        x = self.phi04_bn(x)
        x = self.phi04_act(x)
        x = self.phi05_conv(x)
        x = self.phi05_bn(x)
        x = self.phi05_act(x)
        x = self.phi06_conv(x)
        x = self.phi06_bn(x)
        x_res = self.phi07_conv(x)
        x_res = self.phi07_bn(x_res)
        x_res = self.phi07_act(x_res)
        x_res = self.phi08_conv(x_res)
        x_res = self.phi08_bn(x_res)
        x_res = self.phi08_act(x_res)
        x_res = self.phi09_conv(x_res)
        x_res = self.phi09_bn(x_res)
        x = x + x_res
        # third block
        x = self.phi10_conv(x)
        x = self.phi10_bn(x)
        x = self.phi10_act(x)
        x = self.phi11_conv(x)
        x = self.phi11_bn(x)
        x = self.phi11_act(x)
        x = self.phi12_conv(x)
        x = self.phi12_bn(x)
        x_res = self.phi13_conv(x)
        x_res = self.phi13_bn(x_res)
        x_res = self.phi13_act(x_res)
        x_res = self.phi14_conv(x_res)
        x_res = self.phi14_bn(x_res)
        x_res = self.phi14_act(x_res)
        x_res = self.phi15_conv(x_res)
        x_res = self.phi15_bn(x_res)
        x = x + x_res
        x_res = self.phi16_conv(x)
        x_res = self.phi16_bn(x_res)
        x_res = self.phi16_act(x_res)
        x_res = self.phi17_conv(x_res)
        x_res = self.phi17_bn(x_res)
        x_res = self.phi17_act(x_res)
        x_res = self.phi18_conv(x_res)
        x_res = self.phi18_bn(x_res)
        x = x + x_res
        # fourth block
        x = self.phi19_conv(x)
        x = self.phi19_bn(x)
        x = self.phi19_act(x)
        x = self.phi20_conv(x)
        x = self.phi20_bn(x)
        x = self.phi20_act(x)
        x = self.phi21_conv(x)
        x = self.phi21_bn(x)
        x_res = self.phi22_conv(x)
        x_res = self.phi22_bn(x_res)
        x_res = self.phi22_act(x_res)
        x_res = self.phi23_conv(x_res)
        x_res = self.phi23_bn(x_res)
        x_res = self.phi23_act(x_res)
        x_res = self.phi24_conv(x_res)
        x_res = self.phi24_bn(x_res)
        x = x + x_res
        x_res = self.phi25_conv(x)
        x_res = self.phi25_bn(x_res)
        x_res = self.phi25_act(x_res)
        x_res = self.phi26_conv(x_res)
        x_res = self.phi26_bn(x_res)
        x_res = self.phi26_act(x_res)
        x_res = self.phi27_conv(x_res)
        x_res = self.phi27_bn(x_res)
        x = x + x_res
        x_res = self.phi28_conv(x)
        x_res = self.phi28_bn(x_res)
        x_res = self.phi28_act(x_res)
        x_res = self.phi29_conv(x_res)
        x_res = self.phi29_bn(x_res)
        x_res = self.phi29_act(x_res)
        x_res = self.phi30_conv(x_res)
        x_res = self.phi30_bn(x_res)
        x = x + x_res
        # fifth block
        x = self.phi31_conv(x)
        x = self.phi31_bn(x)
        x = self.phi31_act(x)
        x = self.phi32_conv(x)
        x = self.phi32_bn(x)
        x = self.phi32_act(x)
        x = self.phi33_conv(x)
        x = self.phi33_bn(x)
        x_res = self.phi34_conv(x)
        x_res = self.phi34_bn(x_res)
        x_res = self.phi34_act(x_res)
        x_res = self.phi35_conv(x_res)
        x_res = self.phi35_bn(x_res)
        x_res = self.phi35_act(x_res)
        x_res = self.phi36_conv(x_res)
        x_res = self.phi36_bn(x_res)
        x = x + x_res
        x_res = self.phi37_conv(x)
        x_res = self.phi37_bn(x_res)
        x_res = self.phi37_act(x_res)
        x_res = self.phi38_conv(x_res)
        x_res = self.phi38_bn(x_res)
        x_res = self.phi38_act(x_res)
        x_res = self.phi39_conv(x_res)
        x_res = self.phi39_bn(x_res)
        x = x + x_res
        # sixth block
        x = self.phi40_conv(x)
        x = self.phi40_bn(x)
        x = self.phi40_act(x)
        x = self.phi41_conv(x)
        x = self.phi41_bn(x)
        x = self.phi41_act(x)
        x = self.phi42_conv(x)
        x = self.phi42_bn(x)
        x_res = self.phi43_conv(x)
        x_res = self.phi43_bn(x_res)
        x_res = self.phi43_act(x_res)
        x_res = self.phi44_conv(x_res)
        x_res = self.phi44_bn(x_res)
        x_res = self.phi44_act(x_res)
        x_res = self.phi45_conv(x_res)
        x_res = self.phi45_bn(x_res)
        x = x + x_res
        x_res = self.phi46_conv(x)
        x_res = self.phi46_bn(x_res)
        x_res = self.phi46_act(x_res)
        x_res = self.phi47_conv(x_res)
        x_res = self.phi47_bn(x_res)
        x_res = self.phi47_act(x_res)
        x_res = self.phi48_conv(x_res)
        x_res = self.phi48_bn(x_res)
        x = x + x_res
        # seventh block
        x = self.phi49_conv(x)
        x = self.phi49_bn(x)
        x = self.phi49_act(x)
        x = self.phi50_conv(x)
        x = self.phi50_bn(x)
        x = self.phi50_act(x)
        x = self.phi51_conv(x)
        x = self.phi51_bn(x)
        # classifier
        x = self.phi52_conv(x)
        x = self.phi52_bn(x)
        x = self.phi52_act(x)
        x = self.phi53_avg(x)
        x = x.view(x.size(0), -1)
        x = self.phi53_fc(x)
        if withStats:
            stats = []
            return stats, x
        return x

    def forward_with_tensor_stats(self, x):
        """Convenience wrapper returning (stats, logits)."""
        stats, x = self.forward(x, withStats=True)
        return stats, x
|
{"/quantlab/ImageNet/ResNet/resnet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py": ["/quantlab/indiv/stochastic_ops.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py"], "/quantlab/ImageNet/AlexNet/alexnet.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/ImageNet/GoogLeNet/__init__.py": ["/quantlab/ImageNet/GoogLeNet/preprocess.py", "/quantlab/ImageNet/GoogLeNet/googlenet.py"], "/quantlab/ImageNet/GoogLeNet/googlenet.py": ["/quantlab/indiv/inq_ops.py"], "/quantlab/ImageNet/MobileNetv2/__init__.py": ["/quantlab/ImageNet/MobileNetv2/preprocess.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py"], "/quantlab/indiv/daemon.py": ["/quantlab/indiv/transfer.py"], "/quantlab/indiv/ste_ops.py": ["/quantlab/indiv/__init__.py"], "/eegnet_run.py": ["/main.py"], "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py": ["/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/MNIST/MLP/mlp.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py"], "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py": ["/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py"], "/export_net_data.py": ["/main.py"], "/main.py": ["/quantlab/indiv/daemon.py", "/quantlab/treat/daemon.py", "/quantlab/protocol/rooms.py", "/quantlab/indiv/__init__.py"], "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py": ["/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", 
"/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py"], "/quantlab/CIFAR-10/VGG/vgg.py": ["/quantlab/indiv/stochastic_ops.py", "/quantlab/indiv/inq_ops.py", "/quantlab/indiv/ste_ops.py"], "/quantlab/indiv/inq_ops.py": ["/quantlab/indiv/__init__.py"], "/quantlab/protocol/rooms.py": ["/quantlab/indiv/__init__.py"], "/plot_npz_tb.py": ["/eegnet_run.py"], "/quantlab/ImageNet/ResNet/__init__.py": ["/quantlab/ImageNet/ResNet/postprocess.py", "/quantlab/ImageNet/ResNet/resnet.py"]}
|
21,196
|
Lila14/multimds
|
refs/heads/master
|
/scripts/tad_negative_control.py
|
import numpy as np
import os
from matplotlib import pyplot as plt
import sys
# Negative control for TAD-boundary overlap: repeatedly sample random
# background regions and measure their overlap with differential TAD
# boundaries, then compare against the observed relocalization peaks.
mat = np.loadtxt("A_background_filtered.bed", dtype=object)
m = len(mat)
ns = []
num_peaks = int(sys.argv[1])    # number of observed peaks
num_overlap = int(sys.argv[2])  # observed peak/boundary overlap count

for i in range(100):
    # Sample num_peaks random rows (with replacement).
    # Bug fix: np.random.randint's upper bound is exclusive, so the original
    # `randint(0, m-1, ...)` could never pick the last row; use m.
    indices = np.random.randint(0, m, num_peaks)
    rand_mat = mat[indices]
    np.savetxt("negative_control.bed", rand_mat, fmt="%s", delimiter="\t")
    os.system("bedtools intersect -a negative_control.bed -b GM12878_combined_K562_100kb_differential_tad_boundaries.bed > intersection.bed")
    # NOTE(review): loadtxt returns a 1-D row for a single-line file, making
    # len() count columns instead of lines — TODO confirm against the data.
    intersection = np.loadtxt("intersection.bed", dtype=object)
    ns.append(len(intersection)/float(num_peaks))

plt.boxplot([ns, [num_overlap/float(num_peaks)]], labels=["Random A compartment", "Relocalization peaks"])
plt.ylabel("Fraction overlap with differential TAD boundaries")
plt.savefig("differential_tad_boundaries_enrichment")
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,197
|
Lila14/multimds
|
refs/heads/master
|
/scripts/loop_partners_polycomb.py
|
import os
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats as st
import sys
# Build, per chromosome, the strongest differential-loop partner of every locus.
res_kb = int(sys.argv[1])  # resolution in kb, used in input/output file names
# Start from clean output files (they are appended to via `>>` below).
if os.path.isfile("polycomb_enrichment.txt"):
    os.system("rm polycomb_enrichment.txt")
if os.path.isfile("enhancer_enrichment.txt"):
    os.system("rm enhancer_enrichment.txt")
# chr9 is deliberately absent from this list — TODO confirm why.
chroms = ["chr{}".format(chrom_num) for chrom_num in (1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)]
# partners[chrom][locus] = (partner_locus, fold_change); only the partner with
# the largest |fold change| is kept for each locus.
partners = {}
for chrom in chroms:
    partners[chrom] = {}
for chrom in chroms:
    with open("{}_{}kb_edgeR_output_sig.tsv".format(chrom, res_kb)) as infile:
        for line in infile:
            line = line.strip().split()
            loc1 = int(line[0])
            loc2 = int(line[1])
            fc = float(line[2])
            # Record loc2 as loc1's partner if it beats the current best |fc|
            # (EAFP: first sighting of a locus raises KeyError).
            try:
                old_fc = partners[chrom][loc1][1]
                if np.abs(fc) > np.abs(old_fc):
                    partners[chrom][loc1] = (loc2, fc)
            except KeyError:
                partners[chrom][loc1] = (loc2, fc)
            # Symmetrically record loc1 as loc2's partner.
            try:
                old_fc = partners[chrom][loc2][1]
                if np.abs(fc) > np.abs(old_fc):
                    partners[chrom][loc2] = (loc1, fc)
            except KeyError:
                partners[chrom][loc2] = (loc1, fc)
    # Redundant: the `with` block above already closed the file.
    infile.close()
# For each cell-type-specific enhancer peak, look up its strongest loop
# partner and append that partner's H3K27me3 (polycomb) or enhancer
# enrichment to the corresponding output file via awk.
with open("peaks_filtered_GM12878_only_enhancer.bed") as in_file:
    for line in in_file:
        line = line.strip().split()
        chrom = line[0]
        loc = int(line[1])
        try:
            partner, fc = partners[chrom][loc]
            if fc < 0: #loop in K562 only
                os.system("cat binding_data/wgEncodeBroadHistoneK562H3k27me3StdPk_%dkb_windows_enrichment.bed | awk '$1 == \"%s\" && $2 == %s {print $4}' >> polycomb_enrichment.txt"%(res_kb, chrom, partner))
            else: #loop in GM12878 only
                os.system("cat binding_data/GM12878_enhancers_%dkb_windows_enrichment.bed | awk '$1 == \"%s\" && $2 == %s {print $4}' >> enhancer_enrichment.txt"%(res_kb, chrom, partner))
        except KeyError:
            # Locus has no significant differential loop; skip it.
            pass
# Redundant: the `with` block already closed the file.
in_file.close()
with open("peaks_filtered_K562_only_enhancer.bed") as in_file:
    for line in in_file:
        line = line.strip().split()
        chrom = line[0]
        loc = int(line[1])
        try:
            partner, fc = partners[chrom][loc]
            if fc > 0: #loop in GM12878 only
                os.system("cat binding_data/wgEncodeBroadHistoneGm12878H3k27me3StdPkV2_%dkb_windows_enrichment.bed | awk '$1 == \"%s\" && $2 == %s {print $4}' >> polycomb_enrichment.txt"%(res_kb, chrom, partner))
            else: #loop in K562 only
                os.system("cat binding_data/K562_enhancers_%dkb_windows_enrichment.bed | awk '$1 == \"%s\" && $2 == %s {print $4}' >> enhancer_enrichment.txt"%(res_kb, chrom, partner))
        except KeyError:
            pass
in_file.close()
# Peaks shared by both cell types: all partners go to the polycomb file.
with open("peaks_filtered_both_enhancer.bed") as in_file:
    for line in in_file:
        line = line.strip().split()
        chrom = line[0]
        loc = int(line[1])
        try:
            partner, fc = partners[chrom][loc]
            os.system("cat binding_data/GM12878_enhancers_%dkb_windows_enrichment.bed | awk '$1 == \"%s\" && $2 == %s {print $4}' >> polycomb_enrichment.txt"%(res_kb, chrom, partner))
        except KeyError:
            pass
in_file.close()
# Compare partner enrichment against the background A-compartment.
# NOTE(review): this file is Python 2 (`print` statement below).
os.system("bedtools coverage -a A_background_filtered.bed -b binding_data/wgEncodeBroadHistoneGm12878H3k27me3StdPkV2.broadPeak > A_background_filtered_polycomb.bed")
partner_enrichment = np.loadtxt("polycomb_enrichment.txt")
mat = np.loadtxt("A_background_filtered_polycomb.bed", dtype=object)
background_enrichment = np.array(mat[:,3], dtype=float)
print st.ttest_ind(background_enrichment, partner_enrichment)
plt.hist(background_enrichment, bins=30)
plt.show()
plt.hist(partner_enrichment, bins=30)
plt.show()
# NOTE(review): everything below is dead code — the script exits here, and
# the remainder references `enrichments1`/`enrichments2`, which are never
# defined (would raise NameError if reached).
sys.exit(0)
x_int_size = 0.1
x_start = -x_int_size/5.
x_end = max((max(enrichments1), max(enrichments2)))
plt.subplot2grid((10,10), (0,0), 9, 10, frameon=False)
counts, bounds, patches = plt.hist(background_enrichment)
y_int_size = 2000
y_start = y_int_size/5.
y_end = counts[0] - y_int_size/5.
plt.title("Background A compartment", fontsize=14)
plt.xlabel("H3K27me3", fontsize=14)
plt.axis([x_start, x_end, y_start, y_end], frameon=False)
plt.axvline(x=x_start, color="k", lw=4)
plt.axhline(y=y_start, color="k", lw=6)
plt.tick_params(direction="out", top=False, right=False, length=12, width=3, pad=5, labelsize=8)
plt.savefig("background_h3k27me3_coverage")
plt.show()
plt.subplot2grid((10,10), (0,0), 9, 10, frameon=False)
counts, bounds, patches = plt.hist(enrichments2)
y_int_size = 10
y_start = y_int_size/5.
y_end = counts[0] - y_int_size/5.
plt.title("Loop partners of lost enhancers", fontsize=14)
plt.xlabel("H3K27me3", fontsize=14)
plt.axis([x_start, x_end, y_start, y_end], frameon=False)
plt.axvline(x=x_start, color="k", lw=4)
plt.axhline(y=y_start, color="k", lw=6)
plt.tick_params(direction="out", top=False, right=False, length=12, width=3, pad=5, labelsize=8)
plt.savefig("loop_partner_h3k27me3_coverage")
plt.show()
#plt.boxplot([background_enrichment, partner_enrichment], labels=("Background A compartment", "Loop partners"))
#plt.ylabel("H3K27me3 enrichment")
#plt.savefig("polycomb_enrichment")
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,198
|
Lila14/multimds
|
refs/heads/master
|
/scripts/sup3.py
|
# Supplementary figure 3: run multimds on S. cerevisiae chr13 control vs
# galactose, then plot the resulting structures with locus 852000 highlighted.
import os
import numpy as np
import sys
sys.path.append("..")  # make the parent-directory modules importable
import data_tools as dt
import plotting as plot
os.system("python ../multimds.py -P 0.1 -w 0 ctrl_Scer_13_32kb.bed galactose_Scer_13_32kb.bed")
# NOTE(review): the command above processes *Scer* files but the structures
# loaded below are *Suva* — presumably produced by an earlier run; confirm
# this mismatch is intentional.
struct1 = dt.structure_from_file("ctrl_Suva_13_32kb_structure.tsv")
struct2 = dt.structure_from_file("galactose_Suva_13_32kb_structure.tsv")
# Color vector: 0 everywhere, 1 at the bin containing locus 852000.
colors = np.zeros_like(struct1.getPoints(), dtype=int)
colors[struct1.get_rel_index(852000)] = 1
plot.plot_structures_interactive((struct1, struct2), (colors, colors))
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,199
|
Lila14/multimds
|
refs/heads/master
|
/scripts/dist_vs_compartment.py
|
import sys
sys.path.append("..")
from matplotlib import pyplot as plt
import data_tools as dt
import numpy as np
import compartment_analysis as ca
from scipy import stats as st
import linear_algebra as la
import os
from sklearn import svm
# Parameters: 100kb resolution, comparing GM12878 vs K562 per autosome.
res_kb = 100
cell_type1 = "GM12878_combined"
cell_type2 = "K562"
# NOTE: chromosome 9 is absent from this list.
chroms = (1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)
# Per-chromosome correlation of each change metric with compartment changes.
multimds_z_rs = np.zeros_like(chroms, dtype=float)
contacts_pearson_rs = np.zeros_like(chroms, dtype=float)
contacts_spearman_rs = np.zeros_like(chroms, dtype=float)
for j, chrom in enumerate(chroms):
    path1 = "hic_data/{}_{}_{}kb.bed".format(cell_type1, chrom, res_kb)
    path2 = "hic_data/{}_{}_{}kb.bed".format(cell_type2, chrom, res_kb)
    # Run MultiMDS externally; it writes *_structure.tsv files read below.
    os.system("python ../multimds.py --full {} {}".format(path1, path2))
    #load structures
    structure1 = dt.structure_from_file("{}_{}_{}kb_structure.tsv".format(cell_type1, chrom, res_kb))
    structure2 = dt.structure_from_file("{}_{}_{}kb_structure.tsv".format(cell_type2, chrom, res_kb))
    #rescale
    structure1.rescale()
    structure2.rescale()
    #make structures compatible
    dt.make_compatible((structure1, structure2))
    #compartments
    mat1 = dt.matFromBed(path1, structure1)
    mat2 = dt.matFromBed(path2, structure2)
    compartments1 = ca.get_compartments(mat1)
    compartments2 = ca.get_compartments(mat2)
    # Compartment eigenvector sign is arbitrary; flip one to match the other.
    r, p = st.pearsonr(compartments1, compartments2)
    if r < 0:
        compartments2 = -compartments2
    compartment_diffs = compartments1 - compartments2
    #SVR: find the axis that best separates compartments across both structures
    coords1 = structure1.getCoords()
    coords2 = structure2.getCoords()
    coords = np.concatenate((coords1, coords2))
    compartments = np.concatenate((compartments1, compartments2))
    clf = svm.LinearSVR()
    clf.fit(coords, compartments)
    coef = clf.coef_
    transformed_coords1 = np.array(la.change_coordinate_system(coef, coords1))
    transformed_coords2 = np.array(la.change_coordinate_system(coef, coords2))
    # After the change of coordinate system, column 2 (z) tracks the SVR axis.
    z_diffs = transformed_coords1[:,2] - transformed_coords2[:,2]
    r, p = st.pearsonr(z_diffs, compartment_diffs)
    multimds_z_rs[j] = r
    #contacts Pearson: per-bin correlation of contact rows between cell types
    rs = np.zeros(len(mat1))
    for i, (row1, row2) in enumerate(zip(mat1, mat2)):
        rs[i], p = st.pearsonr(row1, row2)
    r, p = st.pearsonr(1-rs, np.abs(compartment_diffs))
    contacts_pearson_rs[j] = r
    #contacts Spearman
    rs = np.zeros(len(mat1))
    for i, (row1, row2) in enumerate(zip(mat1, mat2)):
        rs[i], p = st.spearmanr(row1, row2)
    r, p = st.pearsonr(1-rs, np.abs(compartment_diffs))
    contacts_spearman_rs[j] = r
#start with a frameless plot (extra room on the left)
plt.subplot2grid((10,10), (0,0), 9, 10, frameon=False)
#label axes
plt.ylabel("Correlation with compartment changes", fontsize=14)
#define offsets
xs = np.arange(len(chroms))
xmin = min(xs)
xmax = max(xs)
x_range = xmax - xmin
x_start = xmin - x_range/15.    #bigger offset for bar plot
x_end = xmax + x_range/15.
ymin = 0
# BUG FIX: the original also included max(independent_z_rs) here, but
# independent_z_rs is never defined in this script and raised NameError.
ymax = max([max(multimds_z_rs), max(contacts_pearson_rs), max(contacts_spearman_rs)])
y_range = ymax - ymin
y_start = ymin - y_range/25.
y_end = ymax + y_range/25.
width = 0.2
#plot data: three grouped bars per chromosome
plt.bar(xs, multimds_z_rs, width=width, bottom=y_start, label="MultiMDS")
plt.bar(xs+width, contacts_pearson_rs, width=width, bottom=y_start, label="Vector pearson r")
plt.bar(xs+2*width, contacts_spearman_rs, width=width, bottom=y_start, label="Vector spearman r")
#define axes with offsets
plt.axis([x_start, x_end, y_start, y_end], frameon=False)
#plot axes (black with line width of 4)
plt.axvline(x=x_start, color="k", lw=4)
plt.axhline(y=y_start, color="k", lw=4)
#plot ticks
plt.xticks(xs, chroms)
plt.tick_params(direction="out", top=False, right=False, length=12, width=3, pad=5, labelsize=12)
plt.legend()
plt.savefig("dist_vs_compartment")
plt.show()
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,200
|
Lila14/multimds
|
refs/heads/master
|
/scripts/get_sig.py
|
"""Apply Benjamini-Hochberg FDR correction to an edgeR table and write the
significant (q < 0.01) interaction loci as paired BED files, then filter both
ends by mappability (> 0.8) using bedtools."""
from statsmodels.stats.multitest import multipletests
import sys
import os

in_path = sys.argv[1]
prefix = in_path.split(".")[0]
res = int(sys.argv[2])    # bin resolution in bp
# First pass: collect the raw p-values (column 5 of the edgeR table).
ps = []
with open(in_path) as in_file:
    for line in in_file:
        line = line.strip().split()
        if line[0] != "\"logFC\"": #skip header
            ps.append(float(line[4]))
    in_file.close()    # redundant: the with statement already closes the file
reject, qs, alphacSidak, alphacBonf = multipletests(ps, method="fdr_bh")
# Second pass: write the two anchor loci of each significant interaction.
i = 0
out1 = open(prefix + "_loc1.bed", "w")
out2 = open(prefix + "_loc2.bed", "w")
with open(in_path) as in_file:
    for line in in_file:
        line = line.strip().split()
        if line[0] != "\"logFC\"":
            # Row identifier has the form "chrom:loc1,loc2" (quoted).
            loc_id = line[0].strip("\"").split(":")
            chrom = loc_id[0]
            loc1, loc2 = loc_id[1].split(",")
            if qs[i] < 0.01:
                # loc1 record also carries the logFC value (line[1]).
                out1.write("\t".join((chrom, loc1, str(int(loc1) + res), line[1])))
                out1.write("\n")
                out2.write("\t".join((chrom, loc2, str(int(loc2) + res))))
                out2.write("\n")
            i += 1    # advance q-value index for every data row
    in_file.close()    # redundant: the with statement already closes the file
out1.close()
out2.close()
# Keep only pairs where both anchors have mappability > 0.8.
os.system("bedtools intersect -a %s_loc1.bed -b mappability.bed -wb > %s_loc1_mappability.bed"%(prefix, prefix))
os.system("bedtools intersect -a %s_loc2.bed -b mappability.bed -wb > %s_loc2_mappability.bed"%(prefix, prefix))
os.system("paste %s_loc1_mappability.bed %s_loc2_mappability.bed | awk '$8 > 0.8 && $15 > 0.8 {print $2\"\t\"$10\"\t\"$4}' > %s_sig.tsv"%(prefix, prefix, prefix))
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,201
|
Lila14/multimds
|
refs/heads/master
|
/scripts/test_plot.py
|
"""Sanity check: load two saved MultiMDS structures and view them interactively."""
import sys
sys.path.append("..")    # make the repo root importable
import data_tools as dt
import plotting as plot

# Pre-computed structures for the same chromosome (chr21, 100kb) in two cell types.
struct1 = dt.structure_from_file("GM12878_combined_21_100kb_structure.tsv")
struct2 = dt.structure_from_file("K562_21_100kb_structure.tsv")
plot.plot_structures_interactive((struct1, struct2))
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,202
|
Lila14/multimds
|
refs/heads/master
|
/scripts/plot_compartment_strength.py
|
"""Overlay compartment eigenvector profiles from several Hi-C BED files."""
from matplotlib import pyplot as plt
import sys
sys.path.append("..")
import compartment_analysis as ca
import data_tools as dt
import os

paths = sys.argv[1:len(sys.argv)]    # one or more Hi-C BED files
prefixes = [os.path.basename(path) for path in paths]
structs = [dt.structureFromBed(path) for path in paths]
mats = [dt.matFromBed(path, struct) for path, struct in zip(paths, structs)]
all_comps = [ca.get_compartments(mat) for mat in mats]
all_gen_coords = [struct.getGenCoords() for struct in structs]
#all_comps[len(all_comps)-1] = -all_comps[len(all_comps)-1]
# One curve per input file, labeled by its filename.
for gen_coords, comps, prefix in zip(all_gen_coords, all_comps, prefixes):
    plt.plot(gen_coords, comps, label=prefix)
plt.legend()
plt.show()
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,203
|
Lila14/multimds
|
refs/heads/master
|
/scripts/tadlib_input.py
|
"""Export Hi-C BED contacts as per-chromosome TADlib input files (i, j, count)."""
import sys
sys.path.append("..")
import data_tools as dt
import os

cell_type = sys.argv[1]
os.system("mkdir -p {}_tadlib_input".format(cell_type))
# NOTE: chromosome 9 is absent from this list.
for chrom in (1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22):
    path = "hic_data/{}_{}_100kb.bed".format(cell_type, chrom)
    structure = dt.structureFromBed(path)
    mat = dt.matFromBed(path, structure)
    points = structure.getPoints()
    with open("{}_tadlib_input/chr{}.txt".format(cell_type, chrom), "w") as out:
        # Lower triangle only (j < i); zero counts are skipped.
        for i in range(len(mat)):
            point_num1 = points[i].absolute_index
            for j in range(i):
                if mat[i,j] != 0:
                    point_num2 = points[j].absolute_index
                    out.write("\t".join((str(point_num1), str(point_num2), str(mat[i,j]))))
                    out.write("\n")
        out.close()    # redundant: the with statement already closes the file
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,204
|
Lila14/multimds
|
refs/heads/master
|
/scripts/convert_to_bed.py
|
"""Convert yeast Hi-C matrix files to BED format using a shared bin map.

The original script duplicated the conversion loop for the two conditions;
it is factored into helpers here, and a __main__ guard makes the module
importable without side effects.
"""
import os


def load_chrom_bins(path):
    """Map bin ID (column 4) -> "chrom\\tstart\\tend" from a bins BED file."""
    chrom_bins = {}
    with open(path) as in_file:
        for line in in_file:
            fields = line.strip().split()
            chrom_bins[fields[3]] = "{}\t{}\t{}".format(fields[0], fields[1], fields[2])
    return chrom_bins


def convert_matrix(matrix_path, out_path, chrom_bins):
    """Write one BED row per non-zero matrix entry (bin1, bin2, value)."""
    with open(out_path, "w") as out_file:
        with open(matrix_path) as in_file:
            for line in in_file:
                fields = line.strip().split()
                # Look up both bins first (matches original behavior: unknown
                # bins raise KeyError even for zero-valued entries).
                chrom_string1 = chrom_bins[fields[0]]
                chrom_string2 = chrom_bins[fields[1]]
                if float(fields[3]) != 0:
                    out_file.write("\t".join((chrom_string1, chrom_string2, fields[3])))
                    out_file.write("\n")


if __name__ == "__main__":
    chrom_bins = load_chrom_bins("GSE88952_Sc_Su.32000.bed")
    if not os.path.isfile("ctrl_32kb.bed"):
        convert_matrix("ctrl_32kb_matrix.txt", "ctrl_32kb.bed", chrom_bins)
    if not os.path.isfile("galactose_32kb.bed"):
        convert_matrix("galactose_32kb_matrix.txt", "galactose_32kb.bed", chrom_bins)
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,205
|
Lila14/multimds
|
refs/heads/master
|
/scripts/edger_input.py
|
import sys
sys.path.append("..")
import data_tools as dt
import array_tools as at
import numpy as np
def compatible_chroms(paths):
    """Load the chromosome object for each BED path and clamp all of them
    to the shared (consensus) coordinate range, so their matrices align."""
    chroms = [dt.chromFromBed(path) for path in paths]
    # Consensus range: the tightest interval covered by every dataset.
    consensus_min = max(chrom.minPos for chrom in chroms)
    consensus_max = min(chrom.maxPos for chrom in chroms)
    for chrom in chroms:
        chrom.minPos = consensus_min
        chrom.maxPos = consensus_max
    return chroms
def fullMatFromBed(path, chrom):
    """Convert an intrachromosomal BED contact file to a dense symmetric matrix.

    path -- BED file with fields [chrom1, start1, end1, chrom2, start2, end2, count]
    chrom -- chromosome object supplying minPos/maxPos/res and getAbsoluteIndex()
    """
    # BUG FIX: floor division keeps numpoints an int under Python 3
    # (true division yields a float and np.zeros rejects float shapes).
    numpoints = (chrom.maxPos - chrom.minPos)//chrom.res + 1
    mat = np.zeros((numpoints, numpoints))
    with open(path) as infile:
        for line in infile:
            line = line.strip().split()    #line as array of strings
            loc1 = int(line[1])
            loc2 = int(line[4])
            index1 = chrom.getAbsoluteIndex(loc1)
            index2 = chrom.getAbsoluteIndex(loc2)
            # Accumulate counts in the lower triangle (row >= col).
            if index1 > index2:
                row, col = index1, index2
            else:
                row, col = index2, index1
            mat[row, col] += float(line[6])
    # Mirror the lower triangle into the upper triangle.
    at.makeSymmetric(mat)
    return mat
# Build one edgeR count table per chromosome: rows are contact pairs,
# columns are the counts from each cell type.
res_kb = int(sys.argv[1])
cell_types = ("K562", "GM12878_primary", "GM12878_replicate")
# NOTE: chromosome 9 is absent from this list.
for chrom_name in (1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22):
    paths = ["hic_data/{}_{}_{}kb.bed".format(cell_type, chrom_name, res_kb) for cell_type in cell_types]
    chroms = compatible_chroms(paths)
    mats = [fullMatFromBed(path, chrom) for path, chrom in zip(paths, chroms)]
    sum_mat = np.sum(mats, 0)
    # BUG FIX: the original read "chrom" leaked out of the list comprehension
    # above, which only works under Python 2. After compatible_chroms() every
    # chromosome shares the same minPos, so the first one is representative.
    ref_chrom = chroms[0]
    with open("chr{}_{}kb_edgeR_table.tsv".format(chrom_name, res_kb), "w") as out:
        out.write("Symbol\t")
        out.write("\t".join(cell_types))    #header
        out.write("\n")
        for i in range(len(sum_mat[0])):
            for j in range(i):
                if sum_mat[i,j] != 0:    #at least one element is non-zero
                    loc1 = ref_chrom.minPos + ref_chrom.res * j
                    loc2 = ref_chrom.minPos + ref_chrom.res * i
                    out.write("chr{}:{},{}\t".format(chrom_name, loc1, loc2))    #identifier
                    out.write("\t".join([str(mat[i,j]) for mat in mats]))
                    out.write("\n")
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,206
|
Lila14/multimds
|
refs/heads/master
|
/relocalization_peaks.py
|
import numpy as np
import data_tools as dt
import sys
import os
import linear_algebra as la
import array_tools as at
from scipy import signal as sg
from hmmlearn import hmm
import argparse
def call_peaks(data):
    """Call peaks in a 1-D signal using a two-state Gaussian hidden Markov model.

    Returns a list of [start, end) index pairs, one per contiguous peak region.
    """
    reshaped_data = data.reshape(-1,1)
    model = hmm.GaussianHMM(n_components=2).fit(reshaped_data)
    scores = model.predict(reshaped_data)
    # Determine which hidden state corresponds to peaks: relabel so that the
    # state with the higher mean signal is 1.
    zero_indices = np.where(scores == 0)
    one_indices = np.where(scores == 1)
    zero_data = data[zero_indices]
    one_data = data[one_indices]
    if np.mean(zero_data) > np.mean(one_data):
        scores[zero_indices] = 1
        scores[one_indices] = 0
    #find boundaries of peaks
    peaks = []
    in_peak = False
    for i, score in enumerate(scores):
        if in_peak and score == 0:    #end of peak
            in_peak = False
            peak.append(i)
            peaks.append(peak)
        elif not in_peak and score == 1:    #start of peak
            in_peak = True
            peak = [i]
    # BUG FIX: a peak extending to the end of the signal was silently dropped.
    if in_peak:
        peak.append(len(scores))
        peaks.append(peak)
    return peaks
def main():
    """CLI entry point: run joint MDS several times, keep the best-aligned run,
    and write relocalization peaks to a BED file."""
    parser = argparse.ArgumentParser(description="Identify locus-specific changes between Hi-C datasets")
    parser.add_argument("path1", help="path to intrachromosomal Hi-C BED file 1")
    parser.add_argument("path2", help="path to intrachromosomal Hi-C BED file 2")
    parser.add_argument("-N", default=4, help="number of partitions")
    parser.add_argument("-m", default=0, help="genomic coordinate of centromere")
    parser.add_argument("-s", default=3, help="smoothing parameter for calling relocalization peaks")
    parser.add_argument("-x", default="", help="prefix to minimds.py")
    args = parser.parse_args()
    n = 5    # number of independent MDS runs
    # NOTE(review): assumes each path contains exactly one "/" -- confirm callers.
    dir1, name1 = args.path1.split("/")
    dir2, name2 = args.path2.split("/")
    prefix1 = name1.split(".")[0]
    prefix2 = name2.split(".")[0]
    min_error = sys.float_info.max
    best_iteration = 0    # robustness: defined even if no run improves min_error
    for iteration in range(n):
        os.system("python {}minimds.py -m {} -N {} -o {}_ {} {}".format(args.x, args.m, args.N, iteration, args.path1, args.path2))
        #load structures
        structure1 = dt.structure_from_file("{}/{}_{}_structure.tsv".format(dir1, iteration, prefix1))
        structure2 = dt.structure_from_file("{}/{}_{}_structure.tsv".format(dir2, iteration, prefix2))
        #rescale
        structure1.rescale()
        structure2.rescale()
        #make structures compatible
        dt.make_compatible((structure1, structure2))
        #align
        r, t = la.getTransformation(structure1, structure2)
        structure1.transform(r,t)
        #calculate error (mean per-locus distance between aligned structures)
        coords1 = np.array(structure1.getCoords())
        coords2 = np.array(structure2.getCoords())
        error = np.mean([la.calcDistance(coord1, coord2) for coord1, coord2 in zip(coords1, coords2)])
        if error < min_error:
            min_error = error
            best_iteration = iteration
    # Keep only the best run's output files; delete the rest.
    for iteration in range(n):
        if iteration == best_iteration:
            #load structures
            structure1 = dt.structure_from_file("{}/{}_{}_structure.tsv".format(dir1, iteration, prefix1))
            structure2 = dt.structure_from_file("{}/{}_{}_structure.tsv".format(dir2, iteration, prefix2))
        else:
            os.system("rm {}/{}_{}_structure.tsv".format(dir1, iteration, prefix1))
            os.system("rm {}/{}_{}_structure.tsv".format(dir2, iteration, prefix2))
    #rescale
    structure1.rescale()
    structure2.rescale()
    #make structures compatible
    dt.make_compatible((structure1, structure2))
    #tweak alignment
    r, t = la.getTransformation(structure1, structure2)
    structure1.transform(r,t)
    coords1 = np.array(structure1.getCoords())
    coords2 = np.array(structure2.getCoords())
    dists = [la.calcDistance(coord1, coord2) for coord1, coord2 in zip(coords1, coords2)]
    print(np.mean(dists))    # FIX: Python 2 print statement -> function call
    #smoothed_dists = sg.cwt(dists, sg.ricker, [float(args.s)])[0]
    #dist_peaks = call_peaks(smoothed_dists)
    # BUG FIX: find_peaks_cwt returns a flat array of peak *indices*, not
    # (start, end) pairs -- the original unpacked each index and crashed.
    dist_peaks = sg.find_peaks_cwt(dists, np.arange(1, 20))
    gen_coords = structure1.getGenCoords()
    with open("{}_{}_relocalization.bed".format(prefix1, prefix2), "w") as out:
        for peak_index in dist_peaks:
            # BUG FIX: the original write was also missing a closing parenthesis.
            out.write("\t".join(("{}".format(structure1.chrom.name), str(gen_coords[peak_index]), str(gen_coords[peak_index] + structure1.chrom.res))))
            out.write("\n")

if __name__ == "__main__":
    main()
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,207
|
Lila14/multimds
|
refs/heads/master
|
/scripts/call_peaks.py
|
"""Scan a per-bin relocalization table for local maxima in the A compartment
and write them to a BED file."""
import numpy as np
import sys

chrom = sys.argv[1]
res = 100000    # bin size in bp
# Columns (per the indexing below): [0] genomic start, [1] relocalization
# magnitude, [2] and [3] compartment scores in the two cell types.
mat = np.loadtxt("{}_relocalization.tsv".format(chrom))
with open("{}_peaks.bed".format(chrom), "w") as out:
    for i, row in enumerate(mat):
        # FIX: renamed prev/next -- "next" shadowed the builtin.
        prev_diff = 0 if i == 0 else mat[i-1,1]
        next_diff = 0 if i == len(mat) - 1 else mat[i+1,1]
        diff = row[1]
        # local max in A compartment (both compartment scores positive)
        if diff > prev_diff and diff > next_diff and row[2] > 0 and row[3] > 0:
            out.write("\t".join(("chr{}".format(chrom), str(int(row[0])), str(int(row[0] + res)), str(diff))))
            out.write("\n")
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,208
|
Lila14/multimds
|
refs/heads/master
|
/scripts/plot_relocalization.py
|
"""Plot per-locus relocalization (distance between aligned structures) for one
yeast chromosome in two strains, marking a gene of interest."""
import os
import sys
sys.path.append("/home/lur159/git/miniMDS")
import data_tools as dt
import linear_algebra as la
from matplotlib import pyplot as plt
import numpy as np

gene_name = sys.argv[1]
chrom_num = sys.argv[2]
gene_loc = int(sys.argv[3])    # genomic coordinate of the gene
prefix1 = sys.argv[4]
prefix2 = sys.argv[5]
res_kb = 32
max_dists = []
max_gencoords = []
plt.subplot2grid((10,10), (0,0), 9, 10, frameon=False)
for strain in ("Scer", "Suva"):
    chrom_name = "{}_{}".format(strain, chrom_num)
    os.system("python ~/git/multimds/multimds.py --full -P 0.1 -w 0 {}_{}_{}kb.bed {}_{}_{}kb.bed".format(prefix1, chrom_name, res_kb, prefix2, chrom_name, res_kb))
    struct1 = dt.structure_from_file("{}_{}_{}kb_structure.tsv".format(prefix1, chrom_name, res_kb))
    struct2 = dt.structure_from_file("{}_{}_{}kb_structure.tsv".format(prefix2, chrom_name, res_kb))
    dists = [la.calcDistance(coord1, coord2) for coord1, coord2 in zip(struct1.getCoords(), struct2.getCoords())]
    max_dists.append(max(dists))
    max_gencoords.append(max(struct1.getGenCoords()))
    plt.plot(struct1.getGenCoords(), dists, label=strain, lw=4)
x_int_size = 200000
ys = dists    # NOTE(review): uses dists from the last loop iteration (Suva)
y_int_size = 0.01
x_start = -x_int_size/4.
x_end = max(max_gencoords) + x_int_size/5.
y_start = -y_int_size/5.
y_end = max(max_dists) + y_int_size/5.
plt.title("chr{}".format(chrom_num), fontsize=14)
plt.xlabel("Genomic coordinate", fontsize=14)
plt.ylabel("Relocalization", fontsize=14)
plt.axis([x_start, x_end, y_start, y_end],frameon=False)
plt.axvline(x=x_start, color="k", lw=4)
plt.axhline(y=y_start, color="k", lw=6)
# FIX: this call was accidentally duplicated in the original.
plt.tick_params(direction="out", top=False, right=False, length=12, width=3, pad=5, labelsize=10)
# Mark the gene of interest (uses struct1 from the last loop iteration).
gen_coord = struct1.getGenCoords()[struct1.get_rel_index(gene_loc)]
plt.scatter([gen_coord], [0.005], c="g", s=50, marker="*")
plt.annotate(gene_name, (gen_coord+20000, 0.005))
plt.legend()
plt.show()
#plt.savefig(gene_name)
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,209
|
Lila14/multimds
|
refs/heads/master
|
/scripts/wig_to_bed.py
|
""""Convert fixedStep wig to binned bed"""
import sys
sys.path.append("..")
from tools import Tracker
wig = sys.argv[1]
bin_size = int(sys.argv[2])
file_size = int(sys.argv[3])
prefix = wig.split(".")[0]
tracker = Tracker("Converting {}".format(wig), file_size)
tot = 0
count = 0
with open(wig) as in_file:
with open("{}_{}kb.bed".format(prefix, bin_size/1000), "w") as out_file:
for line in in_file:
line = line.strip().split()
if line[0] == "fixedStep": #header
chrom = line[1].split("=")[1]
curr_pos = int(line[2].split("=")[1])
step = int(line[3].split("=")[1])
span = int(line[4].split("=")[1])
else:
tot += float(line[0])
count += span
if curr_pos%bin_size == 0:
if count == 0:
avg = 0
else:
avg = tot/count
out_file.write("\t".join((chrom, str(curr_pos-bin_size), str(curr_pos), str(avg))))
out_file.write("\n")
tot = 0 #re-initialize
count = 0
curr_pos += step
tracker.increment()
out_file.close()
in_file.close()
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,210
|
Lila14/multimds
|
refs/heads/master
|
/scripts/superenhancer_pie.py
|
"""Draw two pie charts: enhancer overlap at relocalization peaks vs at
background A-compartment bins. argv gives the four category counts."""
from matplotlib import pyplot as plt
import sys
from scipy import stats as st    # NOTE(review): imported but unused here

plt.pie((int(sys.argv[1]), int(sys.argv[2])), labels=("Enhancer", "No enhancer"))
plt.title("Relocalization peaks")
plt.savefig("relocalization_superenhancer_pie")
plt.close()
plt.pie((int(sys.argv[3]), int(sys.argv[4])), labels=("Enhancer", "No enhancer"))
plt.title("Background A compartment")
plt.savefig("background_superenhancer_pie")
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,211
|
Lila14/multimds
|
refs/heads/master
|
/scripts/test_multimds.py
|
"""Smoke test for Joint_MDS: jointly embed two cell types' Hi-C data for one
chromosome from precomputed distance matrices."""
import sys
sys.path.append("..")
import data_tools as dt
import numpy as np
from joint_mds import Joint_MDS

chrom = sys.argv[1]
res_kb = 100
prefix1 = "GM12878_combined"
prefix2 = "K562"
path1 = "hic_data/{}_{}_{}kb.bed".format(prefix1, chrom, res_kb)
path2 = "hic_data/{}_{}_{}kb.bed".format(prefix2, chrom, res_kb)
structure1 = dt.structureFromBed(path1, None, None)
structure2 = dt.structureFromBed(path2, None, None)
#make structures compatible
dt.make_compatible((structure1, structure2))
#get distance matrices
dists1 = dt.normalized_dist_mat(path1, structure1)
dists2 = dt.normalized_dist_mat(path2, structure2)
#joint MDS; p presumably weights the coupling between the two embeddings --
#confirm against joint_mds.py
coords1, coords2 = Joint_MDS(n_components=3, p=0.05, random_state1=np.random.RandomState(), random_state2=np.random.RandomState(), dissimilarity="precomputed", n_jobs=-1).fit_transform(dists1, dists2)
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,212
|
Lila14/multimds
|
refs/heads/master
|
/scripts/test_quantify_z.py
|
"""Per chromosome, measure how much inter-cell-type structural change falls
along the compartment (z) axis vs the two orthogonal axes, then box-plot the
fractional change and fractional axis length."""
from sklearn import svm
import numpy as np
import sys
sys.path.append("..")
import data_tools as dt
import compartment_analysis as ca
from matplotlib import pyplot as plt
import os
import linear_algebra as la
import array_tools as at
from scipy import stats as st
#import plotting as plot

res_kb = 100
cell_type1 = sys.argv[1]
cell_type2 = sys.argv[2]
chroms = range(1, int(sys.argv[3]))
# Per-chromosome mean absolute coordinate change along each transformed axis.
x_means = []
y_means = []
z_means = []
# Per-chromosome mean axis extent (averaged over both structures).
x_lengths = []
y_lengths = []
z_lengths = []
for chrom in chroms:
    path1 = "hic_data/{}_{}_{}kb.bed".format(cell_type1, chrom, res_kb)
    path2 = "hic_data/{}_{}_{}kb.bed".format(cell_type2, chrom, res_kb)
    if os.path.isfile(path1) and os.path.isfile(path2):
        os.system("python ../multimds.py --full -w 0 {} {}".format(path1, path2))
        structure1 = dt.structure_from_file("hic_data/{}_{}_{}kb_structure.tsv".format(cell_type1, chrom, res_kb))
        structure2 = dt.structure_from_file("hic_data/{}_{}_{}kb_structure.tsv".format(cell_type2, chrom, res_kb))
        #plot.plot_structures_interactive((structure1, structure2))
        #compartments
        contacts1 = dt.matFromBed(path1, structure1)
        contacts2 = dt.matFromBed(path2, structure2)
        at.makeSymmetric(contacts1)
        at.makeSymmetric(contacts2)
        compartments1 = np.array(ca.get_compartments(contacts1))
        compartments2 = np.array(ca.get_compartments(contacts2))
        # Eigenvector sign is arbitrary; flip for consistency between cell types.
        r, p = st.pearsonr(compartments1, compartments2)
        if r < 0:
            compartments2 = -compartments2
        #SVR: the axis separating compartments becomes the transformed z axis
        coords1 = structure1.getCoords()
        coords2 = structure2.getCoords()
        coords = np.concatenate((coords1, coords2))
        compartments = np.concatenate((compartments1, compartments2))
        clf = svm.LinearSVR()
        clf.fit(coords, compartments)
        coef = clf.coef_
        transformed_coords1 = np.array(la.change_coordinate_system(coef, coords1))
        transformed_coords2 = np.array(la.change_coordinate_system(coef, coords2))
        x_diffs = transformed_coords1[:,0] - transformed_coords2[:,0]
        y_diffs = transformed_coords1[:,1] - transformed_coords2[:,1]
        z_diffs = transformed_coords1[:,2] - transformed_coords2[:,2]
        x_means.append(np.mean(np.abs(x_diffs)))
        y_means.append(np.mean(np.abs(y_diffs)))
        z_means.append(np.mean(np.abs(z_diffs)))
        #axis lengths: mean absolute deviation from the centroid per axis
        centroid1 = np.mean(transformed_coords1, axis=0)
        centroid2 = np.mean(transformed_coords2, axis=0)
        x_length1 = np.mean([np.abs(coord1[0] - centroid1[0]) for coord1 in transformed_coords1])
        y_length1 = np.mean([np.abs(coord1[1] - centroid1[1]) for coord1 in transformed_coords1])
        z_length1 = np.mean([np.abs(coord1[2] - centroid1[2]) for coord1 in transformed_coords1])
        x_length2 = np.mean([np.abs(coord2[0] - centroid2[0]) for coord2 in transformed_coords2])
        y_length2 = np.mean([np.abs(coord2[1] - centroid2[1]) for coord2 in transformed_coords2])
        z_length2 = np.mean([np.abs(coord2[2] - centroid2[2]) for coord2 in transformed_coords2])
        x_lengths.append(np.mean((x_length1, x_length2)))
        y_lengths.append(np.mean((y_length1, y_length2)))
        z_lengths.append(np.mean((z_length1, z_length2)))
# Normalize per-axis means to fractions of the total change.
x_fractions = []
y_fractions = []
z_fractions = []
for x_mean, y_mean, z_mean in zip(x_means, y_means, z_means):
    tot = x_mean + y_mean + z_mean
    x_fractions.append(x_mean/tot)
    y_fractions.append(y_mean/tot)
    z_fractions.append(z_mean/tot)
print(np.mean(z_fractions))
x_length_fractions = []
y_length_fractions = []
z_length_fractions = []
for x_length, y_length, z_length in zip(x_lengths, y_lengths, z_lengths):
    tot = x_length + y_length + z_length
    x_length_fractions.append(x_length/tot)
    y_length_fractions.append(y_length/tot)
    z_length_fractions.append(z_length/tot)
print(x_fractions)
print(y_fractions)
print(z_fractions)
ind = np.arange(len(chroms))  # the x locations for the groups; NOTE(review): unused
width = 0.2  # the width of the bars; NOTE(review): unused
plt.boxplot([x_fractions, y_fractions, z_fractions], labels=["Orthogonal 1", "Orthogonal 2", "Compartment"])
plt.ylabel("Fractional change")
plt.savefig("{}_{}_change_by_axis".format(cell_type1, cell_type2))
#plt.show()
plt.close()
plt.boxplot([x_length_fractions, y_length_fractions, z_length_fractions], labels=["Orthogonal 1", "Orthogonal 2", "Compartment"])
plt.ylabel("Fractional length")
plt.savefig("{}_{}_axis_length".format(cell_type1, cell_type2))
#plt.show()
plt.close()
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,213
|
Lila14/multimds
|
refs/heads/master
|
/scripts/relocalization_peaks.py
|
import numpy as np
import sys
sys.path.append("..")
import data_tools as dt
import compartment_analysis as ca
import os
import linear_algebra as la
import array_tools as at
from scipy import signal as sg
from hmmlearn import hmm
def normalize(values):
    """Scale a sequence of values so that its largest entry equals 1."""
    peak = max(values)
    return np.array(values) / peak
def format_celltype(cell_type):
    """Normalize a cell-type name for binding-data file lookup.

    "KBM7" is substituted with "K562"; otherwise the text before the first
    underscore is kept with its first character upper-cased and the rest
    lower-cased (e.g. "GM12878_combined" -> "Gm12878").
    """
    if cell_type == "KBM7":
        return "K562" #substitute
    formatted = cell_type.split("_")[0]
    # idiom fix: slice to the end instead of [1:len(formatted)]
    return formatted[0].upper() + formatted[1:].lower()
def call_peaks(data):
    """Call peaks in a 1-D signal using a two-state Gaussian hidden Markov model.

    Returns a list of [start, end) index pairs, one per contiguous peak region.
    """
    reshaped_data = data.reshape(-1,1)
    model = hmm.GaussianHMM(n_components=2).fit(reshaped_data)
    scores = model.predict(reshaped_data)
    # Determine which hidden state corresponds to peaks: relabel so that the
    # state with the higher mean signal is 1.
    zero_indices = np.where(scores == 0)
    one_indices = np.where(scores == 1)
    zero_data = data[zero_indices]
    one_data = data[one_indices]
    if np.mean(zero_data) > np.mean(one_data):
        scores[zero_indices] = 1
        scores[one_indices] = 0
    #find boundaries of peaks
    peaks = []
    in_peak = False
    for i, score in enumerate(scores):
        if in_peak and score == 0:    #end of peak
            in_peak = False
            peak.append(i)
            peaks.append(peak)
        elif not in_peak and score == 1:    #start of peak
            in_peak = True
            peak = [i]
    # BUG FIX: a peak extending to the end of the signal was silently dropped.
    if in_peak:
        peak.append(len(scores))
        peaks.append(peak)
    return peaks
# Script body: run MultiMDS on two cell types for one chromosome, align the
# structures, and call relocalization / compartment-change peaks.
cell_type1 = sys.argv[1]
cell_type2 = sys.argv[2]
chrom = sys.argv[3]
#centromere = sys.argv[4]
#num_partitions = sys.argv[5]
smoothing_parameter = float(sys.argv[6])
res = int(sys.argv[7])    # bin resolution in bp
# NOTE(review): true division under Python 3 would make res_kb a float and
# change the file names below; this script appears to target Python 2 (see
# the print statement further down).
res_kb = res/1000
#n = 1
#path1 = "hic_data/{}_{}_{}kb_filtered.bed".format(cell_type1, chrom, res_kb)
#path2 = "hic_data/{}_{}_{}kb_filtered.bed".format(cell_type2, chrom, res_kb)
path1 = "hic_data/{}_{}_{}kb.bed".format(cell_type1, chrom, res_kb)
path2 = "hic_data/{}_{}_{}kb.bed".format(cell_type2, chrom, res_kb)
#min_error = sys.float_info.max
#for iteration in range(n):
#os.system("python ../multimds.py -m {} -N {} -o {}_ {} {}".format(centromere, num_partitions, iteration, path1, path2))
os.system("python ../multimds.py {} {}".format(path1, path2))
#load structures
#structure1 = dt.structure_from_file("/data/drive1/test/archive/multimds/scripts/hic_data/{}_{}_{}_{}kb_filtered_structure.tsv".format(iteration, cell_type1, chrom, res_kb))
#structure2 = dt.structure_from_file("/data/drive1/test/archive/multimds/scripts/hic_data/{}_{}_{}_{}kb_filtered_structure.tsv".format(iteration, cell_type2, chrom, res_kb))
structure1 = dt.structure_from_file("{}_{}_{}kb_structure.tsv".format(cell_type1, chrom, res_kb))
structure2 = dt.structure_from_file("{}_{}_{}kb_structure.tsv".format(cell_type2, chrom, res_kb))
#rescale
structure1.rescale()
structure2.rescale()
#make structures compatible
dt.make_compatible((structure1, structure2))
#align
r, t = la.getTransformation(structure1, structure2)
structure1.transform(r,t)
#calculate error
#coords1 = np.array(structure1.getCoords())
#coords2 = np.array(structure2.getCoords())
#error = np.mean([la.calcDistance(coord1, coord2) for coord1, coord2 in zip(coords1, coords2)])
#if error < min_error:
#	min_error = error
#	best_iteration = iteration
#for iteration in range(n):
#	if iteration == best_iteration:
#load structures
#		structure1 = dt.structure_from_file("/data/drive1/test/archive/multimds/scripts/hic_data/{}_{}_{}_{}kb_filtered_structure.tsv".format(iteration, cell_type1, chrom, res_kb))
#		structure2 = dt.structure_from_file("/data/drive1/test/archive/multimds/scripts/hic_data/{}_{}_{}_{}kb_filtered_structure.tsv".format(iteration, cell_type2, chrom, res_kb))
#	else:
#		os.system("rm /data/drive1/test/archive/multimds/scripts/hic_data/{}_{}_{}_{}kb_filtered_structure.tsv".format(iteration, cell_type1, chrom, res_kb))
#		os.system("rm /data/drive1/test/archive/multimds/scripts/hic_data/{}_{}_{}_{}kb_filtered_structure.tsv".format(iteration, cell_type2, chrom, res_kb))
# NOTE(review): rescale/make_compatible/align are applied a second time below;
# this looks like leftover from a commented-out refactor -- confirm intent.
#rescale
structure1.rescale()
structure2.rescale()
#make structures compatible
dt.make_compatible((structure1, structure2))
#align
r, t = la.getTransformation(structure1, structure2)
structure1.transform(r,t)
#calculate error (per-locus distances between aligned structures)
coords1 = np.array(structure1.getCoords())
coords2 = np.array(structure2.getCoords())
dists = [la.calcDistance(coord1, coord2) for coord1, coord2 in zip(coords1, coords2)]
print np.mean(dists)
#compartments
contacts1 = dt.matFromBed(path1, structure1)
contacts2 = dt.matFromBed(path2, structure2)
at.makeSymmetric(contacts1)
at.makeSymmetric(contacts2)
# Active-mark coverage, used to orient the compartment eigenvector.
enrichments = np.array(np.loadtxt("binding_data/Gm12878_{}_{}kb_active_coverage.bed".format(chrom, res_kb), dtype=object)[:,6], dtype=float)
bin_nums = structure1.nonzero_abs_indices() + structure1.chrom.minPos/structure1.chrom.res
enrichments = enrichments[bin_nums]
compartments1 = np.array(ca.get_compartments(contacts1, enrichments))
enrichments = np.array(np.loadtxt("binding_data/K562_{}_{}kb_active_coverage.bed".format(chrom, res_kb), dtype=object)[:,6], dtype=float)
bin_nums = structure1.nonzero_abs_indices() + structure1.chrom.minPos/structure1.chrom.res
enrichments = enrichments[bin_nums]
compartments2 = np.array(ca.get_compartments(contacts2, enrichments))
gen_coords = structure1.getGenCoords()
# Normalize both signals to [0, 1] before smoothing and peak calling.
dists = normalize(dists)
compartment_diffs = np.abs(compartments1 - compartments2)
compartment_diffs = normalize(compartment_diffs)
smoothed_dists = sg.cwt(dists, sg.ricker, [smoothing_parameter])[0]
dist_peaks = call_peaks(smoothed_dists)
smoothed_diffs = sg.cwt(compartment_diffs, sg.ricker, [smoothing_parameter])[0]
diff_peaks = call_peaks(smoothed_diffs)
gen_coords = structure1.getGenCoords()
with open("{}_dist_peaks.bed".format(chrom), "w") as out:
    for peak in dist_peaks:
        start, end = peak
        peak_dists = dists[start:end]
        # Report the bin with the maximum relocalization inside the peak.
        max_dist_index = np.argmax(peak_dists) + start
        #out.write("\t".join(("{}".format(structure1.chrom.name), str(gen_coords[start]), str(gen_coords[end]), str(gen_coords[max_dist_index]))))
        out.write("\t".join((structure1.chrom.name, str(gen_coords[max_dist_index]), str(gen_coords[max_dist_index] + structure1.chrom.res), str(compartments1[max_dist_index]), str(compartments2[max_dist_index]))))
        out.write("\n")
    out.close()
with open("{}_comp_peaks.bed".format(chrom), "w") as out:
    for peak in diff_peaks:
        start, end = peak
        peak_diffs = compartment_diffs[start:end]
        max_diff_index = np.argmax(peak_diffs) + start
        out.write("\t".join((structure1.chrom.name, str(gen_coords[max_diff_index]), str(gen_coords[max_diff_index] + structure1.chrom.res))))
        #out.write("\t".join((structure1.chrom.name, str(gen_coords[peak]), str(gen_coords[peak] + structure1.chrom.res))))
        out.write("\n")
    out.close()
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,214
|
Lila14/multimds
|
refs/heads/master
|
/scripts/differential_tad_boundaries.py
|
cell_type1 = "GM12878_combined"
cell_type2 = "K562"
res = 100000
boundaries = []
with open("{}_tadlib_output.txt".format(cell_type1)) as in_file:
for line in in_file:
line = line.split()
boundary1 = line[0] + "-" + line[1]
if boundary1 not in boundaries:
boundaries.append(boundary1)
boundary2 = line[0] + "-" + line[2]
if boundary2 not in boundaries:
boundaries.append(boundary2)
in_file.close()
unique = []
with open("{}_tadlib_output.txt".format(cell_type2)) as in_file:
for line in in_file:
line = line.split()
boundary1 = line[0] + "-" + line[1]
if boundary1 not in boundaries and boundary1 not in unique:
unique.append(boundary1)
boundary2 = line[0] + "-" + line[2]
if boundary2 not in boundaries and boundary2 not in unique:
unique.append(boundary2)
in_file.close()
with open("{}_{}_{}kb_differential_tad_boundaries.bed".format(cell_type1, cell_type2, res/1000), "w") as out_file:
for boundary in unique:
chrom, loc = boundary.split("-")
out_file.write("\t".join(("chr{}".format(chrom), loc, str(int(loc) + res))))
out_file.write("\n")
out_file.close()
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,215
|
Lila14/multimds
|
refs/heads/master
|
/joint_mds.py
|
"""
Jointly perform multi-dimensional Scaling (MDS) on two datasets
"""
# original author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# modified by: Lila Rieber <lur159@psu.edu>
# License: BSD
import numpy as np
import sys
import warnings
from sklearn.base import BaseEstimator
from sklearn.metrics import euclidean_distances
from sklearn.utils import check_random_state, check_array, check_symmetric
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.isotonic import IsotonicRegression
def squared_dist(x1, x2):
    """Computes squared Euclidean distance between coordinate x1 and coordinate x2"""
    total = 0
    for a, b in zip(x1, x2):
        delta = a - b
        total += delta * delta
    return total
def ssd(X1, X2):
    """Computes sum of squared distances between coordinates X1 and coordinates X2"""
    return sum(squared_dist(a, b) for a, b in zip(X1, X2))
def moore_penrose(V):
    """Computes Moore-Penrose inverse of matrix V"""
    n = len(V)
    ones = np.ones((n, n))
    # inv(V + J) - J / n^2, where J is the all-ones matrix
    return np.linalg.inv(V + ones) - ones / n**2
def initialize(dissimilarities, random_state, init, n_samples, n_components):
    """Prepare a starting embedding and flattened similarity arrays.

    Returns (X, sim_flat, sim_flat_w): the initial configuration, the raveled
    strict upper triangle of the dissimilarities, and that array with zero
    (missing) entries removed.
    """
    rng = check_random_state(random_state)
    # Keep only the strict upper triangle; zeros are treated as missing.
    sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
    sim_flat_w = sim_flat[sim_flat != 0]
    if init is None:
        # Randomly choose initial configuration
        X = rng.rand(n_samples * n_components).reshape((n_samples, n_components))
    else:
        # A supplied init overrides n_components.
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init
    return X, sim_flat, sim_flat_w
def nonmetric_disparities(dis, sim_flat, n_samples, ir=None):
    """Compute nonmetric-MDS disparities via monotonic (isotonic) regression.

    Parameters
    ----------
    dis : ndarray, shape (n_samples, n_samples)
        Current embedding distances.
    sim_flat : ndarray, shape (n_samples * n_samples,)
        Flattened dissimilarities; zeros mark missing values.
    n_samples : int
        Number of points.
    ir : IsotonicRegression, optional
        Regressor to reuse across iterations; a fresh one is created if None.

    Returns
    -------
    disparities : ndarray, shape (n_samples, n_samples)
        Monotonically regressed, rescaled disparities.
    """
    # BUG FIX: ``ir`` and ``sim_flat_w`` were referenced but never defined in
    # this scope (NameError whenever the nonmetric path ran).
    if ir is None:
        ir = IsotonicRegression()
    dis_flat = dis.ravel()
    # dissimilarities with 0 are considered as missing values
    dis_flat_w = dis_flat[sim_flat != 0]
    sim_flat_w = sim_flat[sim_flat != 0]
    # Compute the disparities using a monotonic regression
    disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
    disparities = dis_flat.copy()
    disparities[sim_flat != 0] = disparities_flat
    disparities = disparities.reshape((n_samples, n_samples))
    # Rescale so the sum of squared disparities matches Kruskal's convention.
    disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
                           (disparities ** 2).sum())
    return disparities
def guttman(X1, X2, disparities, inv_V, V2, dis):
    """One Guttman-transform update of embedding X1, pulled toward X2.

    Note: ``dis`` is modified in place (zeros replaced by 1e-5), matching the
    original behavior that callers rely on.
    """
    # Avoid division by zero in the ratio below.
    dis[dis == 0] = 1e-5
    ratio = disparities / dis
    # B encodes the error between the disparity matrix and the embedding.
    B = -ratio
    diag = np.arange(len(B))
    B[diag, diag] += ratio.sum(axis=1)
    update = np.dot(B, X1) + np.dot(V2, X2)
    return np.dot(inv_V, update)
def _smacof_single(dissimilarities1, dissimilarities2, p, weights1=None, weights2=None, metric=True, n_components=2,
                   init1=None, init2=None, max_iter=300, verbose=0, eps=1e-3,
                   random_state1=None, random_state2=None):
    """
    Computes joint multidimensional scaling using the SMACOF algorithm.

    Parameters
    ----------
    dissimilarities1, dissimilarities2 : ndarray, shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Must be symmetric and of
        the same shape.
    p : float
        Penalty weight on the distance between the two embeddings.
    weights1 : ndarray, shape (n_samples, n_samples), optional
        Per-disparity weights (defaults to all ones).
    weights2 : ndarray, shape (n_samples,), optional
        Per-locus weights tying X1 to X2 (defaults to all ones).
    metric : boolean, optional, default: True
        Compute metric or nonmetric SMACOF algorithm.
    n_components : int, optional, default: 2
        Number of dimensions in which to immerse the dissimilarities. If an
        ``init`` array is provided, this option is overridden and the shape of
        ``init`` is used to determine the dimensionality of the embedding
        space.
    init1, init2 : ndarray, shape (n_samples, n_components), optional
        Starting configurations of the embeddings. By default the algorithm is
        initialized with randomly chosen arrays.
    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run.
    verbose : int, optional, default: 0
        Level of verbosity.
    eps : float, optional, default: 1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.
    random_state1, random_state2 : integer or numpy.RandomState, optional
        Generators used to initialize the two embeddings.

    Returns
    -------
    X1, X2 : ndarray, shape (n_samples, n_components)
        Coordinates of the points in a ``n_components``-space.
    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points, plus the
        inter-embedding penalty).
    n_iter : int
        The number of iterations run.
    """
    dissimilarities1 = check_symmetric(dissimilarities1, raise_exception=True)
    dissimilarities2 = check_symmetric(dissimilarities2, raise_exception=True)
    if dissimilarities1.shape != dissimilarities2.shape:
        print("Error. Distance matrices have different shapes.")
        sys.exit("Error. Distance matrices have different shapes.")
    n_samples = dissimilarities1.shape[0]
    X1, sim_flat1, sim_flat_w1 = initialize(dissimilarities1, random_state1,
                                            init1, n_samples, n_components)
    X2, sim_flat2, sim_flat_w2 = initialize(dissimilarities2, random_state2,
                                            init2, n_samples, n_components)
    # Default: equal weights
    if weights1 is None:
        weights1 = np.ones((n_samples, n_samples))
    if weights2 is None:
        weights2 = np.ones(n_samples)
    # Disparity-specific weights (V in Borg): graph-Laplacian-style matrix
    # with -w_ij off-diagonal and row sums on the diagonal.
    V1 = np.zeros((n_samples, n_samples))
    for i in range(n_samples):
        diagonal = 0
        for j in range(n_samples):
            V1[i, j] = -weights1[i, j]
            diagonal += weights1[i, j]
        V1[i, i] = diagonal
    # Locus-specific weights: diagonal penalty tying X1 to X2.
    V2 = np.zeros((n_samples, n_samples))
    for i, weight in enumerate(weights2):
        V2[i, i] = weight * p * n_samples
    inv_V = moore_penrose(V1 + V2)
    old_stress = None
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis1 = euclidean_distances(X1)
        dis2 = euclidean_distances(X2)
        if metric:
            disparities1 = dissimilarities1
            disparities2 = dissimilarities2
        else:
            # BUG FIX: originally called the nonexistent functions
            # nonmetric_disparities1/nonmetric_disparities2 (NameError).
            disparities1 = nonmetric_disparities(dis1, sim_flat1, n_samples)
            disparities2 = nonmetric_disparities(dis2, sim_flat2, n_samples)
        # Compute stress; multiply by n_samples to make the ssd term
        # comparable in magnitude to the embedding error terms.
        stress = ((dis1.ravel() - disparities1.ravel()) ** 2).sum() + ((dis2.ravel() - disparities2.ravel()) ** 2).sum() + n_samples * p * ssd(X1, X2)
        # Update X1 using the Guttman transform
        X1 = guttman(X1, X2, disparities1, inv_V, V2, dis1)
        # Update X2 using the Guttman transform (uses the freshly updated X1)
        X2 = guttman(X2, X1, disparities2, inv_V, V2, dis2)
        # Normalization factor for the convergence test: mean total norm of
        # the two embeddings.
        dis1 = np.sqrt((X1 ** 2).sum(axis=1)).sum()
        dis2 = np.sqrt((X2 ** 2).sum(axis=1)).sum()
        dis = np.mean((dis1, dis2))
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            if np.abs(old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' % (it,
                                                                       stress))
                break
        old_stress = stress / dis
    return X1, X2, stress, it + 1
def smacof(dissimilarities1, dissimilarities2, p, weights1, weights2, metric=True, n_components=2, init1=None, init2=None,
           n_init=8, n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state1=None, random_state2=None,
           return_n_iter=False):
    """
    Computes joint multidimensional scaling using the SMACOF algorithm.

    The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
    multidimensional scaling algorithm which minimizes an objective function
    (the *stress*) using a majorization technique. Stress majorization, also
    known as the Guttman Transform, guarantees a monotone convergence of
    stress, and is more powerful than traditional techniques such as gradient
    descent.

    The SMACOF algorithm for metric MDS can summarized by the following steps:
    1. Set an initial start configuration, randomly or not.
    2. Compute the stress
    3. Compute the Guttman Transform
    4. Iterate 2 and 3 until convergence.
    The nonmetric algorithm adds a monotonic regression step before computing
    the stress.

    Parameters
    ----------
    dissimilarities1, dissimilarities2 : ndarray, shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Must be symmetric.
    p : float
        Non-negative penalty weight tying the two embeddings together.
    weights1 : ndarray, shape (n_samples, n_samples) or None
        Per-disparity weights.
    weights2 : ndarray, shape (n_samples,) or None
        Per-locus weights.
    metric : boolean, optional, default: True
        Compute metric or nonmetric SMACOF algorithm.
    n_components : int, optional, default: 2
        Number of dimensions in which to immerse the dissimilarities. If an
        ``init`` array is provided, this option is overridden and the shape of
        ``init`` is used to determine the dimensionality of the embedding
        space.
    init1, init2 : ndarray, shape (n_samples, n_components), optional
        Starting configurations of the embeddings. By default the algorithm is
        initialized with randomly chosen arrays.
    n_init : int, optional, default: 8
        Number of times the SMACOF algorithm will be run with different
        initializations. The final results will be the best output of the runs,
        determined by the run with the smallest final stress. If ``init`` is
        provided, this option is overridden and a single run is performed.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If multiple
        initializations are used (``n_init``), each run of the algorithm is
        computed in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For ``n_jobs`` below -1,
        (``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs
        but one are used.
    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run.
    verbose : int, optional, default: 0
        Level of verbosity.
    eps : float, optional, default: 1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.
    random_state1, random_state2 : integer or numpy.RandomState, optional
        Generators used to initialize the two embeddings.
    return_n_iter : bool, optional, default: False
        Whether or not to return the number of iterations.

    Returns
    -------
    X1, X2 : ndarray, shape (n_samples, n_components)
        Coordinates of the points in a ``n_components``-space.
    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).
    n_iter : int
        The number of iterations corresponding to the best stress. Returned
        only if ``return_n_iter`` is set to ``True``.

    Notes
    -----
    "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
    Groenen P. Springer Series in Statistics (1997)
    "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
    Psychometrika, 29 (1964)
    "Multidimensional scaling by optimizing goodness of fit to a nonmetric
    hypothesis" Kruskal, J. Psychometrika, 29, (1964)
    """
    if p < 0:
        sys.exit('Error. Penalty must be non-negative.')
    dissimilarities1 = check_array(dissimilarities1)
    dissimilarities2 = check_array(dissimilarities2)
    random_state1 = check_random_state(random_state1)
    random_state2 = check_random_state(random_state2)
    if hasattr(init1, '__array__'):
        init1 = np.asarray(init1).copy()
        if not n_init == 1:
            warnings.warn(
                'Explicit initial positions passed: '
                'performing only one init of the MDS instead of {}'.format(n_init))
            n_init = 1
    if hasattr(init2, '__array__'):
        init2 = np.asarray(init2).copy()
        if not n_init == 1:
            warnings.warn(
                'Explicit initial positions passed: '
                'performing only one init of the MDS instead of {}'.format(n_init))
            n_init = 1
    best_pos1, best_pos2, best_stress = None, None, None
    if n_jobs == 1:
        for it in range(n_init):
            # BUG FIX: the serial path dropped weights1/weights2 (the parallel
            # path below passes them), silently ignoring caller weights.
            pos1, pos2, stress, n_iter_ = _smacof_single(
                dissimilarities1, dissimilarities2, p,
                weights1=weights1, weights2=weights2, metric=metric,
                n_components=n_components, init1=init1,
                init2=init2, max_iter=max_iter,
                verbose=verbose, eps=eps, random_state1=random_state1,
                random_state2=random_state2)
            if best_stress is None or stress < best_stress:
                best_stress = stress
                best_pos1 = pos1.copy()
                best_pos2 = pos2.copy()
                best_iter = n_iter_
    else:
        seeds1 = random_state1.randint(np.iinfo(np.int32).max, size=n_init)
        seeds2 = random_state2.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
            delayed(_smacof_single)(
                dissimilarities1, dissimilarities2, p, weights1=weights1, weights2=weights2, metric=metric,
                n_components=n_components, init1=init1, init2=init2,
                max_iter=max_iter, verbose=verbose, eps=eps,
                random_state1=seed1, random_state2=seed2)
            for seed1, seed2 in zip(seeds1, seeds2))
        positions1, positions2, stress, n_iters = zip(*results)
        best = np.argmin(stress)
        best_stress = stress[best]
        best_pos1 = positions1[best]
        best_pos2 = positions2[best]
        best_iter = n_iters[best]
    if return_n_iter:
        return best_pos1, best_pos2, best_stress, best_iter
    else:
        return best_pos1, best_pos2, best_stress
class Joint_MDS(BaseEstimator):
    """Joint multidimensional scaling of two datasets.

    Read more in the :ref:`User Guide <multidimensional_scaling>`.

    Parameters
    ----------
    n_components : int, optional, default: 2
        Number of dimensions in which to immerse the dissimilarities.
    weights1 : ndarray, shape (n_samples, n_samples), optional
        Per-disparity weights (used when fit is not given weights).
    weights2 : ndarray, shape (n_samples,), optional
        Per-locus weights tying the two embeddings (used when fit is not
        given weights).
    p : float, optional, default: 0
        Non-negative penalty weight on the distance between the embeddings.
    metric : boolean, optional, default: True
        If ``True``, perform metric MDS; otherwise, perform nonmetric MDS.
    n_init : int, optional, default: 4
        Number of times the SMACOF algorithm will be run with different
        initializations. The final results will be the best output of the runs,
        determined by the run with the smallest final stress.
    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run.
    verbose : int, optional, default: 0
        Level of verbosity.
    eps : float, optional, default: 1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If multiple
        initializations are used (``n_init``), each run of the algorithm is
        computed in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For ``n_jobs`` below -1,
        (``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs
        but one are used.
    random_state1, random_state2 : integer or numpy.RandomState, optional
        Generators used to initialize the two embeddings. If integers are
        given, they fix the seeds.
    dissimilarity : 'euclidean' | 'precomputed', optional, default: 'euclidean'
        Dissimilarity measure to use:
        - 'euclidean':
            Pairwise Euclidean distances between points in the dataset.
        - 'precomputed':
            Pre-computed dissimilarities are passed directly to ``fit`` and
            ``fit_transform``.

    Attributes
    ----------
    embedding1_, embedding2_ : array-like, shape (n_samples, n_components)
        Positions of the two datasets in the embedding space.
    stress_ : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).

    References
    ----------
    "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
    Groenen P. Springer Series in Statistics (1997)
    "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
    Psychometrika, 29 (1964)
    "Multidimensional scaling by optimizing goodness of fit to a nonmetric
    hypothesis" Kruskal, J. Psychometrika, 29, (1964)
    """
    def __init__(self, n_components=2, weights1=None, weights2=None, p=0, metric=True, n_init=4,
                 max_iter=300, verbose=0, eps=1e-3, n_jobs=1,
                 random_state1=None, random_state2=None,
                 dissimilarity="euclidean"):
        self.n_components = n_components
        self.weights1 = weights1
        self.weights2 = weights2
        self.p = p
        self.dissimilarity = dissimilarity
        self.metric = metric
        self.n_init = n_init
        self.max_iter = max_iter
        self.eps = eps
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state1 = random_state1
        self.random_state2 = random_state2

    @property
    def _pairwise(self):
        # BUG FIX: referenced nonexistent ``self.kernel``; this estimator's
        # precomputed flag lives in ``self.dissimilarity``.
        return self.dissimilarity == "precomputed"

    def fit(self, X1, X2, weights1=None, weights2=None, init=None):
        """
        Computes the position of the points in the embedding space.

        Parameters
        ----------
        X1, X2 : array, shape (n_samples, n_features) or (n_samples, n_samples)
            Input data. If ``dissimilarity=='precomputed'``, the inputs should
            be the dissimilarity matrices.
        init : ndarray, shape (n_samples, n_components), optional, default: None
            Starting configuration of the first embedding to initialize the
            SMACOF algorithm. By default the algorithm is initialized with a
            randomly chosen array.
        """
        # BUG FIX: fit_transform has no ``init`` keyword (its kwargs are
        # init1/init2); the old call raised TypeError when init was supplied.
        self.fit_transform(X1, X2, weights1=weights1, weights2=weights2, init1=init)
        return self

    def fit_transform(self, X1, X2, weights1=None, weights2=None, init1=None, init2=None):
        """
        Fit the data from X1 and X2, and return the embedded coordinates.

        Parameters
        ----------
        X1, X2 : array, shape (n_samples, n_features) or (n_samples, n_samples)
            Input data. If ``dissimilarity=='precomputed'``, the inputs should
            be the dissimilarity matrices.
        weights1, weights2 : ndarray, optional
            Per-call weights; default to those given to the constructor.
        init1, init2 : ndarray, shape (n_samples, n_components), optional
            Starting configurations of the embeddings to initialize the SMACOF
            algorithm. By default the algorithm is initialized with randomly
            chosen arrays.
        """
        X1 = check_array(X1)
        if X1.shape[0] == X1.shape[1] and self.dissimilarity != "precomputed":
            warnings.warn("The MDS API has changed. ``fit`` now constructs a"
                          " dissimilarity matrix from data. To use a custom "
                          "dissimilarity matrix, set "
                          "``dissimilarity='precomputed'``.")
        if self.dissimilarity == "precomputed":
            self.dissimilarity_matrix1_ = X1
        elif self.dissimilarity == "euclidean":
            self.dissimilarity_matrix1_ = euclidean_distances(X1)
        else:
            raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
                             " Got %s instead" % str(self.dissimilarity))
        X2 = check_array(X2)
        if X2.shape[0] == X2.shape[1] and self.dissimilarity != "precomputed":
            warnings.warn("The MDS API has changed. ``fit`` now constructs a"
                          " dissimilarity matrix from data. To use a custom "
                          "dissimilarity matrix, set "
                          "``dissimilarity='precomputed'``.")
        if self.dissimilarity == "precomputed":
            self.dissimilarity_matrix2_ = X2
        elif self.dissimilarity == "euclidean":
            self.dissimilarity_matrix2_ = euclidean_distances(X2)
        else:
            raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
                             " Got %s instead" % str(self.dissimilarity))
        # BUG FIX: the weights1/weights2 parameters were accepted but ignored
        # (self.weights* were always used). Per-call weights now take
        # precedence, falling back to the constructor's values.
        if weights1 is None:
            weights1 = self.weights1
        if weights2 is None:
            weights2 = self.weights2
        self.embedding1_, self.embedding2_, self.stress_, self.n_iter_ = smacof(
            self.dissimilarity_matrix1_, self.dissimilarity_matrix2_, p=self.p, weights1=weights1,
            weights2=weights2, metric=self.metric, n_components=self.n_components, init1=init1, init2=init2,
            n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
            eps=self.eps, random_state1=self.random_state1, random_state2=self.random_state2,
            return_n_iter=True)
        return self.embedding1_, self.embedding2_
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,216
|
Lila14/multimds
|
refs/heads/master
|
/scripts/get_a_compartment.py
|
import sys
sys.path.append("..")
import compartment_analysis as ca
import data_tools as dt
import array_tools as at
import os
import numpy as np
# Extract A-compartment bins (positive eigenvector sign) for each autosome
# from 100kb GM12878 Hi-C data and write them to a BED file at the requested
# output resolution (sys.argv[1], in bp).
res = int(sys.argv[1])
# Explicit integer division: identical to / on Python 2 ints, and keeps the
# "<n>kb" file name integral (not "100.0kb") under Python 3.
res_kb = res // 1000
out_path = "A_compartment_{}kb.bed".format(res_kb)
# Start from a clean output file; os.remove replaces the shelled-out `rm`.
if os.path.isfile(out_path):
    os.remove(out_path)
# NOTE(review): chromosome 9 is skipped and the input resolution is hard-coded
# to 100kb below -- presumably intentional; confirm against the pipeline.
for chrom in (1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22):
    path = "hic_data/GM12878_combined_{}_100kb.bed".format(chrom)
    structure = dt.structureFromBed(path)
    contacts = dt.matFromBed(path, structure)
    at.makeSymmetric(contacts)
    # Column 7 of the coverage BED holds the active-mark enrichment value.
    enrichments = np.array(np.loadtxt("binding_data/Gm12878_{}_100kb_active_coverage.bed".format(chrom), dtype=object)[:,6], dtype=float)
    # Map structure bins onto coverage rows (offset by the chromosome start).
    bin_nums = structure.nonzero_abs_indices() + structure.chrom.minPos // structure.chrom.res
    enrichments = enrichments[bin_nums]
    compartments = np.array(ca.get_compartments(contacts, enrichments))
    gen_coords = np.array(structure.getGenCoords())
    # Positive compartment score = A compartment.
    a_gen_coords = gen_coords[np.where(compartments > 0)]
    with open(out_path, "a") as out:
        for a_gen_coord in a_gen_coords:
            # Subdivide each 100kb bin into res-sized intervals.
            for i in range(100 // res_kb):
                out.write("\t".join((structure.chrom.name, str(a_gen_coord + i*structure.chrom.res), str(a_gen_coord + (i+1)*structure.chrom.res))))
                out.write("\n")
    # BUG FIX: removed the redundant out.close(); the with-statement already
    # closed the file.
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,217
|
Lila14/multimds
|
refs/heads/master
|
/scripts/ttest.py
|
import numpy as np
from scipy import stats as st
import sys
from matplotlib import pyplot as plt
def _load_enrichments(path):
    """Load column 7 of a BED-like matrix as a float array."""
    mat = np.loadtxt(path, dtype=object)
    return np.array(mat[:, 6], dtype=float)

def _plot_enrichment_hist(xs, title, outname):
    """Save a frameless histogram of xs styled like the original figures."""
    # First pass only to learn the bin heights for the y range.
    bins = plt.hist(xs)
    plt.close()
    # Start with a frameless plot (extra room on the left).
    plt.subplot2grid((10,10), (0,0), 9, 10, frameon=False)
    plt.xlabel("GM12878 enhancer coverage", fontsize=14)
    plt.title(title, fontsize=14)
    # Offsets so the drawn axes sit slightly outside the data.
    xmin = min(xs)
    xmax = max(xs)
    x_range = xmax - xmin
    x_start = xmin - x_range/25. # bigger offset for bar plot
    x_end = xmax + x_range/25.
    ymin = 0
    ymax = max(bins[0])
    y_range = ymax - ymin
    y_start = 0
    y_end = ymax + y_range/25.
    plt.hist(xs, rwidth=0.8, bottom=y_start)
    plt.axis([x_start, x_end, y_start, y_end], frameon=False)
    # Draw the axes ourselves (black, line width 4).
    plt.axvline(x=x_start, color="k", lw=4)
    plt.axhline(y=y_start, color="k", lw=4)
    plt.tick_params(direction="out", top=False, right=False, length=12, width=3, pad=5, labelsize=12)
    plt.savefig(outname)
    plt.close()

# Compare enhancer coverage between relocalized and background bins, then plot
# a histogram of each. (The two plotting sections were duplicated verbatim in
# the original; factored into _plot_enrichment_hist.)
enrichments1 = _load_enrichments(sys.argv[1])
enrichments2 = _load_enrichments(sys.argv[2])
# Parenthesized print is valid on both Python 2 and Python 3.
print(st.ttest_ind(enrichments1, enrichments2))
_plot_enrichment_hist(enrichments1, "Relocalized", "relocalization_enhancer_coverage")
_plot_enrichment_hist(enrichments2, "Background", "background_enhancer_coverage")
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,218
|
Lila14/multimds
|
refs/heads/master
|
/scripts/enhancer_pie.py
|
from matplotlib import pyplot as plt
import sys
# Pie charts of enhancer vs. no-enhancer counts, taken from the command line:
# argv[1:3] = relocalization peaks, argv[3:5] = background A compartment.
labels = ("Enhancer", "No enhancer")
relocalized_counts = (int(sys.argv[1]), int(sys.argv[2]))
background_counts = (int(sys.argv[3]), int(sys.argv[4]))
plt.pie(relocalized_counts, labels=labels)
plt.title("Relocalization peaks")
plt.savefig("relocalization_enhancer_pie")
plt.close()
plt.pie(background_counts, labels=labels)
plt.title("Background A compartment")
plt.savefig("background_enhancer_pie")
|
{"/scripts/test_multimds.py": ["/joint_mds.py"]}
|
21,219
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/NetworkInfo.py
|
import socket
from lib.windows.common.CommandHandler import CommandHandler
from uuid import getnode as get_mac
from lib.windows.common import Utility as utl
from lib.windows import SystemInfo
#import SystemInfo
import re
class NetworkInfo:
    '''
    class Name: NetworkInfo
    Description: gathers network-related information (host name, IP and MAC
    addresses, plus wmic network categories) using ipconfig /all and os tools.

    To get all network information call:
        objectName.networkinfo()
    '''
    def __init__(self):
        # Shell-command helper used by getIpConfig() and Preprocess().
        self.cmd=CommandHandler()
    def getIpConfig(self):
        '''Return the output lines of ``ipconfig /all``, or None on failure.

        Call: objectName.getIpConfig()
        '''
        try:
            cmd=["ipconfig", "/all"]
            results=self.cmd.getCmdOutput(cmd)
            return results.splitlines()
        # BUG FIX: bare ``except:`` also swallowed SystemExit/KeyboardInterrupt.
        except Exception:
            return None
    def getNetworkName(self):
        '''Return this machine's host name on the network, or None on failure.

        Call: objectName.getNetworkName()
        '''
        try:
            s1=SystemInfo.SystemInfo()
            return s1.getMachineName()
        except Exception:
            return None
    def getIpAddress(self):
        '''Return this machine's IP address, or None on failure.

        Call: objectName.getIpAddress()
        '''
        try:
            return socket.gethostbyname(socket.gethostname())
        except Exception:
            return None
    def getMacAddress(self):
        '''Return this machine's MAC address as AA:BB:CC:DD:EE:FF, or None.

        Call: objectName.getMacAddress()
        '''
        try:
            mac = get_mac()
            # Format the 48-bit integer as six zero-padded hex byte pairs.
            macid=':'.join(("%012X" % mac)[i:i+2] for i in range(0, 12, 2))
            return macid
        except Exception:
            return None
    def Preprocess(self,text):
        '''Run ``wmic <text> list /format:csv`` and parse the CSV into dicts.'''
        cmd=f'wmic {text} list /format:csv'
        Command_res=self.cmd.getCmdOutput(cmd)
        result=utl.CsvTextToDict(Command_res)
        return result
    def networkinfo(self):
        '''Return complete network-related information as a dict.

        Call: objectName.networkinfo()
        '''
        network_info={}
        ipandmacAddress={}
        ipandmacAddress['HostNodeName']=self.getNetworkName()
        ipandmacAddress['IpAddress']=self.getIpAddress()
        ipandmacAddress['MacAddress']=self.getMacAddress()
        network_info['ipandmacAddress']=[ipandmacAddress]
        network_categories=['netclient','NETPROTOCOL','nic','RDNIC','NICCONFIG']
        for part in network_categories:
            network_info[part]=self.Preprocess(part)
        return network_info
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,220
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/HardwareInfo.py
|
from lib.windows.common.CommandHandler import CommandHandler
from lib.windows.common.RegistryHandler import RegistryHandler
from lib.windows.common import Utility as utl
class HardwareInfo:
    '''
    class_Name: HardwareInfo
    Output: returns BIOS, CPU, baseboard and USB information.
    Functions:
        getBiosInfo()
        getCpuInfo()
        usbPortInfo()
    '''
    def __init__(self):
        # Shell helper used to run the wmic queries below.
        self.cmd=CommandHandler()

    def Preprocess(self,text):
        '''Run ``wmic <text> list /format:csv`` and parse the CSV into dicts.'''
        raw = self.cmd.getCmdOutput(f'wmic {text} list /format:csv')
        return utl.CsvTextToDict(raw)

    def getBiosInfo(self):
        '''
        Usage: object.getBiosInfo()
        Returns BIOS details as a dict, e.g.
            {'Manufacturer': 'XXX',
             'SerialNumber': 'XXXXXXXXXXX',
             'SMBIOSBIOSVe': 'XXXXXXXX'}
        '''
        return self.Preprocess('bios')

    def CsProduct(self):
        '''Return computer-system product (CSPRODUCT) details.'''
        return self.Preprocess('CSPRODUCT')

    def getCpuInfo(self):
        '''Return CPU details as a dict.'''
        return self.Preprocess('cpu')

    def getBaseboard(self):
        '''Return motherboard (BASEBOARD) details.'''
        return self.Preprocess('BASEBOARD')

    def usbPortInfo(self):
        '''
        Usage: object.usbPortInfo()
        Counts USB root-hub registry entries and returns a dict, e.g.
            {'ROOT_HUB2': 2, 'ROOT_HUB3': 1}
        '''
        hub_counts={}
        for hub in ['ROOT_HUB20','ROOT_HUB30']:
            # 'HLM' = HKEY_LOCAL_MACHINE
            registry=RegistryHandler('HLM', r'SYSTEM\CurrentControlSet\Enum\USB\{}'.format(hub))
            # Drop the trailing version digit for the report key (HUB20 -> HUB2).
            hub_counts[hub[:-1]]=registry.getKeys()
        return hub_counts

    def getHardwareinfo(self):
        '''
        usage: object.getHardwareinfo()
        Returns BIOS, CPU, USB and related hardware information in one dict.
        '''
        info={
            'usb':[self.usbPortInfo()]
        }
        for section in ['onboarddevice','bios','cpu','BASEBOARD','CSPRODUCT','PORTCONNECTOR','SYSTEMSLOT']:
            info[section]=self.Preprocess(section)
        return info
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,221
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/ServiceInfo.py
|
from lib.windows.common.CommandHandler import CommandHandler
from lib.windows.common import Utility as utl
class ServiceInfo:
    '''Collects service-related information (load order, processes, remote
    desktop accounts) via wmic.'''
    def __init__(self):
        # Shell helper used to run the wmic queries.
        self.cmd=CommandHandler()
    def Preprocess(self,text):
        '''Run ``wmic <text> list /format:csv`` and parse the CSV into dicts.'''
        output = self.cmd.getCmdOutput(f'wmic {text} list /format:csv')
        return utl.CsvTextToDict(output)
    def getServiceInfo(self):
        '''Return a dict mapping each wmic service category to its parsed rows.'''
        categories = ['LOADORDER','PROCESS','RDACCOUNT']
        return {category: self.Preprocess(category) for category in categories}
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,222
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_browsers.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
class get_browsers:
    '''
    Returns a list of the web browsers installed on the user's Linux system
    (as reported by ``apropos 'web browser'``) and writes them to
    "Installed Browser.csv" under the output directory.

    Methods:
        __init__ -- initialises the attributes used by work().
        work()   -- collects, numbers and saves the browser list, and returns
                    it as {"List of Installed Browsers": [[name], ...]}.
    '''
    def __init__(self):
        '''Initialise the attributes used later by work().'''
        self.command_output = ""        # raw output of the apropos command
        self.browsers = []              # parsed browser names
        self.data = ""                  # CSV text written to the output file
        self.current_path = os.getcwd() # current directory (switched to output/)
    def work(self):
        '''
        Collect the installed-browser list:
        1) run ``apropos 'web browser'`` and capture its output;
        2) split into lines and drop the trailing empty element;
        3) strip each line down to the command name (text before " (");
        4) write a numbered CSV into the output directory;
        5) return the names wrapped in a dict.
        '''
        ret_data = {"List of Installed Browsers":[]}
        self.command_output = os.popen("apropos 'web browser'").read()
        self.browsers = self.command_output.split('\n')
        self.browsers.pop()  # drop the empty element left by the final newline
        # Keep only the command name; apropos lines look like "name (sec) - desc".
        self.browsers = [i[:i.find('(')-1] for i in self.browsers]
        self.data = "S.No,Browser Name\n"
        # BUG FIX: the original used self.browsers.index(i)+1, which is O(n^2)
        # and gives duplicated names the same (first-match) serial number.
        for serial, name in enumerate(self.browsers, start=1):
            self.data += str(serial)+","+str(name)+"\n"
        if self.current_path.find("output") == -1: # switch to the output folder if not already there
            self.current_path += "/output/"
            os.chdir(self.current_path)
        with open("Installed Browser.csv","w") as browser: # save the CSV
            browser.write(self.data)
        self.browsers.insert(0,"Installed Browsers")
        for i in self.browsers:
            ret_data["List of Installed Browsers"].append([i])
        return ret_data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,223
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/MiscInfo.py
|
from lib.windows.common.CommandHandler import CommandHandler
from lib.windows.common import Utility as utl
class MiscInfo:
    '''Collects miscellaneous system information (environment, groups, logons,
    registry, accounts) via wmic.'''
    def __init__(self):
        # Shell helper used to run the wmic queries.
        self.cmd=CommandHandler()
    def Preprocess(self,text):
        '''Run ``wmic <text> list /format:csv`` and parse the CSV into dicts.'''
        output = self.cmd.getCmdOutput(f'wmic {text} list /format:csv')
        return utl.CsvTextToDict(output)
    def getMiscInfo(self):
        '''Return a dict mapping each wmic misc category to its parsed rows.'''
        categories = ['ENVIRONMENT','GROUP','LOGON','REGISTRY','SYSACCOUNT','USERACCOUNT']
        return {category: self.Preprocess(category) for category in categories}
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,224
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_network_info.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
from tabulate import tabulate
class get_network_info:
    '''
    Collects the current network connection status, IP address, net mask and
    broadcast address along with all interface stats, using `nmcli`.

    Methods:
        __init__ : initialises the attributes used later by work().
        work()   : returns a dict mapping each interface name to a table
                   (list of [property, value] rows) of its details.
    '''

    def __init__(self):
        '''Initialise the buffers used by work().'''
        self.data = ""                   # raw `nmcli` output is accumulated here
        self.current_path = os.getcwd()  # working directory, kept for optional file export

    def work(self):
        '''
        Parse `nmcli -p device show` output.

        Returns:
            dict: {interface_name: [["Property", "Value"], [prop, value], ...]}
        '''
        ret_data = {}
        temp_key = ""
        self.data += os.popen("nmcli -p device show").read()  # formatted device report
        self.data = self.data.replace("-", "")
        self.data = self.data.replace("GENERAL.", "")
        for line in self.data.split('\n'):
            # Skip blank lines and the '====' separator lines (which contain '=').
            if line != '' and line.find('=') == -1:
                if line.find('Device details') != -1:
                    # Section header "Device details (eth0)" -> start a new table
                    # keyed by the interface name inside the parentheses.
                    temp_key = line.split('(')[1].split(')')[0]
                    ret_data[temp_key] = [["Property", "Value"]]
                elif line.split(':')[1].strip() != '':
                    # BUG FIX: the original compared with `is not ''` -- an identity
                    # comparison on strings, which is unreliable and a SyntaxWarning
                    # on Python 3.8+; use an equality test instead.
                    ret_data[temp_key].append([line.split(':')[0], line.split(':')[1].strip()])
        return ret_data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,225
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/common/CommandHandler.py
|
from subprocess import getoutput
class CommandHandler:
    """Thin wrapper around subprocess.getoutput for running shell commands."""

    def __init__(self, command_text=""):
        # Default command text (kept for API compatibility; getCmdOutput
        # takes its command explicitly).
        self.command_text = command_text

    def getCmdOutput(self, cmdtext):
        """Run *cmdtext* in a shell and return its combined stdout/stderr text.

        On failure the exception object itself is returned instead of raising,
        preserving the original best-effort contract.
        """
        try:
            return getoutput(cmdtext)
        except Exception as error:
            return error
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,226
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/FileInfo.py
|
import os
import win32api
class FileInfo:
    '''
    class Name:
        FileInfo
    Function Names:
        getDrives()
        getFileList(path)
        GetCount()
    '''

    def getDrives(self):
        '''
        getDrives()
        Function returns a list containing all available drive letters.
        Output:
            List-->All List of Available Drives
        '''
        drives = win32api.GetLogicalDriveStrings()
        # The returned string is NUL-separated with a trailing NUL -> drop the empty tail.
        drives = drives.split('\000')[:-1]
        return drives

    def getFileList(self, path):
        '''
        Get the full recursive file list at the given path.
        getFileList(path):
        Example :
            Object.getFileList(r"D:\Products\admin\images")
        Input :
            path-->a valid system path
        Output:
            False-->If path does not exist
            List-->one "directory,fullpath,extension" string per file
        '''
        if not os.path.exists(path):
            return False
        final = []
        # BUG FIX: the original '{},{}'.format(...) was given three arguments but
        # had only two placeholders, silently dropping the file extension. It also
        # shadowed the `path` parameter with the os.walk loop variable.
        for dirpath, _subdirs, files in os.walk(path):
            for name in files:
                final.append('{},{},{}'.format(dirpath,
                                               os.path.join(dirpath, name),
                                               os.path.splitext(name)[1]))
        return final

    def GetCount(self):
        '''
        GetCount() Return all files Count in Your System
        Output:
            res-->a list of dicts, one per drive, with 'drive' and 'count' keys
        '''
        drives = self.getDrives()
        filelist = []
        res = []
        # NOTE(review): the first drive is skipped (drives[1:]) -- presumably to
        # avoid scanning the system drive; confirm this is intentional.
        for drive in drives[1:]:
            result = {}
            result['drive'] = drive
            flist = self.getFileList(drive)
            filelist.append(flist)
            result['count'] = len(flist)
            res.append(result)
        return res
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,227
|
chavarera/Cinfo
|
refs/heads/master
|
/MainUi.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Cinfo.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from lib.windows import SystemInfo,NetworkInfo,SoftwareInfo,StorageInfo
from lib.windows import HardwareInfo,FileInfo,DeviceInfo,MiscInfo,ServiceInfo
from lib.windows.common import Utility as utl
import json
import os
import pickle
class Ui_Cinfo(object):
    """Main-window UI for Cinfo (generated from 'Cinfo.ui' by PyQt5 uic, then hand-extended).

    Loads previously collected system information from ``result.pickle`` and
    presents it: the user picks a module (radio buttons) and a sub-module
    (combo box) and the matching records are rendered in a table widget.
    """

    def __init__(self):
        # Modules selectable through the radio buttons created in AddModules().
        self.module_list = ['system','hardware','network','software','device','storage','service']
        self.submodules = []          # sub-module names of the chosen module
        self.modules=""               # name of the currently selected module
        self.current_selected = []    # records of the currently selected sub-module
        self.os = os.name
        self.cheklist = []            # radio-button widgets (one per module)
        self.checked_modules = []     # module names whose radio button is checked
        self.fetchedData = self.OpenPickle()   # data gathered earlier by the CLI run
        self.filterdata = []          # fetchedData filtered down to the checked modules

    def closeEvent(self, event):
        """Confirm with the user before closing; exits the process on Yes."""
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Question)
        msg.setInformativeText("Are you sure you want to close this window?")
        msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
        msg.setWindowTitle("Are you sure?")
        replay=msg.exec_()
        if(replay==QtWidgets.QMessageBox.Yes):
            exit(0)
        else:
            pass

    def setupUi(self, Cinfo):
        """Build all widgets, menus, toolbar actions and signal connections."""
        Cinfo.setObjectName("Cinfo")
        Cinfo.resize(640, 461)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("icons/info.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Cinfo.setWindowIcon(icon)
        Cinfo.setIconSize(QtCore.QSize(32, 24))
        # --- central widget, grid layout and the module selection panel ---
        self.centralwidget = QtWidgets.QWidget(Cinfo)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.Modules_verticalLayout = QtWidgets.QVBoxLayout()
        self.Modules_verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.Modules_verticalLayout.setContentsMargins(20, 20, 20, 20)
        self.Modules_verticalLayout.setSpacing(1)
        self.Modules_verticalLayout.setObjectName("Modules_verticalLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label.setAutoFillBackground(False)
        self.label.setLineWidth(1)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.label.setObjectName("label")
        self.Modules_verticalLayout.addWidget(self.label)
        self.gridLayout.addLayout(self.Modules_verticalLayout, 0, 0, 1, 1)
        # --- results table ---
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.result_tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.result_tableWidget.setObjectName("result_tableWidget")
        self.result_tableWidget.setColumnCount(0)
        self.result_tableWidget.setRowCount(0)
        self.horizontalLayout.addWidget(self.result_tableWidget)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 2, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setTextFormat(QtCore.Qt.PlainText)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 1, 1, 1, 2)
        Cinfo.setCentralWidget(self.centralwidget)
        # --- menu bar ---
        self.menubar = QtWidgets.QMenuBar(Cinfo)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 27))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.menubar.setFont(font)
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.menuFile.setFont(font)
        self.menuFile.setObjectName("menuFile")
        self.menuExport_As = QtWidgets.QMenu(self.menuFile)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.menuExport_As.setFont(font)
        self.menuExport_As.setObjectName("menuExport_As")
        self.menuOption = QtWidgets.QMenu(self.menubar)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.menuOption.setFont(font)
        self.menuOption.setObjectName("menuOption")
        self.menuHelp = QtWidgets.QMenu(self.menubar)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.menuHelp.setFont(font)
        self.menuHelp.setObjectName("menuHelp")
        Cinfo.setMenuBar(self.menubar)
        # --- toolbar and status bar ---
        self.toolBar = QtWidgets.QToolBar(Cinfo)
        self.toolBar.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.toolBar.setMovable(True)
        self.toolBar.setIconSize(QtCore.QSize(30, 24))
        self.toolBar.setObjectName("toolBar")
        Cinfo.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        self.statusBar = QtWidgets.QStatusBar(Cinfo)
        self.statusBar.setObjectName("statusBar")
        Cinfo.setStatusBar(self.statusBar)
        # --- actions (export Excel/Json/Text, refresh, exit, about, help, preferences) ---
        self.actionExcel = QtWidgets.QAction(Cinfo)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("icons/excel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExcel.setIcon(icon1)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionExcel.setFont(font)
        self.actionExcel.setObjectName("actionExcel")
        self.actionJson = QtWidgets.QAction(Cinfo)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap("icons/Json.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionJson.setIcon(icon2)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionJson.setFont(font)
        self.actionJson.setObjectName("actionJson")
        self.actionText = QtWidgets.QAction(Cinfo)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap("icons/text.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionText.setIcon(icon3)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionText.setFont(font)
        self.actionText.setObjectName("actionText")
        self.actionRefresh = QtWidgets.QAction(Cinfo)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap("icons/Refresh.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionRefresh.setIcon(icon4)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.actionRefresh.setFont(font)
        self.actionRefresh.setObjectName("actionRefresh")
        self.actionExit = QtWidgets.QAction(Cinfo)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap("icons/exit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExit.setIcon(icon5)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionExit.setFont(font)
        self.actionExit.setObjectName("actionExit")
        self.actionAbout = QtWidgets.QAction(Cinfo)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap("icons/about.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionAbout.setIcon(icon6)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionAbout.setFont(font)
        self.actionAbout.setObjectName("actionAbout")
        self.actionHelp = QtWidgets.QAction(Cinfo)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap("icons/help.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionHelp.setIcon(icon7)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionHelp.setFont(font)
        self.actionHelp.setObjectName("actionHelp")
        self.actionPreferences = QtWidgets.QAction(Cinfo)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap("icons/Prefrences.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPreferences.setIcon(icon8)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionPreferences.setFont(font)
        self.actionPreferences.setObjectName("actionPreferences")
        # --- attach actions to menus and toolbar ---
        self.menuExport_As.addAction(self.actionExcel)
        self.menuExport_As.addAction(self.actionJson)
        self.menuExport_As.addAction(self.actionText)
        self.menuFile.addAction(self.actionRefresh)
        self.menuFile.addAction(self.menuExport_As.menuAction())
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit)
        self.menuOption.addAction(self.actionPreferences)
        self.menuHelp.addAction(self.actionAbout)
        self.menuHelp.addAction(self.actionHelp)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuOption.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.toolBar.addAction(self.actionRefresh)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionExcel)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionJson)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionText)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionExit)
        self.toolBar.addSeparator()
        # --- sub-module selector combo box (hand-added, not from the .ui file) ---
        self.comboBoxNew = QtWidgets.QComboBox()
        self.Modules_verticalLayout.addWidget(self.comboBoxNew)
        self.comboBoxNew.currentTextChanged.connect(self.on_SubModule_change)
        self.retranslateUi(Cinfo)
        QtCore.QMetaObject.connectSlotsByName(Cinfo)
        # --- remaining signal wiring and per-module radio buttons ---
        self.actionJson.triggered.connect(self.ExportToJson)
        self.actionExit.triggered.connect(self.closeEvent)
        self.AddModules()

    def ShowAlertMsg(self,message,types):
        """Show a modal alert; *types* is "success" or "error"."""
        if types=="success":
            alert_icon=QtWidgets.QMessageBox.Information
            alert_type="Success"
        if types=="error":
            alert_icon=QtWidgets.QMessageBox.Critical
            alert_type="Error"
        message=message
        msg = QtWidgets.QMessageBox()
        msg.setIcon(alert_icon)
        msg.setInformativeText(str(message))
        msg.setWindowTitle(alert_type)
        msg.exec_()

    def OpenPickle(self,filepath='result.pickle'):
        """Load the previously collected data; exit with a usage hint if missing.

        NOTE(review): the bare except also swallows corrupt-pickle errors and
        exits -- consider narrowing to (FileNotFoundError, pickle.PickleError).
        """
        try:
            with open(filepath,"rb") as file:
                return pickle.load(file)
        except:
            print("First Run Follwing command on Command Prompt \npython Cinfo.py")
            exit(0)

    def FilterRecord(self,filters):
        """Keep only the fetched data for the module names in *filters*."""
        if len(filters)>0:
            self.filterdata=[self.fetchedData[module] for module in filters]

    def ExportToJson(self):
        """Export all fetched data to JSON via the Utility helper and report the result."""
        status,res=utl.ExportTOJson(self.fetchedData)
        if status:
            self.ShowAlertMsg(res,"success")
        else:
            self.ShowAlertMsg(res,"error")

    def SubFilter(self,module,subFilter):
        """Select the records of *subFilter* inside *module*; silently keeps the
        previous selection if the key is absent."""
        try:
            self.current_selected=self.fetchedData[module][subFilter]
        except Exception as Ex:
            pass

    def ModuleInfo(self):
        """Radio-button handler: rebuild the sub-module combo box for the checked modules."""
        # NOTE(review): removing items by an increasing index while the count shrinks
        # skips entries; the clear() inside SetData() appears to mask this -- verify.
        for i in range(self.comboBoxNew.count()+1):
            self.comboBoxNew.removeItem(i)
        checkeds=[val.isChecked() for val in self.cheklist]
        self.checked_modules=[val for status,val in zip(checkeds,self.module_list) if status]
        self.modules=self.checked_modules[0]
        self.FilterRecord(self.checked_modules)
        self.SetData(self.checked_modules)

    def on_SubModule_change(self):
        """Combo-box handler: render the selected sub-module's records in the table.

        Single-record sub-modules are shown as Parameter/Value rows; multi-record
        ones as one table row per record with the dict keys as column headers.
        """
        current_submodule=self.comboBoxNew.currentText()
        self.result_tableWidget.setColumnCount(2)
        keys=['Parameter','Value']
        self.SubFilter(self.modules,current_submodule)
        all_values=self.current_selected[0].keys()
        rows_count=0
        self.result_tableWidget.setRowCount(0)
        if len(self.current_selected)==1:
            self.result_tableWidget.insertRow(0)
            self.result_tableWidget.setHorizontalHeaderLabels(keys)
            for result in self.current_selected:
                vals=result.values()
                for idx,value in enumerate(result.keys()):
                    # Skip empty values so the table shows only populated properties.
                    if result[value]!="":
                        self.result_tableWidget.insertRow(rows_count)
                        self.result_tableWidget.setItem(rows_count, 0, QtWidgets.QTableWidgetItem(str(value)))
                        self.result_tableWidget.setItem(rows_count, 1, QtWidgets.QTableWidgetItem(str(result[value])))
                        rows_count+=1
        else:
            keys=self.current_selected[0].keys()
            self.result_tableWidget.setColumnCount(len(keys))
            self.result_tableWidget.setHorizontalHeaderLabels(keys)
            for result in self.current_selected:
                self.result_tableWidget.insertRow(rows_count)
                vals=result.values()
                for idx,value in enumerate(vals):
                    self.result_tableWidget.setItem(rows_count, idx, QtWidgets.QTableWidgetItem(str(value)))
                rows_count+=1
        self.result_tableWidget.resizeColumnsToContents()

    def SetData(self,modules):
        """Fill the combo box with the sub-module names of the first filtered module."""
        self.comboBoxNew.clear()
        self.result_tableWidget.setRowCount(0)
        self.submodules=[key for key,value in self.filterdata[0].items()]
        self.comboBoxNew.addItems(self.submodules)

    def AddModules(self):
        """Create one radio button per module name and hook each to ModuleInfo()."""
        font = QtGui.QFont()
        font.setPointSize(12)
        test=[]
        for modules in self.module_list:
            # NOTE(review): 'Cinfo' here resolves to the module-level QMainWindow
            # created in the __main__ block, not a local -- confirm when this class
            # is used outside that script context.
            self.radioButton = QtWidgets.QRadioButton(Cinfo)
            self.radioButton.setObjectName(modules)
            self.radioButton.setText(modules)
            self.radioButton.setFont(font)
            self.radioButton.toggled.connect(self.ModuleInfo)
            self.Modules_verticalLayout.addWidget(self.radioButton)
            self.cheklist.append(self.radioButton)

    def retranslateUi(self, Cinfo):
        """Set all user-visible strings, tooltips and keyboard shortcuts."""
        _translate = QtCore.QCoreApplication.translate
        Cinfo.setWindowTitle(_translate("Cinfo", "Cinfo"))
        self.label.setText(_translate("Cinfo", "Select Module"))
        self.label_2.setText(_translate("Cinfo", "Cinfo ( Computer Information )"))
        self.menuFile.setTitle(_translate("Cinfo", "File"))
        self.menuExport_As.setTitle(_translate("Cinfo", "Export As"))
        self.menuOption.setTitle(_translate("Cinfo", "Option"))
        self.menuHelp.setTitle(_translate("Cinfo", "Help"))
        self.toolBar.setWindowTitle(_translate("Cinfo", "toolBar"))
        self.actionExcel.setText(_translate("Cinfo", "Excel"))
        self.actionExcel.setToolTip(_translate("Cinfo", "Export Record IntoExcel"))
        self.actionJson.setText(_translate("Cinfo", "Json"))
        self.actionJson.setToolTip(_translate("Cinfo", "Export into json File"))
        self.actionText.setText(_translate("Cinfo", "Text"))
        self.actionText.setToolTip(_translate("Cinfo", "Export Into Text File"))
        self.actionRefresh.setText(_translate("Cinfo", "Refresh"))
        self.actionRefresh.setToolTip(_translate("Cinfo", "refresh"))
        self.actionRefresh.setShortcut(_translate("Cinfo", "Ctrl+F5"))
        self.actionExit.setText(_translate("Cinfo", "Exit"))
        self.actionExit.setToolTip(_translate("Cinfo", "Exit Window"))
        self.actionExit.setShortcut(_translate("Cinfo", "Ctrl+Q"))
        self.actionAbout.setText(_translate("Cinfo", "About"))
        self.actionAbout.setToolTip(_translate("Cinfo", "Information "))
        self.actionAbout.setShortcut(_translate("Cinfo", "Ctrl+I"))
        self.actionHelp.setText(_translate("Cinfo", "Help"))
        self.actionHelp.setShortcut(_translate("Cinfo", "Ctrl+F1"))
        self.actionPreferences.setText(_translate("Cinfo", "Preferences"))
# Script entry point: create the Qt application, build the main window and
# hand control to the Qt event loop. 'Cinfo' is deliberately module-level,
# since Ui_Cinfo references it by name when creating the radio buttons.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Cinfo = QtWidgets.QMainWindow()
    ui = Ui_Cinfo()
    ui.setupUi(Cinfo)
    Cinfo.show()
    sys.exit(app.exec_())
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,228
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_hw_info.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
from tabulate import tabulate
class get_hw_info:
    '''
    get_hw_info HAS A SINGLE METHOD AND A CONSTRUCTOR, NAMED AS:
    1) __init__
    2) work()
    __init__ DOCFILE:
        __init__ CONTAINS INITIALISED AND UNINITIALISED VARIABLES FOR LATER USE BY CLASS METHODS.
    WORK() DOCFILE:
        work() RETURNS A DATA VARIABLE CONTAINING GIVEN DATA :
        1) BASIC INFORMATION
        2) MEMORY STATISTICS
        3) INSTALLED DRIVERS LIST
    '''

    def __init__(self):
        '''
        __init__ DOCFILE:
            __init__ CONTAINS INITIALISED AND UNINITIALISED VARIABLES FOR LATER USE BY CLASS METHODS.
        '''
        self.mem_info = "" # TO SAVE MEMORY INFO
        self.drivers = [] # TO SAVE LIST OF INSTALLED DRIVERS
        self.drivers_data = [] # TO SAVE MODIFIED DATA INTO A SEPARATE LIST
        self.cpu_info = [] # TO SAVE CPU INFORMATION
        self.ram_size = " " # TO SAVE RAM SIZE
        self.data = "" # TO SAVE THE FINALIZED DATA TO BE RETURNED

    def work(self):
        '''
        WORK() DOCFILE:
            work() RETURNS A DATA VARIABLE (formatted text tables) CONTAINING:
            1) BASIC INFORMATION
            2) MEMORY STATISTICS
            3) INSTALLED DRIVERS LIST
        '''
        # CPU INFO
        self.cpu_info = os.popen("lscpu | grep -e 'Model name' -e 'Architecture'").read().split('\n') # COLLECTING CPU INFO AND SAVING IT IN A LIST
        (self.cpu_info[0], self.cpu_info[1]) = (self.cpu_info[1], self.cpu_info[0]) # REARRANGING DATA (model name first)
        self.cpu_info = [cpu.split(' ') for cpu in self.cpu_info] # SPLITTING LIST ELEMENTS INTO A SUBLIST
        self.cpu_info.pop() # REMOVING LAST (empty) ELEMENT left by the trailing newline
        for cpu in self.cpu_info: # REMOVING EXTRA ELEMENTS
            cpu[0] = cpu[0][:len(cpu[0])-1] # REMOVING ':' FROM FIRST ELEMENT OF THE LIST
            try:
                # remove('') raises ValueError when no empty strings remain --
                # the exception is the loop's deliberate exit condition.
                while True:
                    cpu.remove('')
            except Exception as e:
                pass
        # KERNEL DRIVERS
        self.drivers = os.popen("ls -l /lib/modules/$(uname -r)/kernel/drivers/").read().split('\n') # COLLECTING DRIVER DETAILS
        self.drivers = [drive.split(' ') for drive in self.drivers] # SPLITTING DATA
        self.drivers.pop(0) # REMOVING REDUNDANT FIRST ('total N' line)
        self.drivers.pop() # REMOVING LAST (empty) ELEMENT
        self.drivers = [driver[len(driver)-1] for driver in self.drivers] # keep only the name column
        for index in range(0,len(self.drivers),4): # GROUPING ELEMENTS INTO ROWS OF FOUR FOR THE TABLE
            # Nested try/except handles the final partial group of 1-3 names.
            try:
                self.drivers_data.append([ self.drivers[index], self.drivers[index+1], self.drivers[index+2], self.drivers[index+3]])
            except:
                try:
                    self.drivers_data.append([ self.drivers[index], self.drivers[index+1], self.drivers[index+2]])
                except:
                    try:
                        self.drivers_data.append([ self.drivers[index], self.drivers[index+1]])
                    except:
                        self.drivers_data.append([ self.drivers[index] ])
        # MEMORY INFO
        self.mem_info = os.popen("free").read().split('\n') # SAVING MEMORY STATS INTO LIST
        self.mem_info = [mem.split(" ") for mem in self.mem_info] # SUBLISTING THE ELEMENTS IN LIST
        for mem in self.mem_info: # REMOVING REDUNDANT (empty) ELEMENTS FROM LIST
            try:
                while True:
                    mem.remove('')
            except Exception as e:
                pass
        self.mem_info.pop() # REMOVING LAST REDUNDANT ELEMENT
        self.mem_info[0].insert(0, 'Memory Type') # INSERTING NEW HEADER ELEMENT AT START OF LIST
        for mem in self.mem_info[1:]: # CONVERTING kB DATA TO GB AND APPENDING " GB"
            for m in range(1,len(mem)):
                # NOTE(review): divides `free`'s kB figures by 1e6 (decimal GB),
                # not by 1024*1024 -- confirm the intended unit convention.
                mem[m] = str(int(mem[m])/1000000) + " GB"
        for mem in self.mem_info: # PADDING SHORT ROWS WITH '-' SO ALL ROWS MATCH THE HEADER WIDTH
            if len(mem) <= len(self.mem_info[0]):
                for i in range(0, len(self.mem_info[0]) - len(mem)):
                    mem.append('-')
        # RAM SIZE
        self.ram_size = self.mem_info[1][1] # COLLECTING INSTALLED MEMORY INFO FROM MEMORY STATS
        self.cpu_info.append(["Installed RAM", self.ram_size]) # ADDING THIS DATA INTO LIST CONTAINING BASIC DETAILS
        # SAVING DATA INTO A DATA VARIABLE WHICH CAN BE RETURNED LATER
        self.data += "-------------------- BASIC INFORMATION --------------------\n"
        self.data += tabulate(self.cpu_info, headers=['PROPERTY', 'VALUE'],tablefmt="fancy_grid")
        self.data += "\n\n\n--------------------------------------- MEMORY STATS ---------------------------------------\n"
        self.data += tabulate(self.mem_info[1:], headers=self.mem_info[0],tablefmt="fancy_grid")
        self.data += "\n\n\n-------------- DRIVERS INSTALLED --------------\n"
        self.data += tabulate(self.drivers_data, headers=['LIST 1','LIST 2','LIST 3','LIST 4'],tablefmt="fancy_grid")
        # RETURNING DATA VARIABLE
        return self.data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,229
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/DeviceInfo.py
|
from lib.windows.common.CommandHandler import CommandHandler
from lib.windows.common import Utility as utl
class DeviceInfo:
    """Query attached device information (printers, sound devices, desktop
    monitors) by running `wmic` and parsing its CSV output."""

    def __init__(self):
        # Command runner shared across all queries.
        self.cmd = CommandHandler()

    def Preprocess(self, text):
        """Run `wmic <text> list /format:csv` and convert the CSV text into a dict."""
        raw_output = self.cmd.getCmdOutput('wmic {} list /format:csv'.format(text))
        return utl.CsvTextToDict(raw_output)

    def GetDeviceInfo(self):
        """Return a dict mapping each device category name to its parsed wmic data."""
        categories = ['PRINTER', 'SOUNDDEV', 'DESKTOPMONITOR']
        return {category: self.Preprocess(category) for category in categories}
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,230
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_package_list.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
class get_package_list:
    '''
    get_package_list CLASS COMBINES A SINGLE METHOD AND A CONSTRUCTOR, WHICH ARE AS FOLLOWS:
    1) __init__
    2) work()
    __init__ DOCFILE:
        __init__ SERVES THE PURPOSE TO INITIALISE VARIABLES WHICH ARE GOING TO BE USED LATER IN PROGRAM.
    work() DOCFILE :
        work() FUNCTION WORKS THIS WAY:
        1) SEARCHES FOR FILES IN /usr/bin/.
        2) REFINES OUT FILES WHICH ARE NOT SCRIPTS
        3) SAVES THEM IN A FILE.
        4) RETURNS THE COLLECTED DATA
    '''

    def __init__(self):
        '''
        __init__ DOCFILE:
            __init__ SERVES THE PURPOSE TO INITIALISE VARIABLES WHICH ARE GOING TO BE USED LATER IN PROGRAM.
        '''
        self.file_path = "/usr/bin/" # SETTING UP FILE PATH TO FIND PACKAGES
        self.files_found = os.listdir(self.file_path) # FINDING FILES AND SAVING THEM IN A LIST
        self.data = "S.No., Package Name\n" # INITIALISING VARIABLE TO STORE DATA LATER (CSV header row)
        self.current_path = os.getcwd() # SAVING THE CURRENT WORKING DIRECTORY FOR LATER USE
        self.count = 0 # TO KEEP COUNT OF NUMBER OF PACKAGES FOUND

    def work(self):
        '''
        work() DOCFILE :
            work() FUNCTION WORKS THIS WAY:
            1) SEARCHES FOR FILES IN /usr/bin/.
            2) REFINES OUT FILES WHICH ARE NOT SCRIPTS
            3) SAVES THEM IN A FILE.
            4) RETURNS A DICT {"List of Installed Applications": [[name], ...]}
        '''
        # CHANGING WORKING DIRECTORY so os.path.isdir() below sees the entries
        os.chdir(self.file_path) # CHANGING CURRENT WORKING DIRECTORY
        ret_data = {"List of Installed Applications" : [["Applications Name"]]}
        # LISTING ALL FILES AND SERIAL NUMBER EXCLUDING FOLDERS
        for file in self.files_found: # CHECKING EACH SCANNED FILE ONE BY ONE
            if not os.path.isdir(file): # CHECKING IF SCANNED ENTRY IS A FILE OR FOLDER
                if not file.endswith(".sh"): # REMOVING SCRIPT FILES
                    self.count += 1 # IF IT IS A FILE, COUNT INCREASES BY 1
                    self.data += str(self.count) + "," + file + "\n" # SAVING THE PACKAGE NAME AND SERIAL NUMBER IN DATA VARIABLE
                    ret_data["List of Installed Applications"].append([file])
        if self.current_path.find("output") == -1: # CHECKING IF CURRENT WORKING DIRECTORY IS OUTPUT FOLDER
            self.current_path += "/output/"
            os.chdir(self.current_path) # CHANGING CURRENT WORKING DIRECTORY
        # NOTE(review): this open() fails if the "output" directory does not
        # already exist next to the original working directory -- confirm the
        # caller guarantees it.
        with open("linux_packages_installed.csv", 'w') as pack: # OPENING NEW FILE TO SAVE DATA
            pack.write(self.data) # WRITING DATA TO FILE
        return ret_data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,231
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/StorageInfo.py
|
from lib.windows.common.CommandHandler import CommandHandler
import math
from lib.windows.common import Utility as utl
import wmi
class StorageInfo:
    '''
    className: StorageInfo
    Description: reports the total disk size, partition details and RAM details.
    Call this method:
        objectName.getStorageinfo()
    '''

    def __init__(self):
        # Command runner shared by all wmic-based queries.
        self.cmd = CommandHandler()

    def convert_size(self, size_bytes):
        '''
        Accept an integer byte count and render it as a human readable
        B/KB/MB/GB/... string.
        '''
        if size_bytes == 0:
            return "0B"
        units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
        exponent = int(math.floor(math.log(size_bytes, 1024)))
        scaled = round(size_bytes / math.pow(1024, exponent), 2)
        return "%s %s" % (scaled, units[exponent])

    def getDiskSize(self):
        '''
        Return a list of {'Name', 'TotalSize'} dicts, one per physical disk.
        '''
        output = self.cmd.getCmdOutput('wmic diskdrive GET caption,size')
        disks = []
        for line in output.splitlines():
            tokens = line.split()
            # Data rows have a multi-word caption plus a trailing size column.
            if len(tokens) > 2:
                entry = {'Name': " ".join(tokens[:-1])}
                try:
                    entry['TotalSize'] = self.convert_size(int(tokens[-1]))
                except ValueError:
                    # Header row ("Caption Size") or malformed size -> no size.
                    entry['TotalSize'] = None
                disks.append(entry)
        return disks

    def getRamSize(self):
        '''
        Return the total usable RAM size as [{'PhysicalMemory': <size string>}].
        '''
        comp = wmi.WMI()
        sizes = []
        for system in comp.Win32_ComputerSystem():
            sizes.append({'PhysicalMemory': self.convert_size(int(system.TotalPhysicalMemory))})
        return sizes

    def Preprocess(self, text):
        '''
        Run `wmic <text> list /format:csv` and parse the CSV output into a dict.
        '''
        raw_output = self.cmd.getCmdOutput(f'wmic {text} list /format:csv')
        return utl.CsvTextToDict(raw_output)

    def getLogicalDisk(self):
        '''
        Return the disk partition details (name, free space, total size).
        '''
        output = self.cmd.getCmdOutput('wmic logicaldisk get size,freespace,caption')
        partitions = []
        for line in output.splitlines():
            tokens = line.split()
            # Data rows contain a drive letter like "C:" plus two numeric columns.
            if ':' in line and len(tokens) > 2:
                partitions.append({
                    'Name': tokens[0].split(":")[0],
                    'FreeSpace': self.convert_size(int(tokens[1])),
                    'TotalSize': self.convert_size(int(tokens[2])),
                })
        return partitions

    def getStorageinfo(self):
        '''
        Return: logical disks, RAM and total disk size, plus the raw wmic data
        for every storage-related category.
        '''
        sinfo = {}
        sinfo['Partions'] = self.getLogicalDisk()
        sinfo['Ram'] = self.getRamSize()
        sinfo['DiskSize'] = self.getDiskSize()
        storage_categories = ['logicaldisk', 'CDROM', 'DEVICEMEMORYADDRESS', 'DISKDRIVE',
                              'DISKQUOTA', 'DMACHANNEL', 'LOGICALDISK', 'MEMCACHE',
                              'MEMORYCHIP', 'MEMPHYSICAL', 'PAGEFILE', 'PARTITION', 'VOLUME']
        for category in storage_categories:
            sinfo[category] = self.Preprocess(category)
        return sinfo
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,232
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/list_files.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
import filetype
import json
from datetime import datetime
class list_files:
    '''
    Scans a directory tree, records every file found, and produces:
      * "File list.csv"      — name,path rows for every file,
      * "File Overview.json" — per-category and per-extension counts,
    both written into the "output" folder. work() also returns the same
    data shaped for the UI.
    Functions:
      1) __init__            — initialise counters and extension tables.
      2) work(root_path)     — walk the tree, write outputs, return data.
      3) _record_extension() — classify one file name into the counters.
    '''
    def __init__(self):
        '''
        Initialise the file accumulator, per-category counters,
        per-extension counters and the known-extension tables.
        '''
        # all_data holds "name,full/path" strings for every file found.
        self.all_data = []
        self.categories = {"other":0,"images":0,"videos":0,"audios":0,"archives":0,"fonts":0,}
        self.extension_count = {"other":0}
        self.count = 0
        self.images = ["jpg","jpx","png","gif","webp","cr2","tif","bmp","jxr","psd","ico","heic"]
        self.videos = ["mp4", "m4v", "mkv", "webm", "mov", "avi", "wmv", "mpg", "flv"]
        self.audios = ["mid", "mp3", "m4a", "ogg", "flac", "wav", "amr"]
        self.archives = ["epub", "zip", "tar", "rar", "gz", "bz2", "7z", "xz", "pdf", "exe", "swf", "rtf", "eot", "ps", "sqlite", "nes", "crx", "cab", "deb", "ar", "Z", "lz"]
        self.fonts = ["woff", "woff2", "ttf", "otf"]
        self.current_path = os.getcwd()

    def _record_extension(self, file):
        """Classify *file* by its extension, updating the matching
        category counter and the per-extension counter.

        Files without a dot, or with an unknown extension, are lumped
        into the "other" bucket (per-extension detail is only kept for
        known categories, matching the original behaviour)."""
        if '.' in file:
            ext = file.split('.')[-1].lower()
            for category, extensions in (("images", self.images),
                                         ("videos", self.videos),
                                         ("audios", self.audios),
                                         ("archives", self.archives),
                                         ("fonts", self.fonts)):
                if ext in extensions:
                    self.categories[category] += 1
                    self.extension_count[ext] = self.extension_count.get(ext, 0) + 1
                    return
        self.categories["other"] += 1
        self.extension_count["other"] += 1

    def work(self, root_path='/home/royal/Documents/KWOC/Cinfo'):
        '''
        Walk *root_path* (parameterised; default preserved from the
        original hard-coded location), write "File list.csv" and
        "File Overview.json" into the output folder, and return a dict
        with the file rows and the overview table for the UI.
        '''
        ret_data = {"Files": []}
        print("Starting work....", end='\r')
        for (root, dirs, files) in os.walk(root_path, topdown=True):
            # Store "name,full/path" rows in the required CSV format.
            file_list = [file + "," + root + '/' + file for file in files]
            self.all_data.extend(file_list)
            for file in files:
                self._record_extension(file)
            self.count += len(file_list)
            print("Found %d files" % (self.count), end='\r')
        data = "File Name, File Address\n"
        data += '\n'.join(self.all_data)
        # Move into the output folder (append it once if not already there).
        if self.current_path.find("output") == -1:
            self.current_path += "/output/"
        os.chdir(self.current_path)
        with open("File list.csv", "w") as output:
            output.write(data)
        ret_data["Files"] = [i.split(',') for i in data.split('\n')]
        # Build the overview structure written to JSON.
        data = {}
        data["Total Files"] = []
        data["Total Files"].append({"No of files": self.count})
        data["Category"] = []
        for i in self.categories:
            data["Category"].append({i: self.categories[i]})
        for i in self.extension_count:
            data["Category"].append({i: self.extension_count[i]})
        os.chdir(self.current_path)
        with open("File Overview.json", "w") as filecount:
            json.dump(data, filecount)
        # Reshape the overview into [label, value] rows for the UI table.
        tempList = []
        for eachDict in data["Category"]:
            tempList.append([list(eachDict.keys())[0], str(list(eachDict.values())[0])])
        data["Category"] = tempList
        data["Category"].insert(0, ["Total Files", str(list(data["Total Files"][0].values())[0])])
        data["Category"].insert(0, ["File Type", "No of Files Found"])
        ret_data["Files Overview"] = data["Category"]
        return ret_data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,233
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_os_info.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
from tabulate import tabulate
class get_os_info:
    '''
    Gathers OS, CPU and user details on a Linux machine by shelling out
    to `hostnamectl`, `lscpu` and `lslogins` and tabulating the output.
    Functions:
      1) __init__ — initialises the plain-text report header.
      2) work()   — captures, formats and returns all details as a dict
                    with keys "OS Information", "CPU Information" and
                    "Users In Machine" (lists of [property, value] rows).
    '''
    def __init__(self):
        '''
        Initialise the running plain-text report with its OS header.
        '''
        self.details = "------------------------------ OS Information ------------------------------\n"
    def work(self):
        data = {"OS Information" : [],"CPU Information" : [],"Users In Machine" : [],}
        temp = []
        '''
        work() DOCINFO:
        THIS FUNCTIONS WORKS IN THE FOLLOWING WAYS:
        1) CAPTURING DETAILS.
        2) FORMATTING THE OUPUT.
        3) SAVING THE OUTPUT IN A VARIABLE.
        4) THE VARIABLE IS THEN FINALLY RETURNED.
        '''
        # Shell out for OS/kernel/architecture and CPU details.
        os_ker_arch = os.popen("hostnamectl | grep -e 'Machine ID' -e 'Boot ID' -e 'Operating System' -e Kernel -e Architecture").read()
        os_more = os.popen("lscpu | grep -e 'Model name' -e 'CPU MHz' -e 'CPU max MHz' -e 'CPU min MHz' -e 'CPU op-mode(s)' -e 'Address sizes' -e 'Thread(s) per core' -e Kernel -e 'Core(s) per socket' -e 'Vendor ID' -e Virtualization -e 'L1d cache' -e 'L1i cache' -e 'L2 cache' -e 'NUMA node0 CPU(s)'").read()
        # Strip all spaces so "Key:   Value" lines split cleanly on ':'.
        os_ker_arch = os_ker_arch.replace(" ", "")
        temp_container = []
        ## LIST CONVERSION
        os1 = os_ker_arch.split('\n')
        os1.pop()
        os2 = os_more.split("\n")
        # OS-DETAILS ADDED HERE
        # Rows 2+ first, then rows 0 and 1 — reorders the hostnamectl
        # output so Machine ID / Boot ID come last.
        for fetch in range(2, len(os1)):
            temp_container.append(os1[fetch].split(':'))
        temp_container.append(os1[0].split(':'))
        # os1[1][1:] drops the first character of the Boot ID line —
        # presumably a leftover leading character; TODO confirm.
        temp_container.append(os1[1][1:].split(':'))
        # NOTE(review): temp_container entries are lists, so this
        # comparison with '' never matches — looks like a dead guard
        # intended to drop a trailing empty row; confirm and fix upstream.
        if temp_container[-1] == '':
            temp_container.pop()
        self.details += tabulate(temp_container, headers = ["Property", "Value"],tablefmt="fancy_grid")
        temp = temp_container.copy()
        temp.insert(0,["Property", "Value"])
        data["OS Information"].extend(temp)
        #print(temp)
        temp_container.clear()
        self.details += "\n\n\n------------------------------ CPU Information ------------------------------\n"
        # CPU-INFORMTION ADDED HERE
        # lscpu rows are appended in a fixed, hand-picked order
        # (4..9, then 2, 3, 0, 1, then 10..end).
        for fetch in range(4, 10):
            temp_container.append(os2[fetch].split(':'))
        temp_container.append(os2[2].split(':'))
        temp_container.append(os2[3].split(':'))
        temp_container.append(os2[0].split(':'))
        temp_container.append(os2[1].split(':'))
        for fetch in range(10, len(os2)):
            temp_container.append(os2[fetch].split(':'))
        # NOTE(review): same dead '' guard as above (elements are lists).
        if temp_container[-1] == '':
            temp_container.pop()
        self.details += tabulate(temp_container, headers = ["Property", "Value"],tablefmt="fancy_grid")
        temp = temp_container.copy()
        temp.insert(0,["Property", "Value"])
        # Drop the last row (the trailing empty split from os2) before
        # exposing the data to the caller.
        temp.pop()
        data["CPU Information"].extend(temp)
        # FETCHING USERNAMES FROM OS
        user_name_list = user_name_string.split('\n')
        user_name_list.pop()
        user_names = "root\n"
        final_usernames = []
        # Column 1 of each `lslogins -u` row is the username.
        for user in user_name_list:
            final_usernames.append(user.split(" ")[1])
        # First row is the lslogins header — discard it.
        final_usernames.pop(0)
        temp_container.clear()
        temp_container.append(["root"])
        for user in final_usernames:
            if user != '':
                temp_container.append([user])
        # NOTE(review): same dead '' guard as above (elements are lists).
        if temp_container[-1] == '':
            temp_container.pop()
        self.details += "\n\n\n------------------------------ Users in Machine ------------------------------\n"
        self.details += tabulate(temp_container, headers = ["Usernames"],tablefmt="fancy_grid")
        temp = temp_container.copy()
        temp.insert(0,["Usernames"])
        #print(temp)
        data["Users In Machine"].extend(temp)
        # Trim whitespace from every CPU value column.
        for i in data["CPU Information"]:
            i[1] = i[1].strip()
        # RETURNING ALL FINALISED DETAILS
        # print(self.details)
        return data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,234
|
chavarera/Cinfo
|
refs/heads/master
|
/WindowsInfo.py
|
from lib.windows import SystemInfo,NetworkInfo,SoftwareInfo,StorageInfo
from lib.windows import HardwareInfo,FileInfo,DeviceInfo,MiscInfo,ServiceInfo
import os
import json
import pickle
def Display(d, indent=4):
    """Serialize *d* to a key-sorted, pretty-printed JSON string.

    Fix: the *indent* parameter was previously ignored (output was always
    indented by 4). It is now honoured, with the default changed to 4 so
    existing callers that rely on the old output are unaffected.
    """
    return json.dumps(d, sort_keys=True, indent=indent)
def SavePickle(data):
    """Persist *data* to 'result.pickle' in the current working directory."""
    with open('result.pickle', 'wb') as handle:
        pickle.dump(data, handle)
def CallData():
    """Collect every information category into one dict and pickle it.

    Fix: the previous `cdata = Display(Container)` assignment was dead
    code (the pretty-printed JSON was never used); it has been removed.
    SavePickle remains the single output of this function.
    """
    Container = {
        'system': SystemInfo.SystemInfo().GetSystemInfo(),
        'hardware': HardwareInfo.HardwareInfo().getHardwareinfo(),
        'network': NetworkInfo.NetworkInfo().networkinfo(),
        'software': SoftwareInfo.SoftwareInfo().getSoftwareList(),
        'device': DeviceInfo.DeviceInfo().GetDeviceInfo(),
        'storage': StorageInfo.StorageInfo().getStorageinfo(),
        'service': ServiceInfo.ServiceInfo().getServiceInfo(),
    }
    SavePickle(Container)
# Runs at import/execution time: gather all system data and pickle it,
# then point the user at the UI entry point. Failures are printed rather
# than raised so the console stays readable.
try:
    CallData()
except Exception as ex:
    print(ex)
else:
    print("Now Run \npython MainUi.py")
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,235
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/common/Utility.py
|
import time
import json
def CsvTextToDict(text):
    """Parse WMIC `/format:csv` output into a list of row dictionaries.

    The first non-blank line is treated as the header; every following
    non-empty line is zipped against it. Rows shorter than the header
    are truncated by zip, matching the original behaviour.
    """
    rows = text.strip().splitlines()
    header = rows[0].split(",")
    parsed = []
    for row in rows[1:]:
        if row:
            parsed.append(dict(zip(header, row.split(","))))
    return parsed
def ExportTOJson(data):
    """Dump *data* as JSON into output/<timestamp>.json.

    Returns (True, message) on success or (False, exception) on failure.

    Fixes: the success message was garbled ("saved fille in (unknown)")
    and did not include the file name; the function also failed when the
    `output` directory did not exist — it is now created on demand.
    """
    import os  # local import keeps the module's top-level imports unchanged
    timestr = time.strftime("%Y%m%d-%H%M%S")
    filename = f'output/{timestr}.json'
    try:
        os.makedirs('output', exist_ok=True)
        with open(filename, 'w') as fp:
            json.dump(data, fp)
        return True, f"successfully saved file in {filename}"
    except Exception as ex:
        return False, ex
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,236
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/SoftwareInfo.py
|
try:
import _winreg as reg
except:
import winreg as reg
class SoftwareInfo:
    '''
    className: SoftwareInfo
    Description: Returns the installed software names with version and
    publisher, read from the Windows registry Uninstall keys, plus the
    list of registered web browsers.

    Fixes: bare `except:` clauses narrowed to `except Exception`;
    getCheck now returns early instead of scanning the whole list;
    registry paths are raw strings (same values, no accidental escapes);
    an unused local was removed.
    '''
    def getVal(self, name, asubkey):
        """Read registry value *name* from *asubkey*; "undefined" if missing."""
        try:
            return reg.QueryValueEx(asubkey, name)[0]
        except Exception:
            return "undefined"
    def getCheck(self, all_softwares, version, publisher):
        """Return 1 if a (version, publisher) pair already exists in
        *all_softwares*, else 0 (used for de-duplication)."""
        for entry in all_softwares:
            if (entry['version'] == version) and (entry['publisher'] == publisher):
                return 1  # early exit — no need to scan the rest
        return 0
    def getReg_keys(self, flag):
        """Enumerate HKLM Uninstall subkeys (32- or 64-bit view per *flag*)
        and collect unique {name, version, publisher} dicts."""
        Hkeys = reg.HKEY_LOCAL_MACHINE
        path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall'
        Regkey = reg.ConnectRegistry(None, Hkeys)
        key = reg.OpenKey(Regkey, path, 0, reg.KEY_READ | flag)
        key_count = reg.QueryInfoKey(key)[0]
        all_softwares = []
        for i in range(key_count):
            singsoft = {}
            try:
                keyname = reg.EnumKey(key, i)
                asubkey = reg.OpenKey(key, keyname)
                name = self.getVal("DisplayName", asubkey)
                version = self.getVal("DisplayVersion", asubkey)
                publisher = self.getVal("Publisher", asubkey)
                # Only record fully-described, not-yet-seen entries.
                if (name != 'undefined' and version != "undefined" and publisher != "undefined"):
                    if self.getCheck(all_softwares, version, publisher) != 1:
                        singsoft['name'] = name
                        singsoft['version'] = version
                        singsoft['publisher'] = publisher
                        all_softwares.append(singsoft)
            except Exception:
                # Some subkeys are unreadable or malformed; skip them.
                continue
        return all_softwares
    def getSoftwareList(self):
        '''
        Return installed software (32- and 64-bit views combined) plus
        installed web browsers, as a dict. On failure the exception
        object is returned (original contract preserved).
        '''
        try:
            all_installed_apps = {}
            all_installed_apps["installedPrograms"] = self.getReg_keys(reg.KEY_WOW64_32KEY) + self.getReg_keys(reg.KEY_WOW64_64KEY)
            all_installed_apps["WebBrowsers"] = self.GetInstalledBrowsers()
            return all_installed_apps
        except Exception as ex:
            return ex
    def GetInstalledBrowsers(self):
        '''
        usage: object.GetInstalledBrowsers()
        Output: list of {id, Name} dicts for registered browsers.
        '''
        path = r'SOFTWARE\Clients\StartMenuInternet'
        Hkeys = reg.HKEY_LOCAL_MACHINE
        Regkey = reg.ConnectRegistry(None, Hkeys)
        key = reg.OpenKey(Regkey, path, 0, reg.KEY_READ | reg.KEY_WOW64_32KEY)
        key_count = reg.QueryInfoKey(key)[0]
        browser_list = []
        for i in range(key_count):
            try:
                browser_list.append({'id': i, 'Name': reg.EnumKey(key, i)})
            except Exception:
                continue
        return browser_list
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,237
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/SystemInfo.py
|
from lib.windows.common.CommandHandler import CommandHandler
from lib.windows.common.RegistryHandler import RegistryHandler
from lib.windows.common import Utility as utl
from datetime import datetime
import platform
class SystemInfo:
    '''
    Class Name: SystemInfo
    Description: fetches operating-system related information from the
    Windows registry, the `platform` module and WMIC queries.
    Call objectName.GetSystemInfo() to get all system related data.

    Fix: bare `except:` clauses narrowed to `except Exception` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    '''
    def __init__(self):
        self.cmd = CommandHandler()
    def Preprocess(self, text):
        """Run `wmic <text> list /format:csv` and parse it into row dicts."""
        cmd = f'wmic {text} list /format:csv'
        Command_res = self.cmd.getCmdOutput(cmd)
        result = utl.CsvTextToDict(Command_res)
        return result
    def getPlatform(self, name):
        '''Return the result of platform.<name>(), or None if the
        attribute does not exist or the call fails.
        Usage: objectName.getPlatform(name)
        '''
        try:
            return getattr(platform, name)()
        except Exception:
            return None
    def getMachineName(self):
        '''Return the machine (network) name, or None on failure.
        Usage: objectName.getMachineName()
        '''
        try:
            return platform.node()
        except Exception:
            return None
    def get_reg_value(self, name):
        '''Return the string value of *name* under
        HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion,
        or None when the key/value cannot be read.
        Usage: objectName.get_reg_value(name)
        '''
        try:
            path = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
            reg = RegistryHandler("HLM", path)
            return reg.getValues(name)
        except Exception:
            return None
    def GetSystemInfo(self):
        '''
        Return a dictionary of system information gathered from the
        Windows registry, the platform module and WMIC categories.
        Usage: objectname.GetSystemInfo()
        '''
        system_data = {}
        # Registry-sourced values.
        reg_data = ['ProductName','InstallDate','PathName','ReleaseId','CompositionEditionID','EditionID','SoftwareType',
                    'SystemRoot','ProductId','BuildBranch','BuildLab','BuildLabEx','CurrentBuild']
        for name in reg_data:
            value = self.get_reg_value(name)
            if name == "CompositionEditionID":
                system_data["CompositionID"] = value
            elif name == "InstallDate":
                # InstallDate is stored as a Unix timestamp in the registry.
                system_data[name] = str(datetime.fromtimestamp(value))
            else:
                system_data[name] = value
        # platform-module values, stored under friendly display names.
        platform_data = ['machine','node','platform','system','release','version','processor']
        platform_name = ['Machine Name','Network Name','Platform Type','System Type','Release No ','Version No','Processor Name']
        for idx, name in enumerate(platform_data):
            system_data[platform_name[idx]] = self.getPlatform(name)
        system_categories = ['OS','TIMEZONE','BOOTCONFIG','COMPUTERSYSTEM','STARTUP']
        Final_result = {}
        Final_result['SystemData'] = [system_data]
        for part in system_categories:
            Final_result[part] = self.Preprocess(part)
        return Final_result
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,238
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/windows/common/RegistryHandler.py
|
try:
import _winreg as reg
except:
import winreg as reg
class RegistryHandler:
    """Thin wrapper around winreg for reading subkeys/values under one path."""
    def __init__(self, key, path):
        self.Hkey = self.getRootKey(key)
        self.path = path
        self.key = reg.OpenKey(self.Hkey, self.path)
    def getRootKey(self, key):
        """Map a short root alias ('HCR', 'HCU', 'HLM', 'HU', 'HCC')
        to its winreg HKEY constant."""
        roots = {
            'HCR': reg.HKEY_CLASSES_ROOT,
            'HCU': reg.HKEY_CURRENT_USER,
            'HLM': reg.HKEY_LOCAL_MACHINE,
            'HU': reg.HKEY_USERS,
            'HCC': reg.HKEY_CURRENT_CONFIG,
        }
        try:
            return roots[key]
        except Exception as ex:
            # NOTE: mirrors the original contract — the exception object
            # is returned (not raised) for unknown aliases.
            return ex
    def getKeys(self):
        """Return the number of subkeys, closing the handle afterwards."""
        subkey_total = reg.QueryInfoKey(self.key)[0]
        self.key.Close()
        return subkey_total
    def getValues(self, name):
        '''Return the string value stored under *name* for the opened key.
        '''
        return reg.QueryValueEx(self.key, name)[0]
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,239
|
chavarera/Cinfo
|
refs/heads/master
|
/linuxUI.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainUi.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
import os
import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
from lib.linux import get_browsers,get_drives,get_hw_info,get_network_info,get_os_info,get_package_list,get_ports,get_startup_list,list_files
class Ui_Cinfo(object):
    def setupUi(self, Cinfo):
        """Build the entire main-window widget tree on *Cinfo*: central
        grid layout with the service radio buttons on the left and the
        result table on the right, plus the menu bar, tool bar, status
        bar and all QActions. Generated from MainUi.ui and then edited;
        hand changes here are overwritten if the .ui file is regenerated.
        """
        # Window basics: object name, size, icon.
        Cinfo.setObjectName("Cinfo")
        Cinfo.resize(777, 461)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("icons/logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Cinfo.setWindowIcon(icon)
        Cinfo.setIconSize(QtCore.QSize(32, 24))
        self.centralwidget = QtWidgets.QWidget(Cinfo)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
        # Left-hand column: one radio button per information service.
        # Each button routes to toggleCheck with its service index.
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        ## Home Page
        self.homePage = QtWidgets.QRadioButton(self.centralwidget)
        self.homePage.setObjectName("homePage")
        self.homePage.toggled.connect(lambda: self.toggleCheck(self.homePage,0))
        self.verticalLayout_2.addWidget(self.homePage)
        ## About Your Machine
        self.aboutYourMachine = QtWidgets.QRadioButton(self.centralwidget)
        self.aboutYourMachine.setObjectName("aboutYourMachine")
        self.aboutYourMachine.toggled.connect(lambda: self.toggleCheck(self.aboutYourMachine,5))
        self.verticalLayout_2.addWidget(self.aboutYourMachine)
        ## For Network
        self.networkInfo = QtWidgets.QRadioButton(self.centralwidget)
        self.networkInfo.setObjectName("networkInfo")
        self.networkInfo.toggled.connect(lambda: self.toggleCheck(self.networkInfo,4))
        self.verticalLayout_2.addWidget(self.networkInfo)
        ## For Installed Applications
        # NOTE(review): this button is created here but only added to the
        # layout further down (after startUpapplications), so it appears
        # below "List Startup Applications" in the UI — confirm intended.
        self.instaLledApplications = QtWidgets.QRadioButton(self.centralwidget)
        self.instaLledApplications.setObjectName("instaLledApplications")
        self.instaLledApplications.toggled.connect(lambda: self.toggleCheck(self.instaLledApplications,3))
        ## For Installed Browsers
        self.installedBrowsers = QtWidgets.QRadioButton(self.centralwidget)
        self.installedBrowsers.setObjectName("installedBrowsers")
        self.installedBrowsers.toggled.connect(lambda: self.toggleCheck(self.installedBrowsers,6))
        self.verticalLayout_2.addWidget(self.installedBrowsers)
        ## For Startup Applications
        self.startUpapplications = QtWidgets.QRadioButton(self.centralwidget)
        self.startUpapplications.setObjectName("startUpapplications")
        self.startUpapplications.toggled.connect(lambda: self.toggleCheck(self.startUpapplications,2))
        self.verticalLayout_2.addWidget(self.startUpapplications)
        self.verticalLayout_2.addWidget(self.instaLledApplications)
        ## Opened Ports
        self.openedPorts = QtWidgets.QRadioButton(self.centralwidget)
        self.openedPorts.setObjectName("openedPorts")
        self.openedPorts.toggled.connect(lambda: self.toggleCheck(self.openedPorts,7))
        self.verticalLayout_2.addWidget(self.openedPorts)
        ## For Listing files
        self.listfIles = QtWidgets.QRadioButton(self.centralwidget)
        self.listfIles.setObjectName("listfIles")
        self.listfIles.toggled.connect(lambda: self.toggleCheck(self.listfIles,1))
        self.verticalLayout_2.addWidget(self.listfIles)
        self.gridLayout.addLayout(self.verticalLayout_2, 2, 1, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 4, 1, 1)
        # Right-hand side: read-only, auto-stretching result table plus
        # the combo box used to pick between result tables.
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setProperty("showDropIndicator", True)
        self.tableWidget.setShowGrid(True)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.horizontalHeader().setSortIndicatorShown(False)
        self.tableWidget.verticalHeader().setSortIndicatorShown(False)
        self.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        self.tableWidget.verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.gridLayout.addWidget(self.tableWidget, 2, 4, 1, 1)
        self.tables = QtWidgets.QComboBox(self.centralwidget)
        self.tables.setObjectName("tables")
        self.gridLayout.addWidget(self.tables, 1, 4, 1, 1)
        Cinfo.setCentralWidget(self.centralwidget)
        # Menu bar: File (with Export As submenu), Option, Help.
        self.menubar = QtWidgets.QMenuBar(Cinfo)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 777, 26))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.menubar.setFont(font)
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.menuFile.setFont(font)
        self.menuFile.setObjectName("menuFile")
        self.menuExport_As = QtWidgets.QMenu(self.menuFile)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.menuExport_As.setFont(font)
        self.menuExport_As.setObjectName("menuExport_As")
        self.menuOption = QtWidgets.QMenu(self.menubar)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.menuOption.setFont(font)
        self.menuOption.setObjectName("menuOption")
        self.menuHelp = QtWidgets.QMenu(self.menubar)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.menuHelp.setFont(font)
        self.menuHelp.setObjectName("menuHelp")
        Cinfo.setMenuBar(self.menubar)
        self.toolBar = QtWidgets.QToolBar(Cinfo)
        self.toolBar.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.toolBar.setMovable(True)
        self.toolBar.setIconSize(QtCore.QSize(30, 24))
        self.toolBar.setObjectName("toolBar")
        Cinfo.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        self.statusBar = QtWidgets.QStatusBar(Cinfo)
        self.statusBar.setObjectName("statusBar")
        Cinfo.setStatusBar(self.statusBar)
        # Actions: export formats, refresh, exit, about/help, preferences.
        self.actionExcel = QtWidgets.QAction(Cinfo)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("icons/excel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExcel.setIcon(icon1)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionExcel.setFont(font)
        self.actionExcel.setObjectName("actionExcel")
        self.actionJson = QtWidgets.QAction(Cinfo)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap("icons/Json.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionJson.setIcon(icon2)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionJson.setFont(font)
        self.actionJson.setObjectName("actionJson")
        self.actionText = QtWidgets.QAction(Cinfo)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap("icons/text.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionText.setIcon(icon3)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionText.setFont(font)
        self.actionText.setObjectName("actionText")
        self.actionRefresh = QtWidgets.QAction(Cinfo)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap("icons/Refresh.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionRefresh.setIcon(icon4)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.actionRefresh.setFont(font)
        self.actionRefresh.setObjectName("actionRefresh")
        self.actionExit = QtWidgets.QAction(Cinfo)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap("icons/exit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionExit.setIcon(icon5)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionExit.setFont(font)
        self.actionExit.setObjectName("actionExit")
        self.actionAbout = QtWidgets.QAction(Cinfo)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap("icons/about.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionAbout.setIcon(icon6)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionAbout.setFont(font)
        self.actionAbout.setObjectName("actionAbout")
        self.actionHelp = QtWidgets.QAction(Cinfo)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap("icons/help.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionHelp.setIcon(icon7)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionHelp.setFont(font)
        self.actionHelp.setObjectName("actionHelp")
        self.actionPreferences = QtWidgets.QAction(Cinfo)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap("icons/Prefrences.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPreferences.setIcon(icon8)
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.actionPreferences.setFont(font)
        self.actionPreferences.setObjectName("actionPreferences")
        # Wire actions into menus and the tool bar.
        self.menuExport_As.addAction(self.actionExcel)
        self.menuExport_As.addAction(self.actionJson)
        self.menuExport_As.addAction(self.actionText)
        self.menuFile.addAction(self.actionRefresh)
        self.menuFile.addAction(self.menuExport_As.menuAction())
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit)
        self.menuOption.addAction(self.actionPreferences)
        self.menuHelp.addAction(self.actionAbout)
        self.menuHelp.addAction(self.actionHelp)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuOption.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.toolBar.addAction(self.actionRefresh)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionExcel)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionJson)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionText)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionExit)
        self.toolBar.addSeparator()
        # Apply translated texts and auto-connect slots by object name.
        self.retranslateUi(Cinfo)
        QtCore.QMetaObject.connectSlotsByName(Cinfo)
    def retranslateUi(self, Cinfo):
        """Apply (translatable) display texts, tooltips and shortcuts to
        every widget and action, then select the Home page as the
        initial view. Generated from MainUi.ui.
        """
        _translate = QtCore.QCoreApplication.translate
        Cinfo.setWindowTitle(_translate("Cinfo", "Cinfo"))
        # Radio-button labels for the service list.
        self.homePage.setText(_translate("Cinfo", "Home"))
        self.listfIles.setText(_translate("Cinfo", "List Files"))
        self.startUpapplications.setText(_translate("Cinfo", "List Startup Applications"))
        self.instaLledApplications.setText(_translate("Cinfo", "List Installed Applications"))
        self.networkInfo.setText(_translate("Cinfo", "Network Information"))
        self.aboutYourMachine.setText(_translate("Cinfo", "About Your Machine"))
        self.installedBrowsers.setText(_translate("Cinfo", "List Installed Browsers"))
        self.openedPorts.setText(_translate("Cinfo", "List Open Ports"))
        self.label.setText(_translate("Cinfo", "Choose Service :"))
        self.label_2.setText(_translate("Cinfo", "Result :"))
        # Menu titles.
        self.menuFile.setTitle(_translate("Cinfo", "File"))
        self.menuExport_As.setTitle(_translate("Cinfo", "Export As"))
        self.menuOption.setTitle(_translate("Cinfo", "Option"))
        self.menuHelp.setTitle(_translate("Cinfo", "Help"))
        self.toolBar.setWindowTitle(_translate("Cinfo", "toolBar"))
        # Action texts, tooltips and keyboard shortcuts.
        self.actionExcel.setText(_translate("Cinfo", "Excel"))
        self.actionExcel.setToolTip(_translate("Cinfo", "Export Record IntoExcel"))
        self.actionJson.setText(_translate("Cinfo", "Json"))
        self.actionJson.setToolTip(_translate("Cinfo", "Export into json File"))
        self.actionText.setText(_translate("Cinfo", "Text"))
        self.actionText.setToolTip(_translate("Cinfo", "Export Into Text File"))
        self.actionRefresh.setText(_translate("Cinfo", "Refresh"))
        self.actionRefresh.setToolTip(_translate("Cinfo", "refresh"))
        self.actionRefresh.setShortcut(_translate("Cinfo", "Ctrl+F5"))
        self.actionExit.setText(_translate("Cinfo", "Exit"))
        self.actionExit.setToolTip(_translate("Cinfo", "Exit Window"))
        self.actionExit.setShortcut(_translate("Cinfo", "Ctrl+Q"))
        self.actionAbout.setText(_translate("Cinfo", "About"))
        self.actionAbout.setToolTip(_translate("Cinfo", "Information "))
        self.actionAbout.setShortcut(_translate("Cinfo", "Ctrl+I"))
        self.actionHelp.setText(_translate("Cinfo", "Help"))
        self.actionHelp.setShortcut(_translate("Cinfo", "Ctrl+F1"))
        self.actionPreferences.setText(_translate("Cinfo", "Preferences"))
        # Start on the Home page and render it immediately.
        self.homePage.setChecked(True)
        self.toggleCheck(self.homePage,0)
## Refresh Function
def refresh(self):
    """Placeholder refresh handler; currently only logs to stdout."""
    print("Refreshed")
## Toggle Check
def toggleCheck(self,toggledButton, response):
    """Swap the result pane between the Home HTML view and a data table.

    toggledButton -- the checkable button whose state was toggled.
    response      -- 0 selects the Home page / empty table; any other
                     value is forwarded to returnData() to load service data.
    """
    # NOTE(review): `response is 0` and `isChecked() is True` rely on
    # identity, not equality; `is 0` only works via CPython's small-int
    # cache and emits a SyntaxWarning on modern Python — should be `== 0`
    # and a plain truth test.
    if response is 0 :
        if toggledButton.isChecked() is True :
            # Home checked: show the static rich-text welcome/help page.
            self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
            self.textBrowser.setObjectName("textBrowser")
            self.gridLayout.addWidget(self.textBrowser, 2, 4, 1, 1)
            self.tables.clear()
            self.tables.addItem("Home")
            self.textBrowser.setHtml("""<style type="text/css">p, li { white-space: pre-wrap; }</style>
<center> <img src="./icons/logo.png" align="center"> </center>
<p align="center" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt;"><em><span style="color: rgb(251, 160, 38);"> </span></em></span><span style="color: rgb(251, 160, 38);"><em><span style=" font-family:'Cantarell'; font-size:11pt; font-weight:600;">Cinfo ( Computer Information ) </span></em></span><span style=" font-family:'Cantarell'; font-size:11pt; font-weight:600; vertical-align:sub;"><em><span style="color: rgb(251, 160, 38);">v1.0 </span></em></span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'Cantarell'; font-size:11pt;">
<br>
</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt;">Welcome to Cinfo an all in one information board where you gett all information related to your machine.</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'Cantarell'; font-size:11pt;">
<br>
</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt; font-weight:600;">To get Started </span><span style=" font-family:'Cantarell'; font-size:11pt;">:</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt;">Choose service you want to be informed about, tick on the services and press the 'Let's Go' Button.</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'Cantarell'; font-size:11pt;">
<br>
</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt; font-weight:600;">Result</span><span style=" font-family:'Cantarell'; font-size:11pt;"> :</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt;">Your requested information will be right here in next moment, with title of information you requested.</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'Cantarell'; font-size:11pt;">
<br>
</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt; font-weight:600;">Support Us !!</span><span style=" font-family:'Cantarell'; font-size:11pt;"> :</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Cantarell'; font-size:11pt;">To show your support visit </span>
<a href="https://Github.com/chavarera/Cinfo" rel="noopener noreferrer" target="_blank"><span style=" font-family:'Cantarell'; font-size:11pt;">G</span><span style=" font-family:'Cantarell'; font-size:11pt;">itHub</span></a>
<a href="https://Github.com/chavarera/Cinfo"></a><span style=" font-family:'Cantarell'; font-size:11pt;"> page for the software and give us a star</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">
<a href="https://Github.com/chavarera/Cinfo"><span style=" font-family:'Cantarell'; font-size:11pt; text-decoration: underline; color:#0000ff;">https://Github.com/chavarera/Cinfo</span></a>
</p>""")
        else:
            # Home unchecked: swap in an empty, read-only, auto-stretching table.
            self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
            self.tableWidget.setProperty("showDropIndicator", True)
            self.tableWidget.setShowGrid(True)
            self.tableWidget.setObjectName("tableWidget")
            self.tableWidget.horizontalHeader().setSortIndicatorShown(False)
            self.tableWidget.verticalHeader().setSortIndicatorShown(False)
            self.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
            self.tableWidget.verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
            self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
            self.gridLayout.addWidget(self.tableWidget, 2, 4, 1, 1)
    # Non-zero response with a checked button: fetch and display the data.
    if toggledButton.isChecked() is True and response is not 0:
        self.returnData(response)
## TO CREATE A TABLE
def createTable(self,dataList):
    """Populate self.tableWidget from dataList.

    dataList -- list of rows; row 0 holds the column headers and is
                popped off, mutating the caller's list in place.
    """
    # One row per data entry (header row excluded).
    self.tableWidget.setRowCount(len(dataList)-1)
    self.tableWidget.setColumnCount(len(dataList[0]))
    self.tableWidget.setHorizontalHeaderLabels(dataList[0])
    # NOTE(review): pop(0) mutates the caller's list — callers must not
    # reuse the same list for a second render.
    dataList.pop(0)
    for row in range(len(dataList)):
        for column in range(len(dataList[0])):
            try:
                self.tableWidget.setItem(row, column, QtWidgets.QTableWidgetItem((dataList[row][column])))
            except Exception as e:
                # Ragged/short rows are silently skipped.
                pass
# CREATE A COMBOBOX FOR GIVEN FUNCTION
def createCombo(self, myDict):
    """Fill the service combo box from myDict's keys and wire selection.

    myDict -- mapping of display name -> table data (list of rows),
              rendered by bindFunctions() when an entry is selected.
    """
    self.tables.clear()
    self.tables.addItem("Choose the appropriate Information ")
    self.tables.addItems(myDict.keys())
    # Detach every previously connected slot; disconnect() raises once no
    # connection remains, which terminates the loop.
    while True:
        try:
            self.tables.currentIndexChanged.disconnect()
        except Exception as e:
            break
    self.tables.currentIndexChanged.connect(lambda : self.bindFunctions(myDict))
    # Auto-select the first real entry (index 0 is the placeholder).
    self.tables.setCurrentIndex(1)
## WINDOWS BACKEND DRIVER FUNCTION
def windowsBackend(self):
    """Windows data-collection entry point; currently an unimplemented stub."""
    print("Calling windows")
def bindFunctions(self,myDict):
    """Combo-selection slot: render the chosen dataset as a table."""
    # Ignore the placeholder and the Home pseudo-entry.
    if self.tables.currentText() not in ['','Choose the appropriate Information ','Home'] :
        self.createTable(myDict[self.tables.currentText()])
## LINUX BACKEND DRIVER FUNCTION
def linuxBackend(self, response):
    """Instantiate the Linux collectors and display the requested one.

    response -- integer service id: 1=files, 2=startup apps, 3=packages,
                4=network, 5=os info, 6=browsers, 7=open ports.
                Unknown ids are ignored.
    """
    # All collectors are instantiated up front, mirroring the original
    # behaviour (some collector __init__s capture the working directory).
    packages = get_package_list.get_package_list()
    startup = get_startup_list.get_startup_list()
    network = get_network_info.get_network_info()
    browsers = get_browsers.get_browsers()
    ports = get_ports.get_ports()
    drives = get_drives.get_drives()      # instantiated for parity; not dispatched
    os_info = get_os_info.get_os_info()
    hardware = get_hw_info.get_hw_info()  # instantiated for parity; not dispatched
    files = list_files.list_files()
    # BUG FIX: the original compared ints with `is` (e.g. `response is 1`),
    # which only works via CPython's small-int cache. Use an equality-keyed
    # dispatch table instead.
    dispatch = {
        1: files,
        2: startup,
        3: packages,
        4: network,
        5: os_info,
        6: browsers,
        7: ports,
    }
    collector = dispatch.get(response)
    if collector is not None:
        self.createCombo(collector.work())
## CALLING APPROPRIATE FUNCTION FOR APPRORIATE OS
def returnData(self, response):
    """Dispatch data collection to the OS-specific backend.

    response -- integer service id, forwarded to the Linux backend;
                the Windows backend is a stub and takes no id yet.
    """
    if os.name=='nt':
        self.windowsBackend()
    else:
        self.linuxBackend(response)
## MAIN FUNCTION
if __name__ == "__main__":
    # Build the Qt application, attach the generated UI to a main window
    # and enter the event loop; the exit status is propagated from Qt.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Cinfo = QtWidgets.QMainWindow()
    ui = Ui_Cinfo()
    ui.setupUi(Cinfo)
    Cinfo.show()
    sys.exit(app.exec_())
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,240
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_drives.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
from tabulate import tabulate
class get_drives:
    '''Collect mounted-disk information (via ``df -h``) as a readable report.

    work() returns a single string containing, per physical disk, a
    fancy-grid table of its partitions; the disk carrying /boot (if any)
    is rendered first as "DISK-1 ( Boot Drive )".
    '''
    def __init__(self):
        '''Initialise the accumulators used by work().'''
        self.data = ""              # final human-readable report
        self.temp_drive_list = []   # partitions of the disk currently being rendered
        self.boot_partition = ""    # partition mounted on /boot, if found
        self.drives = []            # distinct physical disk names
    def work(self):
        '''Build and return the disk report string.

        Steps: run ``df -h`` keeping only /dev/ rows, split each row into
        fields, derive physical disk names by stripping trailing partition
        digits, locate the /boot partition, then render one table per disk
        with the boot disk first.
        '''
        disks_available = os.popen("df -h | grep -e '/dev/'").read()  # ONLY USER-READABLE /dev/ PARTITIONS
        disk_list = disks_available.split('\n')
        disk_list = [file.split(' ') for file in disk_list]  # SPLIT EACH ROW INTO FIELDS
        # BUG FIX: the original called disk_list.remove() while iterating
        # disk_list, which skips elements; filter into a new list instead.
        disk_list = [disk for disk in disk_list if '/dev/' in disk[0]]
        # BUG FIX: likewise the whitespace cleanup removed items from each
        # sublist mid-iteration (with a retry loop to converge); a single
        # filtering pass drops all empty fields deterministically.
        disk_list = [[field for field in disk if field] for disk in disk_list]
        # Derive the physical disk name by stripping the trailing partition
        # number (e.g. /dev/sda1 -> /dev/sda).
        for disk in disk_list:
            disk_name = disk[0]
            for i in range(len(disk_name)-1, 0, -1):
                if not disk_name[i].isdigit():
                    disk_name = disk_name[0:i+1]
                    break
            if disk_name not in self.drives:
                self.drives.append(disk_name)
        # Find the partition holding the Linux boot files.
        for disk in disk_list:
            if disk[5] == "/boot":
                self.boot_partition = disk[0]
        headers = ['Partition Name', 'Total Size','Size Consumed', 'Size Remaining','Size Consumed( in percent )', 'Mounted On']
        # BUG FIX: the original removed the boot drive from self.drives
        # while iterating it; select the boot drive first, render it, then
        # remove it safely.
        boot_drive = next((d for d in self.drives if d in self.boot_partition), None)
        if boot_drive is not None:
            self.data += "------------------------------------------- DISK-1 ( Boot Drive ) --------------------------------------------\n"
            self.data += "Linux Installed On : %s\n\n"%(self.boot_partition)
            for disk in disk_list:
                if boot_drive in disk[0]:
                    self.temp_drive_list.append(disk)
            self.data += tabulate(self.temp_drive_list, headers=headers,tablefmt="fancy_grid")
            self.drives.remove(boot_drive)
        # Render the remaining (non-boot) disks.
        for drive in self.drives:
            self.data += "\n\n\n\n\n"
            self.data += "-------------------------------------------------------- DISK-%d --------------------------------------------------------\n"%(self.drives.index(drive)+2)
            self.temp_drive_list.clear()
            for disk in disk_list:
                if drive in disk[0]:
                    self.temp_drive_list.append(disk)
            self.data += tabulate(self.temp_drive_list, headers=headers,tablefmt="fancy_grid")
            self.data += "\n\n\n\n\n"
        return self.data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,241
|
chavarera/Cinfo
|
refs/heads/master
|
/LinuxInfo.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
import threading
from timeit import default_timer as timer
from tabulate import tabulate
from lib.linux import get_browsers
from lib.linux import get_drives
from lib.linux import get_hw_info
from lib.linux import get_network_info
from lib.linux import get_os_info
from lib.linux import get_package_list
from lib.linux import get_ports
from lib.linux import get_startup_list
from lib.linux import list_files
## Creating objects for the classes in import files
# One collector instance per information source; each exposes work().
packages = get_package_list.get_package_list()
startup = get_startup_list.get_startup_list()
network = get_network_info.get_network_info()
browsers = get_browsers.get_browsers()
ports = get_ports.get_ports()
drives = get_drives.get_drives()
os_info = get_os_info.get_os_info()
hardware = get_hw_info.get_hw_info()
files = list_files.list_files()
# Rows of [property, file-name-or-summary] printed as the final report table.
file_names = []
def indexing():
    """Ask the user whether to index all files and run the indexer if so.

    On success two report rows are appended to the global ``file_names``;
    on failure two error rows suggesting sudo are appended instead.
    """
    reply = input("Want to index all files in system, Y or N?\n(Note : It may take some time to index in first)\n")
    if reply in ('Y', 'y'):
        try:
            if files.work() == True:
                file_names.append(["File Information", "File list.csv"])
                file_names.append(["File Type Overview", "File Overview.json"])
        except Exception:
            print("Error occured while indexing")
            file_names.append(["File Information", "Error : try running with sudo"])
            file_names.append(["File Type Overview", "Error, try running with sudo"])
def other_works():
    """Gather machine, network, port, browser, package and startup info.

    Each collector's result (or an error marker) is appended to the global
    ``file_names`` list that the main script prints as a summary table; the
    combined machine report is also written to "About Your Machine.txt".
    """
    ## WRITING MACHINE INFORMATION
    try:
        data = os_info.work()+"\n\n"+hardware.work()+"\n\n"+drives.work()+"\n\n"
        current_path = os.getcwd()
        if current_path.find("output") == -1: # CHECKING IF CURRENT WORKING DIRECTORY IS OUTPUT FOLDER
            current_path += "/output/"
        os.chdir(current_path) # CHANGING CURRENT WORKING DIRECTORY
        with open("About Your Machine.txt","w") as about: # SAVNG DATA INTO FILE
            about.write(data)
        # BUG FIX: the original used square brackets (file_names.append[[...]]),
        # which raised TypeError and only appended via the except clause below.
        file_names.append(["Computer Information","About Your Machine.txt"])
    except Exception as e:
        file_names.append(["Computer Information","About Your Machine.txt"])
    ## WRIITING NETWORK INFORMATION
    try:
        file_names.append(["Network Information",network.work()])
    except Exception as e:
        file_names.append(["Network Information","Error getting information"])
    ## WRIITING OPEN PORTS INFORMATION
    try:
        file_names.append(["Open Ports in Machine",ports.work()])
    except Exception as e:
        file_names.append(["Open Ports in Machine","Error getting information"])
    ## WRIITING INSTALLED BROWSER INFORMATION
    try:
        file_names.append(["Installed Browsers",browsers.work()])
    except Exception as e:
        file_names.append(["Installed Browsers","Error getting information"])
    ## WRIITING INSTALLED PACKAGES INFORMATION
    try:
        file_names.append(["Installed Packages",packages.work()])
    except Exception as e:
        file_names.append(["Installed Packages","Error getting information"])
    ## WRIITING STARTUP APPLICATIONS INFORMATION
    try:
        file_names.append(["Startup Application",startup.work()])
    except Exception as e:
        file_names.append(["Startup Application","Error getting information"])
# Run indexing (interactive) and the other collectors concurrently, then
# print a summary table of everything gathered.
print("Please wait while indexing ends...")
t1 = threading.Thread(target=indexing)
t2 = threading.Thread(target=other_works)
start = timer()
t1.start()
t2.start()
t1.join()
# BUG FIX: t2 was never joined, so the summary below could print (and read
# file_names) before other_works finished populating it.
t2.join()
end = timer()
print("Task done and dusted...\n\n")
print("You can find OUTPUT reports with mentioned file names in output folder...\n\n")
print("Task completed in %d seconds"%(end-start))
print(tabulate(file_names, headers=["Property", "File Name"],tablefmt="fancy_grid"))
print('\n\n\n')
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,242
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_startup_list.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
class get_startup_list:
    '''Collect systemd service unit files with their enable/disable status.

    work() writes "startup applications.csv" into the output directory and
    returns {"List of Startup Programs": [[header], [name, status], ...]}.
    '''
    def __init__(self):
        '''
        __init__ DOCFILE:
        __init__ BLOCK CONTAINS INITIALISED VARIABLES FOR LATER USE.
        '''
        self.data = "" # TO SAVE FETCHED DATA
        self.current_path = "" # TO GET THE CURRENT WORKING DIRECTORY
        self.services = "" # THIS VARIABLES SAVED COMMAND LINE OUTPUT
        self.service_list = [] # LIST TO SAVE THE OUTPUT IN A FORMATTED WAY
    def work(self):
        '''
        work() DOCFILE:
        THE work() FUNCTIONS WORKS IN FOLLOWING WAY:
        1) SERVICE DATA IS COLLECTED IN A VARIABLE.
        2) A LIST IS CREATED FROM THE VARIABLE.
        3) REDUNDANT DATA IS REMOVED FROM THE LIST.
        4) EACH ELEMENT IS SPLITTED INTO SUBLIST.
        5) REDUNDANT DATA IS REMOVED FROM EVERY SUBLIST.
        6) SERIAL NUMBER IS ADDED TO EVERY SUBLIST.
        7) FINALLY FULL DATA IS WRITTEN INTO A SINGLE VARIABLE.
        8) VARIABLE IS RETURNED AS RETURNED VALUE FROM THE FUNCTION.
        '''
        ret_data = {"List of Startup Programs" : [["Package Name","Status"]]}
        self.services = os.popen("systemctl list-unit-files --type=service").read() # EXECUTING COMMAND AND SAVING THE OUTPUT IN STRING VARIABLE
        self.service_list = self.services.split('\n') # SPLITTING THE SERVICES DATA INTO THE LIST
        try:
            # list.remove('') raises ValueError once no '' is left, ending the loop.
            while True: # REMOVING EXTRA INDUCED SPACES INTO THE LIST
                self.service_list.remove('')
        except Exception as e:
            pass
        self.service_list.pop() # REMOVING LAST LIST ELEMENT WHICH IS NOT NEEDED
        self.service_list.pop(0) # REMOVING FIRST LIST ELEMENT WHICH IS REDUNDANT
        for i in range(0, len(self.service_list)): # SPLITTING INDIVIDUAL ELEMENT INTO TWO PARTS i.e. SERVICE AND IT'S STATUS
            self.service_list[i] = self.service_list[i].split(' ')
        for service in self.service_list: # REMOVING EXTRA SPACES INDUCED IN EACH SUBLIST
            try:
                while True:
                    service.remove('')
            except Exception as e:
                pass
        for i in range(0, len(self.service_list)): # HOVERING OVER THE WHOLE LIST TO EXECUTE SIMPLE FUNCTIONS
            self.service_list[i].insert(0, "%d"%(i+1)) # ADDING SERIAL NUMBER TO SUBLIST FOR LATER TABLE PRINTING
            if ".service" in self.service_list[i][1]: # REMOVING .Service IF EXISTS IN SERVICE NAME
                self.service_list[i][1] = self.service_list[i][1].replace(".service", '')
            if "@" in self.service_list[i][1]: # REMOVING @ IF EXISTS IN SERVICE NAME
                self.service_list[i][1] = self.service_list[i][1].replace("@", '')
        self.current_path = os.getcwd() # SAVING THE CURRENT WORKING DIRECTORY FOR LATER USE
        if self.current_path.find("output") == -1: # CHECKING IF CURRENT WORKING DIRECTORY IS OUTPUT FOLDER
            self.current_path += "/output/"
        os.chdir(self.current_path)
        self.data = ""
        self.data += "S.No,Service,Status\n"
        for i in self.service_list:
            self.data+=i[0]+","+i[1]+","+i[2]+"\n"
            ret_data["List of Startup Programs"].append([i[1],i[2]])
        with open("startup applications.csv", 'w') as startup: # OPENNG NEW FILE TO SAVE DATA
            startup.write(self.data) # WRITING DATA TO FILE
        return ret_data # RETURNING THE VARIABLE FOR LATER USE THE DATA IN FORM OF MODULES
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,243
|
chavarera/Cinfo
|
refs/heads/master
|
/Cinfo.py
|
import os
if __name__=="__main__":
    #check platform type and Run File(if Windows It will Import from WindowsInfo)
    # Importing the platform module runs its top-level collection code
    # immediately (both modules execute on import).
    if os.name=='nt':
        import WindowsInfo
    else:
        import LinuxInfo
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,244
|
chavarera/Cinfo
|
refs/heads/master
|
/lib/linux/get_ports.py
|
'''
Author : Deepak Chauhan
GitHub : https://github.com/royaleagle73
Email : 2018PGCACA63@nitjsr.ac.in
'''
import os
import re
class get_ports:
    '''Collect the open listening TCP/UDP ports (via ``ss -lntu``).

    work() writes "Open Ports.csv" into the output directory and returns
    {"Open Ports List": [[header], [protocol, port], ...]} with duplicate
    port numbers removed.
    '''
    def __init__(self):
        '''
        __init__ DOCFILE:
        __init__ BLOCK SERVES THE INITIALIZATION FUNCTION, CONTAINING INITIALIZED VARIABLES WHICH IS GOING TO BE USED LATER BY OTHER MEMBER FUNCTION.
        '''
        self.data = [] # TO SAVE DATA RECIEVED FROM COMMAND INTO A STRING
        self.final_list = [] # FOR SAVING (protocol, port) TUPLES COLLECTED FROM THE COMMAND OUTPUT
        self.secondary_port_list = [] # FOR SAVING ALL PORTS FOR LATER COMPARISION FOR DUPLICATE PORTS
        self.protocol = "" # FOR EXTRACTING PROTOCOLS FROM ALL OUTPUTS
        self.final_data = "" # FOR SAVING FINAL DATA IN A STRING
        self.current_path = os.getcwd() # For SAVING CURRENT DIRECTORY INFORMATION
    def work(self):
        '''
        WORK() DOCFILE:
        THE FUNCTION WORKS IN FOLLOWING WAY:
        1) COLLECTS DATA FROM COMMANDLINE INTO STRING AND THEN SPLITTS INTO THE LIST.
        2) TRAVERSES ON EVERY OUTPUT.
        3) EXTRACTS ALL PORTS IN OUTPUT LINE.
        4) CHECKS IF EXTRACTED PORTS COUNT IS GREATER THAN ZERO.
        5) REMOVES THE COLON(:) FROM THE START OF PORT.
        6) CHECKS IF THE EXTRACTED PORT EXIST BEFORE IN LIST.
        7) EXTRACTS PROTOCOL FROM THE OUTPUT.
        8) SAVES THE PROTOCOL AND PORT IN THE LIST.
        9) SAVES THE PORT IN SECONDARY LIST FOR LATER COMPARISION.
        10) RETURNS THE FINAL OUTPUT.
        '''
        ret_data = {"Open Ports List":[["Protocol","Port Number"]]}
        data = os.popen("ss -lntu").read().split('\n') # COLLECTING DATA FROM COMMANDLINE INTO STRING AND THEN SPLITTING INTO THE LIST
        for i in data: # TRAVERSING ON EVERY OUTPUT
            self.ports_in_line = re.findall(r':\d{1,5}', i) # EXTRACTING ALL PORTS IN OUTPUT LINE
            if len(self.ports_in_line) > 0 : # CHECKING IF EXTRACTED PORTS COUNT IS GREATER THAN OR EQUAL TO 0
                self.extracted_port = self.ports_in_line[0][1:] # REMOVING SEMI-COLON(:) FROM THE START OF PORT
                if self.extracted_port not in self.secondary_port_list: # CHECKING IF THE EXTRACTED PORT EXIST BEFORE IN LIST
                    self.protocol = i[:i.find(' ')] # EXTRACTING PROTOCOL FROM THE OUTPUT
                    self.final_list.append((self.protocol,self.extracted_port)) # SAVING THE PROTOCOL AND PORT IN THE LIST
                    self.secondary_port_list.append(self.extracted_port) # SAVING THE PROTOCOL IN SECONDARY LIST FOR LATER COMPARISION
        self.final_data = "Protocol,Port\n"
        for i in self.final_list:
            self.final_data += i[0]+","+i[1]+"\n"
            ret_data["Open Ports List"].append([i[0],i[1]])
        if self.current_path.find("output") == -1: # CHECKING IF CURRENT WORKING DIRECTORY IS OUTPUT FOLDER
            self.current_path += "/output/"
        os.chdir(self.current_path) # CHANGING CURRENT WORKING DIRECTORY
        with open("Open Ports.csv", "w") as ports: # SAVING DATA INTO A FILE
            ports.write(self.final_data)
        return ret_data
|
{"/lib/windows/NetworkInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/HardwareInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/lib/windows/ServiceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/MiscInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/DeviceInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/StorageInfo.py": ["/lib/windows/common/CommandHandler.py"], "/lib/windows/SystemInfo.py": ["/lib/windows/common/CommandHandler.py", "/lib/windows/common/RegistryHandler.py"], "/Cinfo.py": ["/WindowsInfo.py", "/LinuxInfo.py"]}
|
21,262
|
kazi-arafat/custometfeedbackapp
|
refs/heads/master
|
/app.py
|
from flask import Flask,flash,render_template,request
from flask_sqlalchemy import SQLAlchemy
from send_mail import send_email
app = Flask(__name__)
# Switch between local development and the deployed (Heroku) database.
ENV = "prod"
if (ENV == "dev"):
    app.debug = True
    # NOTE(review): database credentials are hard-coded in source; move
    # them to environment variables / a secrets store.
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:abc123@localhost/CustomerFeedback'
else:
    app.debug = False
    # NOTE(review): live credentials committed to source; also the
    # 'postgres://' scheme is rejected by SQLAlchemy >= 1.4 — confirm the
    # pinned SQLAlchemy version.
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://qhgxgzgfmalnxu:a2bc34670c77162e732c08c4404918b33e7ce9096d07be2ec3710bef10bd2541@ec2-107-20-239-47.compute-1.amazonaws.com:5432/d4603d4b6h369v'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class FeedbackForm(db.Model):
    """ORM model for one customer's feedback row ('feedback' table)."""
    __tablename__ = 'feedback'
    id = db.Column(db.Integer, primary_key=True)
    # Unique: one feedback per customer (also checked explicitly in Submit()).
    customer = db.Column(db.String(200), unique=True)
    dealer = db.Column(db.String(200))
    rating = db.Column(db.Integer)
    comments = db.Column(db.Text())
    def __init__(self,customer,dealer,rating,comments):
        self.customer = customer
        self.dealer = dealer
        self.rating = rating
        self.comments = comments
@app.route("/")
def Index():
    """Serve the feedback form page."""
    return render_template("index.html")
@app.route("/submit",methods=['POST'])
def Submit():
    """Validate and persist a feedback submission, then email it.

    Re-renders the form with a message when required fields are missing or
    the customer already submitted; on success stores the row, sends the
    notification email and shows the success page.
    """
    if (request.method == "POST"):
        customer = request.form['customer']
        dealer = request.form['dealer']
        # rating arrives as a string; SQLAlchemy coerces it into the
        # Integer column on commit.
        rating = request.form['rating']
        comments = request.form['comments']
        # print ("{0} {1} {2} {3}".format(customer,dealer,rating,comments))
        if (customer == "" or dealer == ""):
            return render_template("index.html",message="Please enter required fields.")
        # Check if the customer already submitted feedback and then proceed with further steps
        if (db.session.query(FeedbackForm).filter(FeedbackForm.customer == customer).count() == 0):
            data = FeedbackForm(customer,dealer,rating,comments)
            db.session.add(data)
            db.session.commit()
            send_email(customer, dealer, rating, comments)
            return render_template("success.html")
        return render_template("index.html",message="You have already submitted feedback.")
if (__name__ == "__main__"):
    # Development entry point; in production the app is served by a WSGI host.
    app.run()
|
{"/app.py": ["/send_mail.py"]}
|
21,263
|
kazi-arafat/custometfeedbackapp
|
refs/heads/master
|
/send_mail.py
|
import smtplib
from email.mime.text import MIMEText
def send_email(customer, dealer, rating, comments):
    """Email the feedback details to the site owner via Mailtrap SMTP.

    customer, dealer, rating, comments -- the submitted form fields,
    rendered into a small HTML summary and sent as an HTML message.
    """
    port = 587
    # NOTE(review): SMTP credentials are hard-coded in source; move them to
    # environment variables before deploying anywhere real.
    userid = "40dc44b7a3fe59"
    pwd = "b7183feda5fb84"
    host = "smtp.mailtrap.io"
    to_email = "arafatkazi2448@gmail.com"
    from_email = "noReply@example.com"
    mail_body = f"<h3>Customer Feedback</h3><hr><ul><li>Customer Name : {customer}</li><li>Dealer Name : {dealer}</li><li>Rating : {rating}</li><li>Comments : {comments}</li></ul>"
    msg = MIMEText(mail_body,'html')
    msg['Subject'] = "Customer Feedback"
    msg['From'] = from_email
    msg['To'] = to_email
    # Send Email
    with smtplib.SMTP(host=host,port=port) as smtpServer:
        smtpServer.login(userid,pwd)
        # BUG FIX: sendmail(from_addr, to_addrs, msg) — the original passed
        # the recipient as the envelope sender and vice versa.
        smtpServer.sendmail(from_email, to_email, msg.as_string())
|
{"/app.py": ["/send_mail.py"]}
|
21,277
|
deekshati/GetADoc-Flask
|
refs/heads/master
|
/migrations/versions/cbe32a1e2540_doctor_patients_table.py
|
"""Doctor & Patients table
Revision ID: cbe32a1e2540
Revises:
Create Date: 2020-08-24 19:11:23.284040
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cbe32a1e2540'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'doctor' and 'patient' tables and their name/email indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('doctor',
        sa.Column('id', sa.String(length=120), nullable=False),
        sa.Column('full_name', sa.String(length=64), nullable=True),
        sa.Column('city', sa.String(length=20), nullable=True),
        sa.Column('qual', sa.String(length=20), nullable=True),
        sa.Column('fees', sa.Integer(), nullable=True),
        sa.Column('phone', sa.Integer(), nullable=True),
        sa.Column('address', sa.String(length=120), nullable=True),
        sa.Column('email', sa.String(length=120), nullable=True),
        sa.Column('password_hash', sa.String(length=120), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_doctor_email'), 'doctor', ['email'], unique=True)
    op.create_index(op.f('ix_doctor_full_name'), 'doctor', ['full_name'], unique=False)
    op.create_table('patient',
        sa.Column('id', sa.String(length=120), nullable=False),
        sa.Column('full_name', sa.String(length=64), nullable=True),
        sa.Column('city', sa.String(length=20), nullable=True),
        sa.Column('email', sa.String(length=120), nullable=True),
        sa.Column('password_hash', sa.String(length=120), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_patient_email'), 'patient', ['email'], unique=True)
    op.create_index(op.f('ix_patient_full_name'), 'patient', ['full_name'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the 'patient' and 'doctor' tables (indexes first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_patient_full_name'), table_name='patient')
    op.drop_index(op.f('ix_patient_email'), table_name='patient')
    op.drop_table('patient')
    op.drop_index(op.f('ix_doctor_full_name'), table_name='doctor')
    op.drop_index(op.f('ix_doctor_email'), table_name='doctor')
    op.drop_table('doctor')
    # ### end Alembic commands ###
|
{"/getadoc.py": ["/app/models.py"], "/app/routes.py": ["/app/forms.py", "/app/models.py"], "/app/forms.py": ["/app/models.py"]}
|
21,278
|
deekshati/GetADoc-Flask
|
refs/heads/master
|
/getadoc.py
|
from app import app, db
from app.models import Patient, Doctor, Appointment
@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and the core models inside `flask shell`."""
    return dict(db=db, Patient=Patient, Doctor=Doctor, Appointment=Appointment)
|
{"/getadoc.py": ["/app/models.py"], "/app/routes.py": ["/app/forms.py", "/app/models.py"], "/app/forms.py": ["/app/models.py"]}
|
21,279
|
deekshati/GetADoc-Flask
|
refs/heads/master
|
/app/models.py
|
from app import db, login
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from datetime import datetime
@login.user_loader
def load_user(id):
    """Resolve a session user id to a Patient or Doctor row.

    Ids are prefixed with 'P' for patients; anything else is treated
    as a doctor id.
    """
    model = Patient if id[0] == 'P' else Doctor
    return model.query.get(id)
class Patient(UserMixin, db.Model):
    """A patient account; primary key is a 'P'-prefixed random hex id."""
    # 'P' + token_hex(16) (see routes.register); the prefix drives load_user dispatch.
    id = db.Column(db.String(120), primary_key=True)
    full_name = db.Column(db.String(64), index=True)
    # Used by routes.finddoctor to match doctors in the same city.
    city = db.Column(db.String(20))
    email = db.Column(db.String(120), index=True, unique=True)
    # Only the werkzeug hash is stored, never the raw password.
    password_hash = db.Column(db.String(120))
    appointments = db.relationship('Appointment', backref='patient', lazy='dynamic')

    def __repr__(self):
        return '<Patient {}>'.format(self.full_name)

    def set_password(self, password):
        """Hash `password` and store the hash."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True iff `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
class Doctor(UserMixin, db.Model):
    """A doctor account; primary key is a 'D'-prefixed random hex id."""
    # 'D' + token_hex(16) (see routes.register); non-'P' prefix routes to Doctor in load_user.
    id = db.Column(db.String(120), primary_key=True)
    full_name = db.Column(db.String(64), index=True)
    city = db.Column(db.String(20))
    # Qualifications, free text.
    qual = db.Column(db.String(20))
    # Consultation fee per person.
    fees = db.Column(db.Integer)
    # NOTE(review): Integer overflows/loses leading zeros for real phone
    # numbers — a String column would be safer; confirm before changing.
    phone = db.Column(db.Integer)
    address = db.Column(db.String(120))
    email = db.Column(db.String(120), index=True, unique=True)
    # Only the werkzeug hash is stored, never the raw password.
    password_hash = db.Column(db.String(120))
    appointments = db.relationship('Appointment', backref='doctor', lazy='dynamic')

    def __repr__(self):
        return '<Doctor {}>'.format(self.full_name)

    def set_password(self, password):
        """Hash `password` and store the hash."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True iff `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
class Appointment(db.Model):
    """A booking linking one patient to one doctor.

    `status` values (as used in routes.myappointments/confirm/reject):
    0 = pending, 1 = confirmed, -1 = rejected.
    """
    id = db.Column(db.Integer, primary_key=True)
    # Date asked for by the patient at booking time.
    requested_date = db.Column(db.Date)
    # Date/time fixed by the doctor on confirmation.
    appointment_date = db.Column(db.Date)
    appointment_time = db.Column(db.Time)
    doctor_id = db.Column(db.String(120), db.ForeignKey('doctor.id'))
    patient_id = db.Column(db.String(120), db.ForeignKey('patient.id'))
    # Doctor's reason, set when status becomes -1.
    reject_msg = db.Column(db.String(120))
    status = db.Column(db.Integer)
|
{"/getadoc.py": ["/app/models.py"], "/app/routes.py": ["/app/forms.py", "/app/models.py"], "/app/forms.py": ["/app/models.py"]}
|
21,280
|
deekshati/GetADoc-Flask
|
refs/heads/master
|
/app/routes.py
|
from secrets import token_hex
from flask import render_template, url_for, redirect, flash, request
from app import app, db
from flask_login import current_user, login_user, logout_user, login_required
from app.forms import LoginForm, DoctorRegister, PatientRegister, AppointmentForm, confirmAppointment, rejectAppointment
from app.models import Patient, Doctor, Appointment
from werkzeug.urls import url_parse
from datetime import datetime
@app.route('/')
def home():
    """Landing page; hands the current UTC timestamp to the template."""
    return render_template('home.html', date=datetime.utcnow())
@app.route('/about')
@login_required
def about():
    """Static about page; requires an authenticated user."""
    return render_template('about.html')
@app.route('/finddoctor')
@login_required
def finddoctor():
    """List every doctor registered in the current user's city."""
    local_doctors = Doctor.query.filter_by(city=current_user.city).all()
    return render_template('doctorlist.html', doclist=local_doctors)
@app.route('/book/<Did>', methods=['GET', 'POST'])
@login_required
def book(Did):
    """Book an appointment with doctor `Did` for the current patient.

    GET renders the booking form pre-filled with the doctor and patient
    ids; POST persists a new Appointment with status 0 (pending) and
    redirects home.
    """
    # Renamed from `app`, which shadowed the module-level Flask
    # application object imported at the top of the file.
    prefill = Appointment(doctor_id=Did, patient_id=current_user.id)
    form = AppointmentForm(obj=prefill)
    if form.validate_on_submit():
        appoint = Appointment(requested_date=form.date.data,
                              doctor_id=form.doctor_id.data,
                              patient_id=form.patient_id.data,
                              status=0)
        db.session.add(appoint)
        db.session.commit()
        flash('Congratulations, your appointment is successfully booked!')
        return redirect(url_for('home'))
    return render_template('bookdoctor.html', form=form)
@app.route('/myappointments')
@login_required
def myappointments():
    """Show the current user's appointments, grouped by status.

    Patients (id prefixed 'P') see pending/confirmed/rejected; doctors
    see pending/confirmed only.
    """
    if current_user.id[0] == 'P':
        by_status = {
            s: Appointment.query.filter_by(patient_id=current_user.id, status=s).all()
            for s in (0, 1, -1)
        }
        return render_template('pat_appointment.html',
                               confirm=by_status[1],
                               pending=by_status[0],
                               reject=by_status[-1])
    pending = Appointment.query.filter_by(doctor_id=current_user.id, status=0).all()
    confirmed = Appointment.query.filter_by(doctor_id=current_user.id, status=1).all()
    return render_template('doc_appointment.html', confirm=confirmed, pending=pending)
@app.route('/confirmappointment/<aid>', methods=['GET', 'POST'])
@login_required
def confirmappointment(aid):
    """Let a doctor confirm the pending appointment `aid`.

    Patients are redirected home. On POST the appointment receives the
    chosen date/time and status 1 (confirmed).
    """
    # Renamed from `app`, which shadowed the module-level Flask
    # application object imported at the top of the file.
    appointment = Appointment.query.filter_by(id=aid).first()
    if current_user.id[0] == 'P':
        return redirect(url_for('home'))
    form = confirmAppointment()
    if form.validate_on_submit():
        appointment.appointment_date = form.appoint_date.data
        appointment.appointment_time = form.appoint_time.data
        appointment.status = 1
        db.session.commit()
        return redirect(url_for('myappointments'))
    # NOTE(review): an unknown aid leaves `appointment` as None and raises
    # AttributeError here; consider first_or_404 — confirm intent.
    return render_template('confirm.html', form=form, request=appointment.requested_date)
@app.route('/rejectappointment/<aid>', methods=['GET', 'POST'])
@login_required
def rejectappointment(aid):
    """Let a doctor reject the pending appointment `aid` with a reason.

    Patients are redirected home. On POST the reason is stored in
    `reject_msg` and the status becomes -1 (rejected).
    """
    # Renamed from `app`, which shadowed the module-level Flask
    # application object imported at the top of the file.
    appointment = Appointment.query.filter_by(id=aid).first()
    if current_user.id[0] == 'P':
        return redirect(url_for('home'))
    form = rejectAppointment()
    if form.validate_on_submit():
        # NOTE(review): an unknown aid leaves `appointment` as None and
        # raises AttributeError here; consider first_or_404 — confirm intent.
        appointment.reject_msg = form.rejectMessage.data
        appointment.status = -1
        db.session.commit()
        return redirect(url_for('myappointments'))
    return render_template('reject.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a patient or doctor and start a session.

    The form's `choice` field selects which table the email is looked
    up in. A safe same-site `next` redirect target is honoured.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        model = Patient if form.choice.data == 'Patient' else Doctor
        user = model.query.filter_by(email=form.email.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid Username or Password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Reject absolute URLs so an attacker cannot redirect off-site.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('home')
        return redirect(next_page)
    return render_template('login.html', form=form)
@app.route('/register/<choice>', methods=['GET', 'POST'])
def register(choice):
    """Register a new doctor or patient, selected by the URL `choice`.

    Ids are a random hex token prefixed with 'D' (doctor) or 'P'
    (patient); the prefix is what load_user later dispatches on.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    token = token_hex(16)
    if choice == 'doctor':
        form = DoctorRegister()
        if form.validate_on_submit():
            new_user = Doctor(id='D' + token,
                              full_name=form.name.data,
                              email=form.email.data,
                              city=form.city.data,
                              phone=form.phone.data,
                              address=form.address.data,
                              qual=form.qual.data,
                              fees=form.fees.data)
            new_user.set_password(form.password.data)
            db.session.add(new_user)
            db.session.commit()
            flash('Congratulations, you are now a registered user!')
            return redirect(url_for('login'))
    else:
        form = PatientRegister()
        if form.validate_on_submit():
            new_user = Patient(id='P' + token,
                               full_name=form.name.data,
                               email=form.email.data,
                               city=form.city.data)
            new_user.set_password(form.password.data)
            db.session.add(new_user)
            db.session.commit()
            flash('Congratulations, you are now a registered user!')
            return redirect(url_for('login'))
    return render_template('register.html', choice=choice, form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('home'))
|
{"/getadoc.py": ["/app/models.py"], "/app/routes.py": ["/app/forms.py", "/app/models.py"], "/app/forms.py": ["/app/models.py"]}
|
21,281
|
deekshati/GetADoc-Flask
|
refs/heads/master
|
/app/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, SelectField, IntegerField, TextField
from wtforms.fields.html5 import DateField, TimeField, DateTimeField
from wtforms.validators import ValidationError, DataRequired, Email, Length, Optional
from app.models import Doctor, Patient
class LoginForm(FlaskForm):
    """Login form shared by patients and doctors."""
    # Selects which table (Patient or Doctor) routes.login authenticates against.
    choice = SelectField('Are you a Patient or Doctor?', choices=['Patient', 'Doctor'])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember me')
    submit = SubmitField('Log In')
class DoctorRegister(FlaskForm):
    """Registration form for doctors (see routes.register, choice='doctor')."""
    name = StringField('Full Name', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    city = StringField('City', validators=[DataRequired()])
    phone = IntegerField('Phone No.', validators=[DataRequired()])
    address = StringField('Address', validators=[DataRequired()])
    qual = StringField('Qualifications', validators=[DataRequired()])
    fees = IntegerField('Fees per Person', validators=[DataRequired()])
    submit = SubmitField('Register')

    def validate_email(self, email):
        """WTForms inline validator: reject emails already used by a doctor."""
        doctor = Doctor.query.filter_by(email=email.data).first()
        if doctor is not None:
            raise ValidationError('Email is already Registered!!')
class PatientRegister(FlaskForm):
    """Registration form for patients (see routes.register, non-doctor branch)."""
    name = StringField('Full Name', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    city = StringField('City', validators=[DataRequired()])
    submit = SubmitField('Register')

    def validate_email(self, email):
        """WTForms inline validator: reject emails already used by a patient."""
        patient = Patient.query.filter_by(email=email.data).first()
        if patient is not None:
            raise ValidationError('Email is already Registered!!')
class AppointmentForm(FlaskForm):
    """Patient-side booking request form (see routes.book)."""
    # Pre-filled from the URL and session via `obj=` in routes.book, hence Optional.
    doctor_id = StringField('Doctor ID', validators=[Optional()])
    patient_id = StringField('Patient ID', validators=[Optional()])
    patient_name = StringField('Patient Name', validators=[DataRequired()])
    mobile = IntegerField('Mobile Number', validators=[DataRequired()])
    date = DateField('Enter Appointment Date', validators=[DataRequired()])
    # NOTE(review): validators on a SubmitField look like a mistake — confirm and drop.
    submit = SubmitField('Submit Request Form', validators=[DataRequired()])
class confirmAppointment(FlaskForm):
    """Doctor-side form fixing the final date/time of an appointment."""
    appoint_date = DateField("Appointment Date", validators=[DataRequired()])
    appoint_time = TimeField("Appointment Time", validators=[DataRequired()])
    submit = SubmitField("Confirm Appointment")
class rejectAppointment(FlaskForm):
    """Doctor-side form giving a reason for rejecting an appointment."""
    rejectMessage = TextField('Reject Message', validators=[DataRequired()])
    submit = SubmitField('Reject Appointment')
|
{"/getadoc.py": ["/app/models.py"], "/app/routes.py": ["/app/forms.py", "/app/models.py"], "/app/forms.py": ["/app/models.py"]}
|
21,289
|
elidiocampeiz/ArrowFieldTraversal
|
refs/heads/master
|
/GraphTraversal.py
|
import sys
from graph_utils import *
# DFS implementation that solves the Arrow Traversal problem
def dfs_arrows(graph, start, goal):
    """Depth-first search over `graph` from `start` toward `goal`.

    Returns a predecessor map: paths[node] is the node the search arrived
    from (None for `start`). The search stops as soon as `goal` is popped,
    so the map reaches `goal` only if a path exists. Neighbours come from
    graph_utils.get_edges.
    """
    paths = {start: None}
    visited = {start}
    stack = [start]
    while stack:
        node = stack.pop()
        if node == goal:
            break
        for next_node in get_edges(graph, node):
            # Idiomatic membership test (was `not next_node in visited`).
            if next_node not in visited:
                visited.add(next_node)
                paths[next_node] = node
                stack.append(next_node)
    return paths
if __name__ == "__main__":
    # CLI entry point: GraphTraversal.py <input_file> <output_file>
    if len(sys.argv) < 3:
        print("\nInvalid number of arguments. Please include path of input and output files.\n")
    else:
        input_file, output_file = sys.argv[1], sys.argv[2]
        graph = get_graph(input_file)
        # print(graph)
        # Grid dimensions; start at the top-left cell, goal at the bottom-right.
        n, m = len(graph), len(graph[0])
        start, goal = (0,0), (n-1, m-1)
        paths = dfs_arrows(graph, start, goal)
        # Walk the predecessor map back from goal to start.
        path = trace_path(graph, start, goal, paths)
        # print(paths)
        formated_path = format_path(path)
        # print(path)
        # print(formated_path)
        # test_paths(formated_path, input_file)
        write_file(output_file, formated_path)
|
{"/GraphTraversal.py": ["/graph_utils.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.