repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_fastText/old_code/model.py
|
# model.py
import torch
from torch import nn
from torch import Tensor
from torch.autograd import Variable
import numpy as np
from sklearn.metrics import accuracy_score
class fastText(nn.Module):
    """fastText-style classifier: an averaged word-embedding vector is fed
    through one hidden layer and a linear output layer.

    Fix over the original: the output non-linearity is LogSoftmax instead of
    Softmax. Training uses nn.NLLLoss (see train.py), which expects
    log-probabilities; applying it to plain softmax outputs computes the
    wrong loss/gradient. Argmax-based predictions are unaffected.
    """
    def __init__(self, config):
        super(fastText, self).__init__()
        self.config = config
        # Hidden Layer
        self.fc1 = nn.Linear(self.config.embed_size, self.config.hidden_size)
        # Output Layer
        self.fc2 = nn.Linear(self.config.hidden_size, self.config.output_size)
        # Log-softmax over the class dimension (dim=1); pairs with NLLLoss.
        self.softmax = nn.LogSoftmax(dim=1)
    def forward(self, x):
        """Return per-class log-probabilities.

        x: (batch, embed_size) averaged embeddings -> (batch, output_size).
        """
        h = self.fc1(x)
        z = self.fc2(h)
        return self.softmax(z)
    def add_optimizer(self, optimizer):
        # Attach the optimizer used by run_epoch.
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Attach the loss function (expected: nn.NLLLoss) used by run_epoch.
        self.loss_op = loss_op
    def run_epoch(self, train_data, val_data):
        """Train for one epoch over train_data=(x, y).

        Every 50 iterations, records the mean training loss since the last
        checkpoint and the accuracy on val_data.

        Returns:
            (train_losses, val_accuracies): lists sampled every 50 iterations.
        """
        train_x, train_y = train_data[0], train_data[1]
        val_x, val_y = val_data[0], val_data[1]
        iterator = data_iterator(train_x, train_y, self.config.batch_size)
        train_losses = []
        val_accuracies = []
        losses = []
        for i, (x, y) in enumerate(iterator):
            self.optimizer.zero_grad()
            x = Tensor(x).cuda()
            y_pred = self.__call__(x)
            # Dataset labels are 1-based; NLLLoss expects 0-based targets.
            loss = self.loss_op(y_pred, torch.cuda.LongTensor(y - 1))
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if (i + 1) % 50 == 0:
                print("Iter: {}".format(i + 1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                # Evaluate accuracy on the validation set.
                self.eval()
                all_preds = []
                val_iterator = data_iterator(val_x, val_y, self.config.batch_size)
                for x, y in val_iterator:
                    x = Variable(Tensor(x))
                    y_pred = self.__call__(x.cuda())
                    # Shift the 0-based argmax back to the 1-based label space.
                    predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
                    all_preds.extend(predicted.numpy())
                score = accuracy_score(val_y, np.array(all_preds).flatten())
                val_accuracies.append(score)
                print("\tVal Accuracy: {:.4f}".format(score))
                self.train()
        return train_losses, val_accuracies
| 2,569
| 32.815789
| 82
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_fastText/old_code/config.py
|
# config.py
class Config(object):
    # Hyper-parameters for the fastText classifier (see model.py / train.py).
    embed_size = 300   # dimensionality of the pretrained GloVe vectors
    hidden_size = 10   # units in the single hidden layer
    output_size = 4    # number of target classes (4-class ag_news data in train.py)
    max_epochs = 20    # training epochs
    lr = 0.5           # initial SGD learning rate; train.py lowers it later
    batch_size = 128   # examples per mini-batch
| 150
| 15.777778
| 21
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_fastText/old_code/train.py
|
# train.py
from utils import *
from config import Config
from sklearn.model_selection import train_test_split
import numpy as np
from tqdm import tqdm
import sys
import torch.optim as optim
from torch import nn, Tensor
from torch.autograd import Variable
import torch
from sklearn.metrics import accuracy_score
def get_accuracy(model, test_x, test_y):
    """Run `model` over `test_x` in batches and return accuracy vs `test_y`.

    The dataset labels are 1-based, so the 0-based argmax is shifted up by
    one before comparison. Inputs are moved to the GPU (requires CUDA).
    """
    predictions = []
    for x, _ in data_iterator(test_x, test_y):
        inputs = Variable(Tensor(x))
        outputs = model(inputs.cuda())
        batch_pred = torch.max(outputs.cpu().data, 1)[1] + 1
        predictions.extend(batch_pred.numpy())
    return accuracy_score(test_y, np.array(predictions).flatten())
if __name__=='__main__':
    # Usage: train.py [train_path] [test_path]
    # Bug fix: the original guarded sys.argv[1] with len(sys.argv) > 2 and
    # sys.argv[2] with len(sys.argv) > 3, so a lone argument (and the first
    # of two) was silently ignored.
    train_path = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_path = sys.argv[1]
    test_path = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_path = sys.argv[2]
    # Load training data and split off 20% as a validation set.
    train_text, train_labels, vocab = get_data(train_path)
    train_text, val_text, train_label, val_label = train_test_split(train_text, train_labels, test_size=0.2)
    # Read word embeddings and encode every document as a fixed-size vector.
    w2vfile = '../data/glove.840B.300d.txt'
    word_embeddings = get_word_embeddings(w2vfile, vocab.word_to_index, embedsize=300)
    train_x = np.array([encode_text(text, word_embeddings) for text in tqdm(train_text)])
    train_y = np.array(train_label)
    val_x = np.array([encode_text(text, word_embeddings) for text in tqdm(val_text)])
    val_y = np.array(val_label)
    # Create model with specified optimizer and loss function
    ##############################################################
    config = Config()
    model = fastText(config)
    model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_data = [train_x, train_y]
    val_data = [val_x, val_y]
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_losses,val_accuracies = model.run_epoch(train_data, val_data)
        print("\tAverage training loss: {:.5f}".format(np.mean(train_losses)))
        print("\tAverage Val Accuracy (per 50 iterations): {:.4f}".format(np.mean(val_accuracies)))
        # Reduce learning rate as number of epochs increase.
        # (Past 0.75*max_epochs both branches fire; the net effect is lr=0.15.)
        if i > 0.5 * config.max_epochs:
            print("Reducing LR")
            for g in optimizer.param_groups:
                g['lr'] = 0.25
        if i > 0.75 * config.max_epochs:
            print("Reducing LR")
            for g in optimizer.param_groups:
                g['lr'] = 0.15
    # Report accuracy of the final model on train/val/test splits.
    test_text, test_labels, test_vocab = get_data(test_path)
    test_x = np.array([encode_text(text, word_embeddings) for text in tqdm(test_text)])
    test_y = np.array(test_labels)
    train_acc = get_accuracy(model, train_x, train_y)
    val_acc = get_accuracy(model, val_x, val_y)
    test_acc = get_accuracy(model, test_x, test_y)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
| 3,314
| 36.247191
| 108
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_RCNN/utils.py
|
# utils.py
import torch
from torchtext import data
from torchtext.vocab import Vectors
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    # Wraps torchtext-based data loading: builds train/val/test
    # BucketIterators, the vocabulary and pretrained word embeddings
    # from label-comma-text files.
    def __init__(self, config):
        # config must provide max_sen_len and batch_size (see config.py).
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}
    def parse_label(self, label):
        '''
        Get the actual label from a label string.
        Input:
            label (string) : label of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        # Only the last character is read, so labels must be single-digit.
        return int(label.strip()[-1])
    def get_pandas_df(self, filename):
        '''
        Load the data into a pandas.DataFrame object.
        This will be used to convert data to a torchtext Dataset.
        Expected line format: "<label> , <text>" (split on first comma only).
        '''
        with open(filename, 'r') as datafile:
            data = [line.strip().split(',', maxsplit=1) for line in datafile]
            data_text = list(map(lambda x: x[1], data))
            data_label = list(map(lambda x: self.parse_label(x[0]), data))
        full_df = pd.DataFrame({"text":data_text, "label":data_label})
        return full_df
    def load_data(self, w2v_file, train_file, test_file, val_file=None):
        '''
        Loads the data from files.
        Sets up iterators for training, validation and test data.
        Also creates vocabulary and word embeddings based on the data.
        Inputs:
            w2v_file (String): absolute path to file containing word embeddings (GloVe/Word2Vec)
            train_file (String): absolute path to training file
            test_file (String): absolute path to test file
            val_file (String): absolute path to validation file
        '''
        # NOTE(review): spacy.load('en') relies on the legacy 'en' shortcut
        # link; newer spaCy needs 'en_core_web_sm' — confirm installed version.
        NLP = spacy.load('en')
        tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != " "]
        # Creating Field for data; text is lowercased and padded/truncated
        # to config.max_sen_len tokens.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text",TEXT),("label",LABEL)]
        # Load data from pd.DataFrame into torchtext.data.Dataset
        train_df = self.get_pandas_df(train_file)
        train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
        train_data = data.Dataset(train_examples, datafields)
        test_df = self.get_pandas_df(test_file)
        test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
        test_data = data.Dataset(test_examples, datafields)
        # If validation file exists, load it. Otherwise split 20% of the
        # training data off as validation data.
        if val_file:
            val_df = self.get_pandas_df(val_file)
            val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]
            val_data = data.Dataset(val_examples, datafields)
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)
        # Vocabulary is built from the training split only; vectors for
        # in-vocabulary words are copied from the pretrained embedding file.
        TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
        self.word_embeddings = TEXT.vocab.vectors
        self.vocab = TEXT.vocab
        # Training batches are shuffled; val/test batches are not, so
        # predictions line up with the original example order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)
        print ("Loaded {} training examples".format(len(train_data)))
        print ("Loaded {} test examples".format(len(test_data)))
        print ("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Compute accuracy of `model` over every batch of `iterator`.

    The 0-based argmax is shifted by +1 to map back to the 1-based labels
    stored in the dataset. Inputs are moved to the GPU when available.
    """
    predictions = []
    gold_labels = []
    for batch in iterator:
        text = batch.text.cuda() if torch.cuda.is_available() else batch.text
        scores = model(text)
        batch_pred = torch.max(scores.cpu().data, 1)[1] + 1
        predictions.extend(batch_pred.numpy())
        gold_labels.extend(batch.label.numpy())
    return accuracy_score(gold_labels, np.array(predictions).flatten())
| 4,498
| 37.452991
| 110
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_RCNN/model.py
|
# model.py
import torch
from torch import nn
import numpy as np
from torch.nn import functional as F
from utils import *
class RCNN(nn.Module):
    """Recurrent Convolutional Neural Network classifier: a bi-directional
    LSTM provides left/right context, the concatenated [context, embedding]
    features pass through a linear+tanh "convolution", max-pooling over
    time, and a final linear classifier.

    Fixes over the original:
      * run_epoch now records each validation accuracy — previously the
        returned val_accuracies list was always empty.
      * the output non-linearity is LogSoftmax (dim=1) instead of Softmax:
        training uses nn.NLLLoss (see train.py), which expects
        log-probabilities. Argmax predictions are unchanged.
    """
    def __init__(self, config, vocab_size, word_embeddings):
        super(RCNN, self).__init__()
        self.config = config
        # Embedding layer, initialized from pretrained vectors and frozen.
        self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
        self.embeddings.weight = nn.Parameter(word_embeddings, requires_grad=False)
        # Bi-directional LSTM for RCNN.
        # NOTE(review): config.dropout_keep is passed as the *drop*
        # probability here despite the "keep" name — confirm intended value.
        self.lstm = nn.LSTM(input_size = self.config.embed_size,
                            hidden_size = self.config.hidden_size,
                            num_layers = self.config.hidden_layers,
                            dropout = self.config.dropout_keep,
                            bidirectional = True)
        self.dropout = nn.Dropout(self.config.dropout_keep)
        # Linear layer producing the "convolution output" fed to pooling.
        self.W = nn.Linear(
            self.config.embed_size + 2*self.config.hidden_size,
            self.config.hidden_size_linear
        )
        # Tanh non-linearity
        self.tanh = nn.Tanh()
        # Fully-connected classification layer.
        self.fc = nn.Linear(
            self.config.hidden_size_linear,
            self.config.output_size
        )
        # Log-softmax over classes; pairs with NLLLoss.
        self.softmax = nn.LogSoftmax(dim=1)
    def forward(self, x):
        """Return per-class log-probabilities for x of shape (seq_len, batch)."""
        embedded_sent = self.embeddings(x)
        # embedded_sent.shape = (seq_len, batch_size, embed_size)
        lstm_out, (h_n, c_n) = self.lstm(embedded_sent)
        # lstm_out.shape = (seq_len, batch_size, 2 * hidden_size)
        input_features = torch.cat([lstm_out, embedded_sent], 2).permute(1, 0, 2)
        # input_features.shape = (batch_size, seq_len, embed_size + 2*hidden_size)
        linear_output = self.tanh(
            self.W(input_features)
        )
        # linear_output.shape = (batch_size, seq_len, hidden_size_linear)
        linear_output = linear_output.permute(0, 2, 1)  # reshape for max_pool
        max_out_features = F.max_pool1d(linear_output, linear_output.shape[2]).squeeze(2)
        # max_out_features.shape = (batch_size, hidden_size_linear)
        max_out_features = self.dropout(max_out_features)
        final_out = self.fc(max_out_features)
        return self.softmax(final_out)
    def add_optimizer(self, optimizer):
        # Attach the optimizer used by run_epoch.
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Attach the loss function (expected: nn.NLLLoss) used by run_epoch.
        self.loss_op = loss_op
    def reduce_lr(self):
        # Halve the learning rate of every parameter group.
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2
    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch; every 100 iterations record the average
        training loss and the validation accuracy.

        Returns:
            (train_losses, val_accuracies) lists.
        """
        train_losses = []
        val_accuracies = []
        losses = []
        # Reduce learning rate as number of epochs increase.
        if (epoch == int(self.config.max_epochs/3)) or (epoch == int(2*self.config.max_epochs/3)):
            self.reduce_lr()
        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                # Labels are 1-based; NLLLoss expects 0-based targets.
                y = (batch.label - 1).type(torch.cuda.LongTensor)
            else:
                x = batch.text
                y = (batch.label - 1).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if i % 100 == 0:
                print("Iter: {}".format(i+1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                # Evalute Accuracy on validation set
                val_accuracy = evaluate_model(self, val_iterator)
                # Bug fix: record the accuracy (it was never appended before).
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                self.train()
        return train_losses, val_accuracies
| 4,267
| 35.793103
| 98
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_RCNN/config.py
|
# config.py
class Config(object):
    # Hyper-parameters for the RCNN classifier (see model.py / train.py).
    embed_size = 300        # dimensionality of the pretrained word vectors
    hidden_layers = 1       # number of LSTM layers
    hidden_size = 64        # LSTM hidden units per direction
    output_size = 4         # number of target classes
    max_epochs = 15
    hidden_size_linear = 64 # width of the "convolution" linear layer
    lr = 0.5                # initial SGD learning rate (halved during training)
    batch_size = 128
    seq_len = None # Sequence length for RNN
    # NOTE(review): passed as the *drop* probability to nn.Dropout/LSTM in
    # model.py despite the "keep" name — confirm intended value.
    dropout_keep = 0.8
| 269
| 18.285714
| 44
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_RCNN/train.py
|
# train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__=='__main__':
    config = Config()
    # Usage: train.py [train_file] [test_file]
    # Bug fix: the original guarded sys.argv[1] with len(sys.argv) > 2 and
    # sys.argv[2] with len(sys.argv) > 3, so a lone argument (and the first
    # of two) was silently ignored.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    w2v_file = '../data/glove.840B.300d.txt'
    dataset = Dataset(config)
    dataset.load_data(w2v_file, train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = RCNN(config, len(dataset.vocab), dataset.word_embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Report accuracy of the final model on train/val/test splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
| 1,717
| 32.038462
| 98
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/utils.py
|
# utils.py
import torch
from torchtext import data
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    # Wraps torchtext-based data loading for the Transformer model: builds
    # train/val/test BucketIterators and the vocabulary (no pretrained
    # embeddings — the Transformer learns its own).
    def __init__(self, config):
        # config must provide max_sen_len and batch_size (see config.py).
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}
    def parse_label(self, label):
        '''
        Get the actual label from a label string.
        Input:
            label (string) : label of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        # Only the last character is read, so labels must be single-digit.
        return int(label.strip()[-1])
    def get_pandas_df(self, filename):
        '''
        Load the data into a pandas.DataFrame object.
        This will be used to convert data to a torchtext Dataset.
        Expected line format: "<label> , <text>" (split on first comma only).
        '''
        with open(filename, 'r') as datafile:
            data = [line.strip().split(',', maxsplit=1) for line in datafile]
            data_text = list(map(lambda x: x[1], data))
            data_label = list(map(lambda x: self.parse_label(x[0]), data))
        full_df = pd.DataFrame({"text":data_text, "label":data_label})
        return full_df
    def load_data(self, train_file, test_file=None, val_file=None):
        '''
        Loads the data from files.
        Sets up iterators for training, validation and test data.
        Also creates the vocabulary based on the training data.
        Inputs:
            train_file (String): path to training file
            test_file (String): path to test file
            val_file (String): path to validation file
        '''
        # NOTE(review): test_file defaults to None but is opened
        # unconditionally below — callers must always pass it.
        # NOTE(review): spacy.load('en') relies on the legacy 'en' shortcut
        # link; newer spaCy needs 'en_core_web_sm' — confirm installed version.
        NLP = spacy.load('en')
        tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != " "]
        # Creating Field for data; text is lowercased and padded/truncated
        # to config.max_sen_len tokens.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text",TEXT),("label",LABEL)]
        # Load data from pd.DataFrame into torchtext.data.Dataset
        train_df = self.get_pandas_df(train_file)
        train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
        train_data = data.Dataset(train_examples, datafields)
        test_df = self.get_pandas_df(test_file)
        test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
        test_data = data.Dataset(test_examples, datafields)
        # If validation file exists, load it. Otherwise split 20% of the
        # training data off as validation data.
        if val_file:
            val_df = self.get_pandas_df(val_file)
            val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]
            val_data = data.Dataset(val_examples, datafields)
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)
        # Vocabulary is built from the training split only.
        TEXT.build_vocab(train_data)
        self.vocab = TEXT.vocab
        # Training batches are shuffled; val/test batches are not, so
        # predictions line up with the original example order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)
        print ("Loaded {} training examples".format(len(train_data)))
        print ("Loaded {} test examples".format(len(test_data)))
        print ("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Compute accuracy of `model` over every batch of `iterator`.

    The 0-based argmax is shifted by +1 to map back to the 1-based labels
    stored in the dataset. Inputs are moved to the GPU when available.
    """
    predictions = []
    gold_labels = []
    for batch in iterator:
        text = batch.text.cuda() if torch.cuda.is_available() else batch.text
        scores = model(text)
        batch_pred = torch.max(scores.cpu().data, 1)[1] + 1
        predictions.extend(batch_pred.numpy())
        gold_labels.extend(batch.label.numpy())
    return accuracy_score(gold_labels, np.array(predictions).flatten())
| 4,255
| 36.663717
| 110
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/model.py
|
# Model.py
import torch
import torch.nn as nn
from copy import deepcopy
from train_utils import Embeddings,PositionalEncoding
from attention import MultiHeadedAttention
from encoder import EncoderLayer, Encoder
from feed_forward import PositionwiseFeedForward
import numpy as np
from utils import *
class Transformer(nn.Module):
    """Transformer-encoder text classifier: token embeddings + positional
    encoding, N encoder layers, then a linear classifier over the final
    position's representation.

    Fixes over the original:
      * run_epoch now records each validation accuracy — previously the
        returned val_accuracies list was always empty.
      * the output non-linearity is LogSoftmax (dim=1) instead of Softmax:
        training uses nn.NLLLoss (see train.py), which expects
        log-probabilities. Argmax predictions are unchanged.
    """
    def __init__(self, config, src_vocab):
        super(Transformer, self).__init__()
        self.config = config
        h, N, dropout = self.config.h, self.config.N, self.config.dropout
        d_model, d_ff = self.config.d_model, self.config.d_ff
        # Prototype sub-modules are deep-copied into each encoder layer.
        attn = MultiHeadedAttention(h, d_model)
        ff = PositionwiseFeedForward(d_model, d_ff, dropout)
        position = PositionalEncoding(d_model, dropout)
        self.encoder = Encoder(EncoderLayer(config.d_model, deepcopy(attn), deepcopy(ff), dropout), N)
        self.src_embed = nn.Sequential(Embeddings(config.d_model, src_vocab), deepcopy(position)) #Embeddings followed by PE
        # Fully-Connected classification layer.
        self.fc = nn.Linear(
            self.config.d_model,
            self.config.output_size
        )
        # Log-softmax over classes; pairs with NLLLoss.
        self.softmax = nn.LogSoftmax(dim=1)
    def forward(self, x):
        """Return per-class log-probabilities for x of shape (seq_len, batch)."""
        embedded_sents = self.src_embed(x.permute(1,0)) # shape = (batch_size, sen_len, d_model)
        encoded_sents = self.encoder(embedded_sents)
        # Use the final position's representation as the sentence feature.
        final_feature_map = encoded_sents[:,-1,:]
        final_out = self.fc(final_feature_map)
        return self.softmax(final_out)
    def add_optimizer(self, optimizer):
        # Attach the optimizer used by run_epoch.
        self.optimizer = optimizer
    def add_loss_op(self, loss_op):
        # Attach the loss function (expected: nn.NLLLoss) used by run_epoch.
        self.loss_op = loss_op
    def reduce_lr(self):
        # Halve the learning rate of every parameter group.
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2
    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch; every 100 iterations record the average
        training loss and the validation accuracy.

        Returns:
            (train_losses, val_accuracies) lists.
        """
        train_losses = []
        val_accuracies = []
        losses = []
        # Reduce learning rate as number of epochs increase.
        if (epoch == int(self.config.max_epochs/3)) or (epoch == int(2*self.config.max_epochs/3)):
            self.reduce_lr()
        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                # Labels are 1-based; NLLLoss expects 0-based targets.
                y = (batch.label - 1).type(torch.cuda.LongTensor)
            else:
                x = batch.text
                y = (batch.label - 1).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if i % 100 == 0:
                print("Iter: {}".format(i+1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                # Evalute Accuracy on validation set
                val_accuracy = evaluate_model(self, val_iterator)
                # Bug fix: record the accuracy (it was never appended before).
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                self.train()
        return train_losses, val_accuracies
| 3,390
| 35.858696
| 124
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/encoder.py
|
# encoder.py
from torch import nn
from train_utils import clones
from sublayer import LayerNorm, SublayerOutput
class Encoder(nn.Module):
    '''
    Transformer Encoder: a stack of N identical layers followed by a final
    layer normalization.
    '''
    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)
    def forward(self, x, mask=None):
        # Thread the input through every layer in order, then normalize.
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, mask)
        return self.norm(out)
class EncoderLayer(nn.Module):
    '''
    One encoder layer: self-attention followed by a position-wise feed
    forward network. Each sub-layer is wrapped by SublayerOutput, which
    supplies the residual connection and layer norm.
    '''
    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer_output = clones(SublayerOutput(size, dropout), 2)
        self.size = size
    def forward(self, x, mask=None):
        "Self-attention sub-layer, then feed-forward sub-layer."
        attended = self.sublayer_output[0](x, lambda inp: self.self_attn(inp, inp, inp, mask))
        return self.sublayer_output[1](attended, self.feed_forward)
| 1,248
| 30.225
| 104
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/feed_forward.py
|
# feed_forward.py
from torch import nn
import torch.nn.functional as F
class PositionwiseFeedForward(nn.Module):
    "Position-wise feed-forward network: Linear -> ReLU -> Dropout -> Linear."
    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        # Module creation order (w_1, w_2, dropout) is kept stable so that
        # seeded initialization and state_dict keys match the original.
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x):
        "Implements FFN(x) = W2(dropout(relu(W1 x)))."
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
| 515
| 31.25
| 58
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/sublayer.py
|
# sublayer.py
import torch
from torch import nn
class LayerNorm(nn.Module):
    "Layer normalization over the last dimension with learnable gain/bias."
    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # gain, initialized to 1
        self.b_2 = nn.Parameter(torch.zeros(features))  # bias, initialized to 0
        self.eps = eps
    def forward(self, x):
        # Uses the Bessel-corrected sample std and adds eps to the std
        # (not the variance), as in the original implementation.
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
class SublayerOutput(nn.Module):
    '''
    Pre-norm residual wrapper: returns x + dropout(sublayer(norm(x))).
    '''
    def __init__(self, size, dropout):
        super(SublayerOutput, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)
    def forward(self, x, sublayer):
        "Apply `sublayer` to the normalized input and add the residual."
        normalized = self.norm(x)
        return x + self.dropout(sublayer(normalized))
| 950
| 29.677419
| 71
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/train_utils.py
|
# train_utils.py
import torch
from torch import nn
from torch.autograd import Variable
import copy
import math
def clones(module, N):
    "Return an nn.ModuleList holding N independent deep copies of `module`."
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
class Embeddings(nn.Module):
    '''
    Standard embedding lookup with outputs scaled by sqrt(d_model),
    as in "Attention Is All You Need".
    '''
    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model
    def forward(self, x):
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class PositionalEncoding(nn.Module):
    "Implement the PE function (sinusoidal positional encodings)."
    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             -(math.log(10000.0) / d_model))
        # The sin/cos arguments are computed via a numpy round-trip —
        # presumably to sidestep integer/float tensor promotion issues in
        # older PyTorch (see the commented-out direct form below) — TODO confirm.
        pe[:, 0::2] = torch.sin(torch.as_tensor(position.numpy() * div_term.unsqueeze(0).numpy()))
        pe[:, 1::2] = torch.cos(torch.as_tensor(position.numpy() * div_term.unsqueeze(0).numpy()))#torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        # Registered as a buffer: saved with the model but not trained.
        self.register_buffer('pe', pe)
    def forward(self, x):
        # Add the non-trainable encodings for the first x.size(1) positions.
        x = x + Variable(self.pe[:, :x.size(1)],
                         requires_grad=False)
        return self.dropout(x)
| 1,577
| 34.863636
| 129
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/config.py
|
# config.py
class Config(object):
    # Hyper-parameters for the Transformer classifier; inline comments note
    # the original "Attention Is All You Need" sizes.
    N = 1 #6 in Transformer Paper
    d_model = 256 #512 in Transformer Paper
    d_ff = 512 #2048 in Transformer Paper
    h = 8              # number of attention heads
    dropout = 0.1      # dropout probability used throughout the model
    output_size = 4    # number of target classes
    lr = 0.0003        # Adam learning rate (see train.py)
    max_epochs = 35
    batch_size = 128
    max_sen_len = 60   # sentences padded/truncated to this many tokens
| 280
| 20.615385
| 43
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/attention.py
|
# attention.py
import torch
from torch import nn
import math
import torch.nn.functional as F
from train_utils import clones
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Returns (output, attention_weights), where
    output = softmax(Q K^T / sqrt(d_k)) V.
    """
    scale = math.sqrt(query.size(-1))
    scores = query.matmul(key.transpose(-2, -1)) / scale
    if mask is not None:
        # Masked-out positions receive a huge negative score so their
        # softmax weight is effectively zero.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights.matmul(value), weights
class MultiHeadedAttention(nn.Module):
    "Multi-head attention: h parallel scaled dot-product attention heads."
    def __init__(self, h, d_model, dropout=0.1):
        "Take in model size and number of heads."
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // h
        self.h = h
        # Four linears: Q, K, V projections plus the output projection.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)
    def forward(self, query, key, value, mask=None):
        "Implements Multi-head attention"
        if mask is not None:
            # The same mask is applied to every head.
            mask = mask.unsqueeze(1)
        batch_size = query.size(0)
        # 1) Project inputs and reshape to (batch, heads, seq_len, d_k).
        # zip stops after the three inputs, leaving linears[3] for step 3.
        projected = []
        for linear, tensor in zip(self.linears, (query, key, value)):
            projected.append(
                linear(tensor).view(batch_size, -1, self.h, self.d_k).transpose(1, 2))
        q, k, v = projected
        # 2) Run scaled dot-product attention on every head in parallel.
        x, self.attn = attention(q, k, v, mask=mask, dropout=self.dropout)
        # 3) Concatenate heads and apply the final output projection.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
        return self.linears[-1](x)
| 1,915
| 35.846154
| 76
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_Transformer/train.py
|
# train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__=='__main__':
    config = Config()
    # Usage: train.py [train_file] [test_file]
    # Bug fix: the original guarded sys.argv[1] with len(sys.argv) > 2 and
    # sys.argv[2] with len(sys.argv) > 3, so a lone argument (and the first
    # of two) was silently ignored.
    train_file = '../data/ag_news.train'
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    dataset = Dataset(config)
    dataset.load_data(train_file, test_file)
    # Create Model with specified optimizer and loss function
    ##############################################################
    model = Transformer(config, len(dataset.vocab))
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################
    train_losses = []
    val_accuracies = []
    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss,val_accuracy = model.run_epoch(dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)
    # Report accuracy of the final model on train/val/test splits.
    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)
    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
| 1,640
| 31.82
| 98
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/data/query_wellformedness/reformat_data.py
|
import sys
import os
if __name__=='__main__':
    # Convert "question<TAB>score" lines into fastText-style
    # "__label__<y> , question" lines, where y is 2 when score >= 0.5
    # (well-formed) and 1 otherwise. Output goes to processed_<name>.txt
    # next to the input file.
    if len(sys.argv) < 2:
        print("Expected filename as an argument")
        sys.exit()
    filepath = sys.argv[1]
    path, filename = os.path.split(filepath)
    name, ext = os.path.splitext(os.path.basename(filename))
    new_filepath = os.path.join(path, 'processed_' + name + '.txt')
    with open(new_filepath, 'w') as new_file, open(filepath, 'r') as old_file:
        for line in old_file:
            question, number = line.strip().split('\t')
            tag = '2' if float(number) >= 0.5 else '1'
            new_file.write('__label__' + tag + ' , ' + question + '\n')
    print('Finished')
| 634
| 29.238095
| 60
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_TextRNN/utils.py
|
# utils.py
import torch
from torchtext import data
from torchtext.vocab import Vectors
import spacy
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
class Dataset(object):
    # Wraps torchtext-based data loading for TextRNN: builds train/val/test
    # BucketIterators, the vocabulary and pretrained word embeddings from
    # label-comma-text files.
    def __init__(self, config):
        # config must provide max_sen_len and batch_size (see config.py).
        self.config = config
        self.train_iterator = None
        self.test_iterator = None
        self.val_iterator = None
        self.vocab = []
        self.word_embeddings = {}
    def parse_label(self, label):
        '''
        Get the actual label from a label string.
        Input:
            label (string) : label of the form '__label__2'
        Returns:
            label (int) : integer value corresponding to label string
        '''
        # Only the last character is read, so labels must be single-digit.
        return int(label.strip()[-1])
    def get_pandas_df(self, filename):
        '''
        Load the data into a pandas.DataFrame object.
        This will be used to convert data to a torchtext Dataset.
        Expected line format: "<label> , <text>" (split on first comma only).
        '''
        with open(filename, 'r') as datafile:
            data = [line.strip().split(',', maxsplit=1) for line in datafile]
            data_text = list(map(lambda x: x[1], data))
            data_label = list(map(lambda x: self.parse_label(x[0]), data))
        full_df = pd.DataFrame({"text":data_text, "label":data_label})
        return full_df
    def load_data(self, w2v_file, train_file, test_file, val_file=None):
        '''
        Loads the data from files.
        Sets up iterators for training, validation and test data.
        Also creates vocabulary and word embeddings based on the data.
        Inputs:
            w2v_file (String): absolute path to file containing word embeddings (GloVe/Word2Vec)
            train_file (String): absolute path to training file
            test_file (String): absolute path to test file
            val_file (String): absolute path to validation file
        '''
        # NOTE(review): spacy.load('en') relies on the legacy 'en' shortcut
        # link; newer spaCy needs 'en_core_web_sm' — confirm installed version.
        NLP = spacy.load('en')
        tokenizer = lambda sent: [x.text for x in NLP.tokenizer(sent) if x.text != " "]
        # Creating Field for data; text is lowercased and padded/truncated
        # to config.max_sen_len tokens.
        TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
        LABEL = data.Field(sequential=False, use_vocab=False)
        datafields = [("text",TEXT),("label",LABEL)]
        # Load data from pd.DataFrame into torchtext.data.Dataset
        train_df = self.get_pandas_df(train_file)
        train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
        train_data = data.Dataset(train_examples, datafields)
        test_df = self.get_pandas_df(test_file)
        test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
        test_data = data.Dataset(test_examples, datafields)
        # If validation file exists, load it. Otherwise split 20% of the
        # training data off as validation data.
        if val_file:
            val_df = self.get_pandas_df(val_file)
            val_examples = [data.Example.fromlist(i, datafields) for i in val_df.values.tolist()]
            val_data = data.Dataset(val_examples, datafields)
        else:
            train_data, val_data = train_data.split(split_ratio=0.8)
        # Vocabulary is built from the training split only; vectors for
        # in-vocabulary words are copied from the pretrained embedding file.
        TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
        self.word_embeddings = TEXT.vocab.vectors
        self.vocab = TEXT.vocab
        # Training batches are shuffled; val/test batches are not, so
        # predictions line up with the original example order.
        self.train_iterator = data.BucketIterator(
            (train_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=True)
        self.val_iterator, self.test_iterator = data.BucketIterator.splits(
            (val_data, test_data),
            batch_size=self.config.batch_size,
            sort_key=lambda x: len(x.text),
            repeat=False,
            shuffle=False)
        print ("Loaded {} training examples".format(len(train_data)))
        print ("Loaded {} test examples".format(len(test_data)))
        print ("Loaded {} validation examples".format(len(val_data)))
def evaluate_model(model, iterator):
    """Compute classification accuracy of `model` over every batch in `iterator`.

    Args:
        model: a trained nn.Module returning per-class scores of shape
            (batch, num_classes).
        iterator: yields batches with `.text` (input token ids) and `.label`
            (1-based gold labels) tensors.

    Returns:
        float: fraction of examples whose argmax prediction matches the label.

    Note: switches the model to eval mode (callers such as run_epoch restore
    train mode afterwards) and disables gradient tracking for speed/memory.
    """
    model.eval()  # fix: disable dropout etc. during evaluation
    all_preds = []
    all_y = []
    with torch.no_grad():  # fix: no gradients needed while evaluating
        for batch in iterator:
            if torch.cuda.is_available():
                x = batch.text.cuda()
            else:
                x = batch.text
            y_pred = model(x)
            # Labels are 1-based while argmax is 0-based, hence the +1.
            predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
            all_preds.extend(predicted.numpy())
            all_y.extend(batch.label.numpy())
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score
| 4,498
| 37.452991
| 110
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_TextRNN/model.py
|
# model.py
import torch
from torch import nn
import numpy as np
from utils import *
class TextRNN(nn.Module):
    """LSTM-based text classifier.

    Embeds token-id sequences with pretrained (frozen) word vectors, runs a
    stacked (optionally bidirectional) LSTM, and classifies from the
    concatenation of the final hidden states of all layers/directions.

    Note: forward() returns *log*-probabilities (LogSoftmax), which is what
    the NLLLoss used in train.py expects; feeding plain Softmax output to
    NLLLoss trains against the wrong objective. Argmax over the output is
    unchanged, so prediction code that takes torch.max keeps working.
    """
    def __init__(self, config, vocab_size, word_embeddings):
        """config: hyper-parameter object (see config.py);
        vocab_size: number of rows in the embedding table;
        word_embeddings: float tensor (vocab_size, embed_size) of pretrained
        vectors, kept frozen during training."""
        super(TextRNN, self).__init__()
        self.config = config

        # Embedding layer initialized from pretrained vectors and frozen.
        self.embeddings = nn.Embedding(vocab_size, self.config.embed_size)
        self.embeddings.weight = nn.Parameter(word_embeddings,
                                              requires_grad=False)

        # Fix: config.dropout_keep is a *keep* probability by name, while
        # nn.LSTM / nn.Dropout take a *drop* probability, so convert.
        dropout_prob = 1.0 - self.config.dropout_keep
        self.lstm = nn.LSTM(input_size=self.config.embed_size,
                            hidden_size=self.config.hidden_size,
                            num_layers=self.config.hidden_layers,
                            dropout=dropout_prob,
                            bidirectional=self.config.bidirectional)
        self.dropout = nn.Dropout(dropout_prob)

        # Fully-connected layer over the concatenated final hidden states
        # of every layer and direction.
        self.fc = nn.Linear(
            self.config.hidden_size * self.config.hidden_layers *
            (1 + self.config.bidirectional),
            self.config.output_size
        )

        # Fix: LogSoftmax (not Softmax) so that NLLLoss receives
        # log-probabilities; dim=1 is the class dimension.
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """x: LongTensor (max_sen_len, batch_size) of token ids.
        Returns log-probabilities of shape (batch_size, output_size)."""
        embedded_sent = self.embeddings(x)
        # embedded_sent: (max_sen_len, batch_size, embed_size)
        lstm_out, (h_n, c_n) = self.lstm(embedded_sent)
        # h_n: (num_layers * num_directions, batch_size, hidden_size)
        final_feature_map = self.dropout(h_n)
        # Flatten to (batch_size, hidden_size * layers * directions).
        final_feature_map = torch.cat(
            [final_feature_map[i, :, :]
             for i in range(final_feature_map.shape[0])], dim=1)
        final_out = self.fc(final_feature_map)
        return self.log_softmax(final_out)

    def add_optimizer(self, optimizer):
        """Attach the optimizer used by run_epoch."""
        self.optimizer = optimizer

    def add_loss_op(self, loss_op):
        """Attach the loss function (expected: NLLLoss) used by run_epoch."""
        self.loss_op = loss_op

    def reduce_lr(self):
        """Halve the learning rate of every parameter group."""
        print("Reducing LR")
        for g in self.optimizer.param_groups:
            g['lr'] = g['lr'] / 2

    def run_epoch(self, train_iterator, val_iterator, epoch):
        """Train for one epoch; every 100 iterations log the running loss and
        validation accuracy. Returns (train_losses, val_accuracies)."""
        train_losses = []
        val_accuracies = []
        losses = []

        # Reduce learning rate as number of epochs increase.
        if (epoch == int(self.config.max_epochs / 3)) or \
           (epoch == int(2 * self.config.max_epochs / 3)):
            self.reduce_lr()

        for i, batch in enumerate(train_iterator):
            self.optimizer.zero_grad()
            if torch.cuda.is_available():
                x = batch.text.cuda()
                # Labels are 1-based in the data; shift to 0-based for the loss.
                y = (batch.label - 1).type(torch.cuda.LongTensor)
            else:
                x = batch.text
                y = (batch.label - 1).type(torch.LongTensor)
            y_pred = self.__call__(x)
            loss = self.loss_op(y_pred, y)
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()

            if i % 100 == 0:
                print("Iter: {}".format(i + 1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []

                # Evaluate accuracy on validation set.
                val_accuracy = evaluate_model(self, val_iterator)
                # Fix: record the accuracy instead of discarding it, so the
                # returned val_accuracies list is not always empty.
                val_accuracies.append(val_accuracy)
                print("\tVal Accuracy: {:.4f}".format(val_accuracy))
                self.train()  # evaluate_model switches to eval mode

        return train_losses, val_accuracies
| 3,586
| 37.569892
| 115
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_TextRNN/config.py
|
# config.py
class Config(object):
    """Hyper-parameters for the TextRNN model and its training loop."""
    embed_size = 300     # dimension of the (pretrained) word embeddings
    hidden_layers = 2    # number of stacked LSTM layers
    hidden_size = 32     # LSTM hidden-state size (per direction)
    bidirectional = True # use a bidirectional LSTM
    output_size = 4      # number of output classes
    max_epochs = 10
    lr = 0.25            # initial SGD learning rate (halved during training)
    batch_size = 64
    max_sen_len = 20 # Sequence length for RNN
    # NOTE(review): the name suggests a *keep* probability, but model.py passes
    # this value directly as nn.Dropout's / nn.LSTM's *drop* probability --
    # confirm intended semantics.
    dropout_keep = 0.8
| 267
| 19.615385
| 46
|
py
|
Text-Classification-Models-Pytorch
|
Text-Classification-Models-Pytorch-master/Model_TextRNN/train.py
|
# train.py
from utils import *
from model import *
from config import Config
import sys
import torch.optim as optim
from torch import nn
import torch
if __name__=='__main__':
    # Train a TextRNN on AG News-style data and report final accuracies.
    # Optional positional arguments: [train_file] [test_file].
    config = Config()

    train_file = '../data/ag_news.train'
    # Fix: sys.argv[1] exists whenever len(sys.argv) > 1 (the original
    # "> 2" / "> 3" checks silently ignored a single-argument invocation).
    if len(sys.argv) > 1:
        train_file = sys.argv[1]
    test_file = '../data/ag_news.test'
    if len(sys.argv) > 2:
        test_file = sys.argv[2]
    w2v_file = '../data/glove.840B.300d.txt'

    dataset = Dataset(config)
    dataset.load_data(w2v_file, train_file, test_file)

    # Create Model with specified optimizer and loss function
    ##############################################################
    model = TextRNN(config, len(dataset.vocab), dataset.word_embeddings)
    if torch.cuda.is_available():
        model.cuda()
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    NLLLoss = nn.NLLLoss()
    model.add_optimizer(optimizer)
    model.add_loss_op(NLLLoss)
    ##############################################################

    train_losses = []
    val_accuracies = []

    for i in range(config.max_epochs):
        print ("Epoch: {}".format(i))
        train_loss, val_accuracy = model.run_epoch(
            dataset.train_iterator, dataset.val_iterator, i)
        train_losses.append(train_loss)
        val_accuracies.append(val_accuracy)

    train_acc = evaluate_model(model, dataset.train_iterator)
    val_acc = evaluate_model(model, dataset.val_iterator)
    test_acc = evaluate_model(model, dataset.test_iterator)

    print ('Final Training Accuracy: {:.4f}'.format(train_acc))
    print ('Final Validation Accuracy: {:.4f}'.format(val_acc))
    print ('Final Test Accuracy: {:.4f}'.format(test_acc))
| 1,720
| 32.096154
| 98
|
py
|
bfh_python
|
bfh_python-master/arcslide.py
|
"""Arcslides and their type DD structures."""
from ddstructure import DDStrFromChords
from grading import SimpleDbGradingSet, SimpleDbGradingSetElement, \
SmallGradingGroup
from hdiagram import getArcslideDiagram
from pmc import Idempotent, PMC, Strands
from pmc import connectSumPMC, splitPMC
from utility import memorize
from utility import ACTION_LEFT
# Two types of arcslides:
UNDER_SLIDE, OVER_SLIDE = 0, 1
class Arcslide(object):
    """Represents an arcslide between two pointed matched circles.

    An arcslide is determined by the starting PMC, the sliding point b1, and
    the adjacent point c1 that b1 slides over. The ending PMC and the
    point-to-point correspondence `to_r` are computed in __init__.
    """
    def __init__(self, start_pmc, b1, c1):
        """Specifies the starting pmc, the sliding point (b1), and the point it
        slides over (c1).
        """
        # b1 must be adjacent to c1 for the slide to make sense.
        assert b1 == c1-1 or b1 == c1+1
        self.start_pmc, self.b1, self.c1 = start_pmc, b1, c1
        self.n = self.start_pmc.n
        # IDs of the pairs containing b1 and c1.
        self.b_pair = self.start_pmc.pairid[b1]
        self.c_pair = self.start_pmc.pairid[c1]
        # Points matched to b1 and c1, respectively.
        self.b2 = start_pmc.otherp[self.b1]
        self.c2 = start_pmc.otherp[self.c1]
        # Under-slide when b1 lies between the two endpoints of the c-pair.
        if self.c1 < self.b1 < self.c2 or self.c2 < self.b1 < self.c1:
            self.slide_type = UNDER_SLIDE
        else:
            self.slide_type = OVER_SLIDE
        # to_r maps each point of the starting PMC to its position in the
        # ending PMC; computed by shifting b1 across the appropriate interval.
        if self.b1 == self.c1-1:
            if self.c2 > self.c1:
                self.to_r = self._getShiftMap(self.b1, self.c1, self.c2)
            else: # self.c2 < self.c1
                self.to_r = self._getShiftMap(self.b1, self.c2+1, self.c1-2)
        else: # self.b1 == self.c1+1
            if self.c2 > self.c1:
                self.to_r = self._getShiftMap(self.b1, self.c1+2, self.c2-1)
            else: # self.c2 < self.c1
                self.to_r = self._getShiftMap(self.b1, self.c2, self.c1)
        self.end_pmc = PMC([(self.to_r[p], self.to_r[q])
                            for p, q in self.start_pmc.pairs])
        # pair_to_r maps pair IDs in the starting PMC to pair IDs at the end.
        self.pair_to_r = dict()
        for i in range(self.n//2):
            p, q = self.start_pmc.pairs[i]
            self.pair_to_r[i] = self.end_pmc.pairid[self.to_r[p]]
    def _getShiftMap(self, p, q1, q2):
        """Move p to the other side of the interval [q1,q2] (inclusive)."""
        assert p == q1-1 or p == q2+1
        n = self.start_pmc.n
        result = dict()
        for i in range(n):
            if i == p:
                if p == q1-1: result[i] = q2
                else: result[i] = q1
            elif q1 <= i <= q2:
                if p == q1-1: result[i] = i-1
                else: result[i] = i+1
            else:
                result[i] = i
        return result
    def __eq__(self, other):
        return self.start_pmc == other.start_pmc and self.b1 == other.b1 \
            and self.c1 == other.c1
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash((self.start_pmc, self.b1, self.c1, "Arcslide"))
    def __str__(self):
        if self.slide_type == UNDER_SLIDE:
            result = "Underslide of "
        else:
            result = "Overslide of "
        result += "point %d over %d starting at %s" % \
            (self.b1, self.c1, self.start_pmc)
        return result
    def __repr__(self):
        return str(self)
    def inverse(self):
        """Returns the inverse arcslide."""
        return Arcslide(self.end_pmc, self.to_r[self.b1], self.to_r[self.c2])
    @memorize
    def getDDStructure(self, abs_gr_info = None):
        """Returns the type DD structure corresponding to this arcslide.

        If abs_gr_info is not None, an absolute grading is computed via
        _getAbsGrading; otherwise a relative grading is obtained by
        registering the Heegaard diagram of this arcslide.
        """
        self.all_idems = self.getIdems()
        self.all_chords = []
        # Collect chords by invoking each chord-type generator for this
        # slide type; each one appends to self.all_chords via _addPair.
        if self.slide_type == UNDER_SLIDE:
            for chord_type in self._UChords:
                chord_type(self)
        else:
            for chord_type in self._OChords:
                chord_type(self)
        alg1 = self.start_pmc.getAlgebra()
        alg2 = self.end_pmc.opp().getAlgebra()
        ddstr = DDStrFromChords(alg1, alg2, self.all_idems, self.all_chords)
        if abs_gr_info is not None:
            self._getAbsGrading(ddstr, abs_gr_info)
        else:
            # Any generator works as the base generator for relative grading.
            for gen in ddstr.getGenerators():
                base_gen = gen
                break
            ddstr.registerHDiagram(getArcslideDiagram(self), base_gen)
        return ddstr
    def _getAbsGrading(self, ddstr, abs_gr_info, expand_after = True):
        """Find absolute grading for type DD structure (to replace finding a
        relative grading). It can be shown that value of expand_after does not
        matter.
        """
        # Find grading of elements:
        # Create Heegaard diagram for the expanded arcslide (connect-sum
        # with a genus-matching split PMC on one side).
        genus = self.start_pmc.genus
        if expand_after:
            ex_start_pmc = connectSumPMC(self.start_pmc, splitPMC(genus))
            ex_slide = Arcslide(ex_start_pmc, self.b1, self.c1)
        else:
            ex_start_pmc = connectSumPMC(splitPMC(genus), self.start_pmc)
            ex_slide = Arcslide(ex_start_pmc, 4*genus+self.b1, 4*genus+self.c1)
        ex_end_pmc = ex_slide.end_pmc
        ex_hdiagram = getArcslideDiagram(ex_slide)
        hgens = ex_hdiagram.getHFGenerators()
        # First step, find grading of two extreme generators, using one of
        # which as the base generator.
        base_idem = [Idempotent(ex_start_pmc, list(range(2*genus))),
                     Idempotent(ex_end_pmc.opp(), list(range(2*genus)))]
        base_idem2 = [Idempotent(ex_start_pmc, list(range(2*genus, 4*genus))),
                      Idempotent(ex_end_pmc.opp(), list(range(2*genus, 4*genus)))]
        if abs_gr_info == 0:
            base_gen = ex_hdiagram.getGeneratorByIdem(base_idem, True)
        else:
            base_gen = ex_hdiagram.getGeneratorByIdem(base_idem2, True)
        ex_gr_set, ex_grs = ex_hdiagram.computeDDGrading(base_gen)
        # Second step
        # # print ex_gr_set
        # base_gr1 = ex_grs[base_gen]
        # base_gr2 = ex_grs[base_gen2]
        # # print base_gr1, base_gr2
        # spinc11, spinc12 = base_gr1.data[0].spinc, base_gr1.data[1].spinc
        # spinc21, spinc22 = base_gr2.data[0].spinc, base_gr2.data[1].spinc
        # spincmavg1 = [-(a+b)/Fraction(2) for a,b in zip(spinc11, spinc21)]
        # spincmavg2 = [-(a+b)/Fraction(2) for a,b in zip(spinc12, spinc22)]
        # maslovavg = base_gr1.data[0].maslov + base_gr2.data[0].maslov + \
        #     base_gr1.data[1].maslov + base_gr2.data[1].maslov
        # mavg1 = SmallGradingElement(
        #     base_gr1.data[0].parent, -maslovavg, spincmavg1)
        # mavg2 = SmallGradingElement(
        #     base_gr2.data[0].parent, -Fraction(1,16), spincmavg2)
        # ex_gr_set, ex_grs = ex_hdiagram.computeDDGrading(base_gen,
        #                                                 (mavg1, mavg2))
        # # print ex_gr_set
        # # print ex_grs[base_gen], ex_grs[base_gen2]
        # Form the grading set for the original arcslide from the extended one
        periodic_domains = []
        for ex_gr1, ex_gr2 in ex_gr_set.periodic_domains:
            ex_spinc1, ex_spinc2 = ex_gr1.spinc, ex_gr2.spinc
            # Restrict spin-c components to the half coming from the
            # original (non-split) PMC.
            if expand_after:
                spinc1, spinc2 = ex_spinc1[0:2*genus], ex_spinc2[2*genus:]
            else:
                spinc1, spinc2 = ex_spinc1[2*genus:], ex_spinc2[0:2*genus]
            if not (all([n == 0 for n in spinc1]) and
                    all([n == 0 for n in spinc2])):
                gr1 = self.start_pmc.small_gr(ex_gr1.maslov, spinc1)
                gr2 = self.end_pmc.opp().small_gr(ex_gr2.maslov, spinc2)
                periodic_domains.append([gr1, gr2])
        ddstr.gr_set = SimpleDbGradingSet(
            SmallGradingGroup(self.start_pmc), ACTION_LEFT,
            SmallGradingGroup(self.end_pmc.opp()), ACTION_LEFT,
            periodic_domains)
        # Now obtain the grading of generators
        ddstr.grading = dict()
        for hgen, ex_gr in list(ex_grs.items()):
            ex_idem1, ex_idem2 = hgen.getDIdem()
            ex_gr1, ex_gr2 = ex_gr.data
            if expand_after:
                pairs1 = [n for n in ex_idem1 if n < 2*genus]
                pairs2 = [n-2*genus for n in ex_idem2 if n >= 2*genus]
            else:
                pairs1 = [n-2*genus for n in ex_idem1 if n >= 2*genus]
                pairs2 = [n for n in ex_idem2 if n < 2*genus]
            if len(pairs1) == genus:
                # Get the corresponding generator in ddstr
                cur_idem = [Idempotent(self.start_pmc, pairs1),
                            Idempotent(self.end_pmc.opp(), pairs2)]
                cur_gen = [gen for gen in ddstr.getGenerators()
                           if [gen.idem1, gen.idem2] == cur_idem]
                assert len(cur_gen) == 1
                cur_gen = cur_gen[0]
                # Finally get the grading
                ex_spinc1, ex_spinc2 = ex_gr1.spinc, ex_gr2.spinc
                if expand_after:
                    for i in range(2*genus):
                        assert ex_spinc2[i] == ex_spinc1[4*genus-i-1]
                    spinc1 = ex_spinc1[0:2*genus]
                    spinc2 = ex_spinc2[2*genus:]
                else:
                    for i in range(2*genus, 4*genus):
                        assert ex_spinc2[i] == ex_spinc1[4*genus-i-1]
                    spinc1 = ex_spinc1[2*genus:]
                    spinc2 = ex_spinc2[0:2*genus]
                gr = SimpleDbGradingSetElement(
                    ddstr.gr_set,
                    [self.start_pmc.small_gr(ex_gr1.maslov, spinc1),
                     self.end_pmc.opp().small_gr(ex_gr2.maslov, spinc2)])
                # The same ddstr generator may come from several extended
                # generators; their gradings must agree.
                if cur_gen in ddstr.grading:
                    assert ddstr.grading[cur_gen] == gr
                else:
                    ddstr.grading[cur_gen] = gr
        ddstr.checkGrading()
        return ddstr
    def getIdems(self):
        """Returns the set of possible idempotent-pairs for generators."""
        all_idems = []
        def shift_idem(idem):
            """Find the corresponding idempotent at right."""
            return Idempotent(self.end_pmc, [self.pair_to_r[i] for i in idem])
        left_idems = self.start_pmc.getIdempotents()
        # Generators of type X (complementary)
        for idem in left_idems:
            all_idems.append((idem, shift_idem(idem).opp().comp()))
        # Generators of type Y (sub-complementary)
        for idem in left_idems:
            if self.c_pair in idem and not self.b_pair in idem:
                idem_data = list(shift_idem(idem).comp())
                idem_data.remove(self.pair_to_r[self.b_pair])
                idem_data.append(self.pair_to_r[self.c_pair])
                right_idem = Idempotent(self.end_pmc, idem_data).opp()
                all_idems.append((idem, right_idem))
        return all_idems
    def _addPair(self, data1, data2):
        """Add this pair of chord data."""
        # For left chords, don't need to change anything
        chord_left = Strands(self.start_pmc, data1)
        # For right chords, find corresponding position at right and then take
        # opposite.
        data2 = [(self.to_r[p], self.to_r[q]) for p, q in data2]
        data2 = [(self.n-1-q, self.n-1-p) for p, q in data2]
        chord_right = Strands(self.end_pmc.opp(), data2)
        self.all_chords.append((chord_left, chord_right))
    def _U1Chords(self):
        """Identical chords on both sides, avoiding b1 and matched pairs."""
        for x in range(self.n):
            for y in range(x+1, self.n):
                if x != self.b1 and y != self.b1 and \
                   self.start_pmc.otherp[x] != y:
                    self._addPair([(x, y)], [(x, y)])
    def _U2Chords(self):
        """Chords appearing on only one side, between b1 and the c-pair."""
        b1, c1, c2 = self.b1, self.c1, self.c2
        if b1 < c1: # so to_r[c2] < to_r[b1]
            self._addPair([(b1, c1)], [])
            self._addPair([], [(c2, b1)])
        if b1 > c1: # so to_r[b1] < to_r[c2]
            self._addPair([(c1, b1)], [])
            self._addPair([], [(b1, c2)])
    def _U3Chords(self):
        """Chords with one endpoint shifted between b1 and the c-pair."""
        b1, c1, c2 = self.b1, self.c1, self.c2
        if b1 < c1:
            for x in range(c1+1, self.n):
                self._addPair([(b1, x)], [(c1, x)])
            for x in range(0, c2):
                self._addPair([(x, c2)], [(x, b1)])
        if b1 > c1:
            for x in range(c2+1, self.n):
                self._addPair([(c2, x)], [(b1, x)])
            for x in range(0, c1):
                self._addPair([(x, b1)], [(x, c1)])
    def _U4Chords(self):
        """Chords shifted on the other side of b1, including broken
        (two-component) chords on one side matching a single chord on the
        other."""
        b1, c1, c2 = self.b1, self.c1, self.c2
        # Two connected chords
        if b1 < c1:
            for x in range(0, b1):
                self._addPair([(x, b1)], [(x, c1)])
            for x in range(c2+1, self.n):
                if x != b1:
                    self._addPair([(c2, x)], [(b1, x)])
        if b1 > c1:
            for x in range(b1+1, self.n):
                self._addPair([(b1, x)], [(c1, x)])
            for x in range(0, c2):
                if x != b1:
                    self._addPair([(x, c2)], [(x, b1)])
        # Three connected chords
        if b1 < c1:
            for x in range(0, b1):
                for y in range(c1+1, self.n):
                    self._addPair([(x, b1), (c1, y)], [(x, y)])
            for x in range(0, c2):
                for y in range(c2+1, self.n):
                    if y != b1:
                        self._addPair([(x, y)], [(x, c2), (b1, y)])
        if b1 > c1:
            for x in range(0, c1):
                for y in range(b1+1, self.n):
                    self._addPair([(x, c1), (b1, y)], [(x, y)])
            for x in range(0, c2):
                for y in range(c2+1, self.n):
                    if x != b1:
                        self._addPair([(x, y)], [(x, b1), (c2, y)])
    def _U5Chords(self):
        """Pairs of chords ending on both points of the c-pair, same on both
        sides."""
        b1, c1, c2 = self.b1, self.c1, self.c2
        sc, bc = min(c1, c2), max(c1, c2)
        for x in range(0, sc):
            for y in range(bc+1, self.n):
                self._addPair([(x, sc), (bc, y)], [(x, sc), (bc, y)])
    def _U6Chords(self):
        """Chords broken at the c-pair on one side only."""
        b1, c1, c2 = self.b1, self.c1, self.c2
        if b1 < c1:
            for x in range(c2+1, self.n):
                if x != b1 and x != c1:
                    self._addPair([(c2, x), (b1, c1)], [(b1, x)])
            for x in range(0, b1):
                if x != c2:
                    self._addPair([(x, b1)], [(x, c1), (c2, b1)])
        if b1 > c1:
            for x in range(0, c2):
                if x != b1 and x != c1:
                    self._addPair([(x, c2), (c1, b1)], [(x, b1)])
            for x in range(b1+1, self.n):
                if x != c2:
                    self._addPair([(b1, x)], [(c1, x), (b1, c2)])
    # Chord-type generators used for under-slides.
    _UChords = [_U1Chords, _U2Chords, _U3Chords, _U4Chords, _U5Chords, \
                _U6Chords]
    # Types O1, O2 and O6 for over-slides coincide with their under-slide
    # counterparts.
    _O1Chords = _U1Chords
    _O2Chords = _U2Chords
    _O6Chords = _U6Chords
    def _O3Chords(self):
        """Over-slide analogue of _U3Chords, with extra exclusions at c2."""
        b1, c1, c2 = self.b1, self.c1, self.c2
        if b1 < c1:
            for x in range(c1+1, self.n):
                if x != c2:
                    self._addPair([(b1, x)], [(c1, x)])
            for x in range(0, c2):
                if x != b1 and x != c1:
                    self._addPair([(x, c2)], [(x, b1)])
        if b1 > c1:
            for x in range(0, c1):
                if x != c2:
                    self._addPair([(x, b1)], [(x, c1)])
            for x in range(c2+1, self.n):
                if x != b1 and x != c1:
                    self._addPair([(c2, x)], [(b1, x)])
    def _O4Chords(self):
        """Over-slide analogue of _U4Chords."""
        b1, c1, c2 = self.b1, self.c1, self.c2
        # Two connected chords
        if b1 < c1:
            for x in range(0, b1):
                self._addPair([(x, b1)], [(x, c1)])
            for x in range(c2+1, self.n):
                if x != b1:
                    self._addPair([(c2, x)], [(b1, x)])
        if b1 > c1:
            for x in range(b1+1, self.n):
                self._addPair([(b1, x)], [(c1, x)])
            for x in range(0, c2):
                if x != b1:
                    self._addPair([(x, c2)], [(x, b1)])
        # Three connected chords
        if b1 < c1:
            for x in range(0, b1):
                for y in range(c1+1, self.n):
                    if y != c2:
                        self._addPair([(x, b1), (c1, y)], [(x, y)])
            for x in range(0, c2):
                for y in range(c2+1, self.n):
                    if x != b1 and x != c1:
                        self._addPair([(x, y)], [(x, c2), (b1, y)])
        if b1 > c1:
            for x in range(0, c1):
                for y in range(b1+1, self.n):
                    if x != c2:
                        self._addPair([(x, c1), (b1, y)], [(x, y)])
            for x in range(0, c2):
                for y in range(c2+1, self.n):
                    if y != b1 and y != c1:
                        self._addPair([(x, y)], [(x, b1), (c2, y)])
    def _O5Chords(self):
        """Pairs of chords touching the c-pair, same on both sides; the two
        chords may be disjoint or nested."""
        b1, b2, c1, c2 = self.b1, self.b2, self.c1, self.c2
        sc, bc = min(c1, c2), max(c1, c2)
        # Chords can be disjoint
        for x in range(sc+1, bc):
            for y in range(x+1, bc):
                if x not in (b1, b2) and y not in (b1, b2):
                    self._addPair([(sc, x), (y, bc)], [(sc, x), (y, bc)])
        # Or one can be contained in the other
        for x in range(bc+1, self.n):
            for y in range(sc+1, bc):
                if x not in (b1, b2) and y not in (b1, b2):
                    self._addPair([(sc, x), (y, bc)], [(sc, x), (y, bc)])
        for x in range(sc+1, bc):
            for y in range(0, sc):
                if x not in (b1, b2) and y not in (b1, b2):
                    self._addPair([(sc, x), (y, bc)], [(sc, x), (y, bc)])
    def _O_BasicChoice(self):
        # Uses one standard way of forming basic choice
        b1, c1, c2 = self.b1, self.c1, self.c2
        # Type O3: those using sigma', not those using sigma
        if b1 < c1:
            self._addPair([(c1, c2)], [(c1, b1)])
        if b1 > c1:
            self._addPair([(c2, c1)], [(b1, c1)])
        # Type O4: three connected chords, two on left
        if b1 < c1:
            for x in range(0, b1):
                self._addPair([(x, b1), (c1, c2)], [(x, c2)])
        if b1 > c1:
            for y in range(b1+1, self.n):
                self._addPair([(c2, c1), (b1, y)], [(c2, y)])
        # Type O7: those with a break on the left
        if c1 < c2:
            for x in range(c1+1, c2):
                self._addPair([(c1, x), (x, c2)], [(c1, c2)])
        if c2 < c1:
            for x in range(c2+1, c1):
                self._addPair([(c2, x), (x, c1)], [(c2, c1)])
        # Type O8: None chosen
    # Chord-type generators used for over-slides.
    _OChords = [_O1Chords, _O2Chords, _O3Chords, _O4Chords, _O5Chords, \
                _O6Chords, _O_BasicChoice]
| 18,536
| 40.101996
| 82
|
py
|
bfh_python
|
bfh_python-master/utilitytest.py
|
"""Unit test for utility.py"""
from utility import *
import unittest
class ToListTest(unittest.TestCase):
    def testToList(self):
        """tolist wraps non-list values in a list and returns lists as-is."""
        cases = [
            (3, [3]),
            ("3", ["3"]),
            ((), [()]),
            ([], []),
            ([3, 4], [3, 4]),
        ]
        for value, expected in cases:
            self.assertEqual(tolist(value), expected)
class SubsetTest(unittest.TestCase):
    def testSubset(self):
        """subset enumerates every sub-tuple of its input."""
        self.assertEqual(sorted(subset([])), [()])
        self.assertEqual(sorted(subset([0])), [(), (0,)])
        expected = [(), ('a',), ('a', 'b'), ('b',)]
        self.assertEqual(sorted(subset(['a', 'b'])), expected)
class SummableDictTest(unittest.TestCase):
    """Exercises arithmetic, equality and key translation on SummableDict."""
    def setUp(self):
        self.first = SummableDict({"a" : 5, "b" : 3, "d" : 3})
        self.second = SummableDict({"a" : 5, "c" : 4, "d" : -3})
        self.empty = SummableDict({})
        self.first_plus_second = SummableDict({"a" : 10, "b" : 3, "c" : 4})
        self.first_doubled = SummableDict({"a" : 10, "b" : 6, "d" : 6})
        self.first_copy = self.first.copy()
        self.second_copy = self.second.copy()
    def testDictAdd(self):
        """__add__ returns the sum without mutating either operand."""
        self.assertEqual(self.first + self.second, self.first_plus_second)
        self.assertEqual(self.first + self.first, self.first_doubled)
        self.assertEqual(self.first, self.first_copy)
        self.assertEqual(self.second, self.second_copy)
    def testDictIAdd(self):
        """__iadd__ and accumulate mutate only the left operand."""
        self.first += self.empty
        self.assertEqual(self.first, self.first_copy)
        self.assertEqual(self.first.accumulate([self.second, self.empty]),
                         self.first_plus_second)
        self.first = self.first_copy
        self.first += self.first
        self.assertEqual(self.first, self.first_doubled)
        self.assertEqual(self.second, self.second_copy)
        self.assertEqual(self.empty, {})
    def testDictMul(self):
        """Scalar multiplication works from either side; zero annihilates."""
        self.assertEqual(self.first * 2, self.first_doubled)
        self.assertEqual(2 * self.first, self.first_doubled)
        self.assertEqual(0 * self.first, {})
    def testDictEqual(self):
        """Equality against other SummableDicts and against zero."""
        self.assertTrue(self.first == self.first_copy)
        self.assertTrue(self.first != self.second)
        self.assertTrue(self.first != 0)
        self.assertTrue(self.empty == 0)
    def testDictTranslate(self):
        """translateKey renames keys while keeping values."""
        before = SummableDict({"a" : 5, "b" : 4, "c" : 3})
        after = before.translateKey(dict({"a" : "aa", "b" : "bb", "c" : "cc"}))
        self.assertTrue(after != before)
        self.assertEqual(after, SummableDict({"aa" : 5, "bb" : 4, "cc" : 3}))
class FiniteRingTest(unittest.TestCase):
    def testFiniteRing(self):
        """The unit of F2 is nonzero and has additive order two."""
        one = F2.one
        self.assertTrue(one != 0)
        self.assertEqual(one + one, 0)
class IntegerRingTest(unittest.TestCase):
    def testIntegerRing(self):
        """Integer ring elements compare and add like ordinary integers."""
        ring = Integer()
        pos_one = IntegerElement(ring, 1)
        neg_one = IntegerElement(ring, -1)
        self.assertEqual(pos_one, 1)
        self.assertEqual(neg_one, -1)
        self.assertEqual(pos_one + neg_one, 0)
        self.assertTrue(pos_one != 0)
# Entry point: run all tests in this module when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 3,009
| 34.411765
| 74
|
py
|
bfh_python
|
bfh_python-master/involutivetest.py
|
"""Unit test for involutive.py"""
from involutive import *
from dstructure import zeroTypeD, infTypeD
from arcslide import Arcslide
from arcslideda import ArcslideDA
from pmc import splitPMC
import unittest
class InvolutiveTest(unittest.TestCase):
    """Ranks of involutive Heegaard Floer homology for simple 3-manifolds."""
    def testInvolutiveRankS3(self):
        """The involutive Floer homology of S^3 is F_2^2."""
        d_zero = zeroTypeD(1)
        d_inf = infTypeD(1)
        self.assertEqual(len(involutiveCx(d_zero, d_inf)), 2)
    def testInvolutiveRankS2S1(self):
        """The involutive Floer homology of S^2 x S^1 is F_2^4."""
        d_zero = zeroTypeD(1)
        self.assertEqual(len(involutiveCx(d_zero, d_zero)), 4)
    def testInvolutiveRankL31(self):
        """The involutive Floer homology of L(3,1) is F_2^4."""
        d_start = infTypeD(1)
        base = infTypeD(1)
        slide = ArcslideDA(Arcslide(splitPMC(1), 1, 0))
        twice = slide.tensorD(slide.tensorD(base))
        thrice = slide.tensorD(twice)
        mor_cx = d_start.morToD(thrice)
        mor_cx.simplify()
        inv_cx = involutiveCx(d_start, twice)
        self.assertEqual(len(mor_cx), 3)
        self.assertEqual(len(inv_cx), 4)
    def testInvolutiveRankS2S1S2S1(self):
        """The involutive Floer homology of (S^2 x S^1) # (S^2 x S^1) has
        rank 8 (per the assertion below)."""
        d_zero = zeroTypeD(2)
        self.assertEqual(len(involutiveCx(d_zero, d_zero)), 8)
    def testIFHDCov(self):
        """Check the involutive Floer homology of a branched double cover."""
        knot = "4_1 3 1 0 5 4 3 2 4 1 2 3 2 1 2 1 0 5 4 3 2 1 0"
        self.assertEqual(len(invOfDCov(knot)), 6)
# Entry point: run all tests in this module when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 1,614
| 30.666667
| 106
|
py
|
bfh_python
|
bfh_python-master/localpmc.py
|
"""This module offers minimal support for PMC with boundaries and unmatched
points. A normal PMC (defined in pmc.py) can be split into one or several
local PMC's like this.
"""
from algebra import DGAlgebra, Element, Generator
from algebra import E0
from pmc import Strands, StrandDiagram
from utility import memorize, memorizeHash, subset
from utility import F2
import itertools
class LocalPMC(object):
    """Represents a pointed matched circle with boundaries and unmatched
    points.
    """
    def __init__(self, n, matching, endpoints):
        """Creates a pointed matched circle with n points (including
        endpoints).
        - matching: a list of tuples, with each tuple containing either one
        (for unpaired point) or two (for matched pair) points.
        - endpoints: list of endpoints. Must be disjoint from numbers involved
        in matching.
        """
        self.n = n
        self.endpoints = endpoints
        # Total number of pairs and single points.
        self.num_pair = len(matching)
        # otherp[i] is the point paired to i. Equals -1 for boundary point, and
        # i for unpaired points.
        self.otherp = [-1] * self.n
        for pair in matching:
            if len(pair) == 2:
                self.otherp[pair[0]] = pair[1]
                self.otherp[pair[1]] = pair[0]
            else: # len(pair) == 1
                self.otherp[pair[0]] = pair[0]
        # Endpoints must not appear in the matching.
        for endpoint in endpoints:
            assert self.otherp[endpoint] == -1
        # pairid[i] is the ID of the pair containing i.
        # 0 <= pairid[i] < num_pair if i is not an endpoint.
        # pairid[i] = -1 if i is an endpoint.
        self.pairid = [-1] * self.n
        # pairs[i] is the pair of points with ID i (0 <= i < num_pair),
        # ordered by the smaller point of each pair.
        self.pairs = []
        pair_count = 0
        for pos in range(self.n):
            if pos in endpoints: continue
            if self.pairid[pos] == -1:
                self.pairid[pos] = self.pairid[self.otherp[pos]] = pair_count
                if pos == self.otherp[pos]:
                    self.pairs.append((pos,))
                else:
                    self.pairs.append((pos, self.otherp[pos]))
                pair_count += 1
        assert pair_count == self.num_pair
    def __eq__(self, other):
        # The matching (otherp) determines the local PMC completely.
        return self.otherp == other.otherp
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash(tuple(self.otherp))
    def __str__(self):
        return str(self.pairs)
    def __repr__(self):
        return "LocalPMC(%s)" % str(self)
    def sd(self, data):
        """Simple way to obtain a local strand diagram for this local PMC. Each
        element of data is either an integer or a pair. An integer specifies a
        single or double horizontal at this position (and its paired position,
        if any). A pair (p, q) specifies a strand from p to q.
        """
        parent = self.getAlgebra()
        left_idem = []
        strands = []
        for d in data:
            if isinstance(d, int):
                # Horizontal: must not be at an endpoint.
                assert self.pairid[d] != -1
                left_idem.append(self.pairid[d])
            else:
                # Strand: occupies the left idempotent of its starting pair
                # unless it starts at an endpoint.
                if self.pairid[d[0]] != -1:
                    left_idem.append(self.pairid[d[0]])
                strands.append(d)
        return LocalStrandDiagram(parent, left_idem, strands)
    def getAlgebra(self):
        """Returns the local strand algebra for this local PMC."""
        return LocalStrandAlgebra(F2, self)
    def getSingleIdems(self):
        """Return the list of indices of pairs / single points that are single
        points.
        """
        return [i for i in range(self.num_pair) if len(self.pairs[i]) == 1]
    def getIdempotents(self):
        """Get the list of all idempotents (no restriction on size)."""
        return [LocalIdempotent(self, data) for sz in range(self.num_pair+1)
                for data in itertools.combinations(list(range(self.num_pair)), sz)]
    def getStrandDiagrams(self):
        """Returns the list of generators of the local strand algebra. Note we
        automatically impose the multiplicity-one condition, and there are no
        constraints on the size of idempotents.
        """
        algebra = self.getAlgebra()
        result = []
        def search(cur_strands):
            """Search starting with the given list of strands. May only add
            strands after the end position of the last strand.
            """
            # First, check if the current list of strands is valid.
            left_occupied = [0] * self.num_pair
            right_occupied = [0] * self.num_pair
            for start, end in cur_strands:
                start_id, end_id = self.pairid[start], self.pairid[end]
                if start_id != -1:
                    left_occupied[start_id] += 1
                if end_id != -1:
                    right_occupied[end_id] += 1
            if any([n >= 2 for n in left_occupied + right_occupied]):
                # There should not be two strands starting or ending at points
                # in the same pair.
                return
            # Enumerate all possible ways of adding idempotents.
            empty_idems = [i for i in range(self.num_pair)
                           if left_occupied[i] == 0 and right_occupied[i] == 0]
            left_idem = [i for i in range(self.num_pair)
                         if left_occupied[i] > 0]
            for idems_to_add in subset(empty_idems):
                result.append(LocalStrandDiagram(
                    algebra, left_idem + list(idems_to_add), cur_strands))
            # Now enumerate all ways of adding more strands.
            last_end = 0
            if len(cur_strands) > 0:
                last_end = cur_strands[-1][1]
            for start in range(last_end, self.n):
                for end in range(start + 1, self.n):
                    if self.pairid[start] == -1 and self.pairid[end] == -1 and \
                       end == start + 1:
                        # Exclude cases where a strand goes from an
                        # end-boundary-point to a start-boundary-point
                        break
                    search(cur_strands + [(start, end)])
                    if self.pairid[end] == -1:
                        # No strand should go beyond an end-boundary-point.
                        break
        search([])
        return result
class LocalIdempotent(tuple):
    """An idempotent in a local PMC, stored as the sorted tuple of IDs of the
    occupied pairs.
    """
    def __new__(cls, local_pmc, data):
        return tuple.__new__(cls, tuple(sorted(data)))
    def __init__(self, local_pmc, data):
        self.local_pmc = local_pmc
    def __eq__(self, other):
        if not isinstance(other, LocalIdempotent):
            return False
        return self.local_pmc == other.local_pmc and tuple.__eq__(self, other)
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash((self.local_pmc, tuple(self), "LocalIdempotent"))
    def __str__(self):
        return repr(self)
    def __repr__(self):
        pair_strs = [str(self.local_pmc.pairs[pair_id]) for pair_id in self]
        return "(%s)" % ",".join(pair_strs)
    def removeSingleHor(self, idems = None):
        """Returns a new idempotent with single points removed.
        If idems is None (default case): all single idempotents will be removed.
        Otherwise, idems must be a list containing single idems to be removed.
        """
        kept = []
        for pair_id in self:
            if len(self.local_pmc.pairs[pair_id]) == 2:
                kept.append(pair_id)
            elif idems is not None and pair_id not in idems:
                kept.append(pair_id)
        return LocalIdempotent(self.local_pmc, kept)
    def toAlgElt(self):
        """Get the local strand diagram corresponding to this idempotent (the
        strand algebra is uniquely specified from the local PMC).
        """
        return LocalStrandDiagram(self.local_pmc.getAlgebra(), self, [])
class LocalStrands(tuple):
    """A fixed collection of strands in a local PMC, stored as a sorted tuple
    of (start, end) pairs.
    """
    def __new__(cls, local_pmc, data):
        return tuple.__new__(cls, tuple(sorted(data)))
    def __init__(self, local_pmc, data):
        self.local_pmc = local_pmc
        # multiplicity[i] is the number of strands covering the interval
        # (i, i+1). Gaps between two boundary points should never be covered,
        # but that is not checked here.
        self.multiplicity = [0] * (self.local_pmc.n - 1)
        for strand in self:
            assert len(strand) == 2 and strand[0] < strand[1]
            for interval in range(strand[0], strand[1]):
                self.multiplicity[interval] += 1
    def propagateRight(self, left_idem):
        """Find the right_idem given left_idem and strand info. This is
        similar to propagateRight in pmc.py, except idempotents may appear or
        disappear because of strands entering or leaving through the boundary.
        """
        pmc = self.local_pmc
        occupancy = [0] * pmc.num_pair
        for pair_id in left_idem:
            occupancy[pair_id] += 1
        # Each strand start (away from the boundary) must vacate an
        # occupied pair...
        for start, end in self:
            if start in pmc.endpoints:
                continue
            if occupancy[pmc.pairid[start]] == 0:
                return None
            occupancy[pmc.pairid[start]] -= 1
        # ...and each strand end (away from the boundary) must land on a
        # vacant pair.
        for start, end in self:
            if end in pmc.endpoints:
                continue
            if occupancy[pmc.pairid[end]] == 1:
                return None
            occupancy[pmc.pairid[end]] += 1
        return LocalIdempotent(
            pmc, [i for i in range(pmc.num_pair) if occupancy[i] == 1])
class LocalStrandDiagram(Generator):
    """A strand diagram in an local PMC.
    Multiplicity-one condition is hard-coded in. Dealing with multiplicity
    greater than one when there are strands going to the boundary can be more
    subtle.
    """
    def __init__(self, parent, left_idem, strands):
        """Specifies the parent algebra (which contains the local PMC), left
        idempotent, and strands.
        Input parameters:
        - parent: must be an object of LocalStrandAlgebra.
        - left_idem: tuple containing IDs of occupied pairs.
        - strands: tuple of pairs specifying strands.
        """
        Generator.__init__(self, parent)
        self.local_pmc = parent.local_pmc
        self.left_idem = left_idem
        # Normalize inputs to LocalIdempotent / LocalStrands wrappers.
        if not isinstance(self.left_idem, LocalIdempotent):
            self.left_idem = LocalIdempotent(self.local_pmc, self.left_idem)
        if not isinstance(strands, LocalStrands):
            strands = LocalStrands(self.local_pmc, strands)
        self.strands = strands
        # Get right_idem and multiplicity from strands
        self.right_idem = self.strands.propagateRight(self.left_idem)
        self.multiplicity = self.strands.multiplicity
        # Enumerate single and double horizontals: pairs in left_idem not
        # used as the starting point of any strand.
        self.all_hor = list(self.left_idem)
        for st in self.strands:
            start_idem = self.local_pmc.pairid[st[0]]
            if start_idem != -1:
                if start_idem not in self.all_hor:
                    # NOTE(review): debug trace; reaching this line means the
                    # strand's starting pair is missing from left_idem and the
                    # remove() below will raise ValueError.
                    print(left_idem, strands)
                self.all_hor.remove(start_idem)
        self.all_hor = tuple(self.all_hor)
        self.single_hor = tuple([i for i in self.all_hor
                                 if len(self.local_pmc.pairs[i]) == 1])
        self.double_hor = tuple([i for i in self.all_hor
                                 if len(self.local_pmc.pairs[i]) == 2])
    def isIdempotent(self):
        """Tests whether this generator is an idempotent."""
        return len(self.strands) == 0
    def getLeftIdem(self):
        """Return the left idempotent."""
        return self.left_idem
    def getRightIdem(self):
        """Return the right idempotent."""
        return self.right_idem
    def __str__(self):
        return "[%s]" % \
            ",".join([str(self.local_pmc.pairs[i]) for i in self.all_hor] +
                     ["%s->%s" % (p, q) for (p, q) in self.strands])
    def __repr__(self):
        return str(self)
    def inputForm(self):
        """Output in the form used for sd."""
        return "[%s]" % \
            ", ".join([str(self.local_pmc.pairs[i][0]) for i in self.all_hor] +
                      ["(%s, %s)" % (p, q) for (p, q) in self.strands])
    def __eq__(self, other):
        # left_idem and strands determine right_idem, so they suffice.
        return self.parent == other.parent and \
            self.left_idem == other.left_idem and \
            self.strands == other.strands
    def __ne__(self, other):
        return not (self == other)
    @memorizeHash
    def __hash__(self):
        return hash((self.parent, tuple(self.left_idem),
                     tuple(self.strands)))
    @memorize
    def removeSingleHor(self, idems = None):
        """Return a local strand diagram that is just like this, except with
        some single horizontal lines removed.
        If idems is None (default case): all single idempotents will be removed.
        Otherwise, idems must be a list containing single idems to be removed.
        """
        new_left_idem = list(self.left_idem)
        for i in self.single_hor:
            if idems is None or i in idems:
                new_left_idem.remove(i)
        return LocalStrandDiagram(self.parent, new_left_idem, self.strands)
    @memorize
    def addSingleHor(self, idems):
        """Opposite of removeSingleHor. Add some single idempotents. The
        conditions on inputs are more strict than in removeSingleHor. idems must
        be provided and must be idempotents not already in self.
        """
        new_left_idem = list(self.left_idem)
        for i in idems:
            assert i not in self.left_idem
            assert len(self.parent.local_pmc.pairs[i]) == 1
            new_left_idem.append(i)
        return LocalStrandDiagram(self.parent, new_left_idem, self.strands)
# Linear combinations of LocalStrandDiagram use the plain Element class;
# no additional element structure is needed at this time.
LocalStrandDiagram.ELT_CLASS = Element
class LocalStrandAlgebra(DGAlgebra):
    """Represents the strand algebra of a local PMC."""
    def __init__(self, ring, local_pmc):
        """Specifies the local PMC. Note that unlike StrandAlgebra, the size of
        idempotent is variable, and we only implement the multiplicity one case.
        """
        DGAlgebra.__init__(self, ring)
        self.local_pmc = local_pmc
    def __str__(self):
        return "Local strand algebra over %s" % str(self.local_pmc)
    def __eq__(self, other):
        return self.local_pmc == other.local_pmc
    def __ne__(self, other):
        return not (self == other)
    @memorizeHash
    def __hash__(self):
        return hash(("LocalStrandAlgebra", self.local_pmc))
    @memorize
    def diff(self, gen):
        """Differential of a local strand diagram: sum over all ways of
        resolving a crossing between a moving strand and a horizontal."""
        cur_strands = gen.strands
        result = E0
        # In multiplicity one case, only need to worry about uncrossing a moving
        # strand with a horizontal. Also, no need to worry about double
        # crossing.
        for st in cur_strands:
            for i in gen.all_hor:
                for p in gen.local_pmc.pairs[i]:
                    if st[0] <= p and p <= st[1]:
                        # Break the strand st at p into two strands; the
                        # horizontal at pair i is consumed.
                        new_strands = list(cur_strands)
                        new_strands.remove(st)
                        new_strands.extend([(st[0], p), (p, st[1])])
                        result += LocalStrandDiagram(
                            self, gen.left_idem, new_strands).elt()
        return result
    @memorize
    def getGenerators(self):
        return self.local_pmc.getStrandDiagrams()
    def multiply(self, gen1, gen2):
        """Product of two local strand diagrams. Returns E0 when the diagrams
        do not compose (idempotent mismatch, multiplicity > 1, or a strand
        end-to-start mismatch within a matched pair)."""
        if not isinstance(gen1, LocalStrandDiagram):
            return NotImplemented
        if not isinstance(gen2, LocalStrandDiagram):
            return NotImplemented
        assert gen1.parent == self and gen2.parent == self, \
            "Algebra not compatible."
        if gen1.right_idem != gen2.left_idem:
            return E0
        pmc = self.local_pmc
        # Multiplicity-one condition
        total_mult = [m1+m2 for m1, m2 in zip(gen1.multiplicity,
                                              gen2.multiplicity)]
        if not all([x <= 1 for x in total_mult]):
            return E0
        new_strands = []
        # Keep track of which strands at right are not yet used.
        strands_right = list(gen2.strands)
        for sd in gen1.strands:
            mid_idem = pmc.pairid[sd[1]]
            if mid_idem == -1:
                # Strands going to the boundary go to the product
                new_strands.append((sd[0], sd[1]))
                continue
            possible_match = [sd2 for sd2 in strands_right
                              if pmc.pairid[sd2[0]] == mid_idem]
            if len(possible_match) == 0:
                new_strands.append(sd)
            else: # len(possible_match) == 1
                sd2 = possible_match[0]
                if sd2[0] != sd[1]:
                    # Matching pair occupied, but at the other point: the
                    # strands do not concatenate.
                    return E0
                else:
                    # Concatenate the two strands.
                    new_strands.append((sd[0], sd2[1]))
                    strands_right.remove(sd2)
        new_strands.extend(strands_right)
        # Since we are in the multiplicity-one case, no need to worry about
        # double-crossing. Can return now.
        return LocalStrandDiagram(self, gen1.left_idem, new_strands).elt()
class PMCSplitting(object):
    """Contains information about a splitting of a full PMC into two local PMCs.
    """
    def __init__(self, pmc, intervals):
        """Given a full PMC and a list of intervals (specified by pairs),
        construct the splitting where local_pmc is the restriction of pmc to the
        intervals, and outer_pmc is the restriction of pmc to the complement of
        the intervals.
        - intervals: must be ordered, disjoint intervals. Each interval is
        specified in the format (start, end), which represents the interval
        between these two points. Intervals with start > end are ignored.
        """
        self.pmc = pmc
        self.intervals = tuple(intervals)
        outer_intervals = PMCSplitting.complementIntervals(self.pmc, intervals)
        # local_mapping / outer_mapping are dicts from points of pmc to points
        # of the corresponding local PMC.
        self.local_pmc, self.local_mapping = \
            PMCSplitting.restrictPMC(self.pmc, intervals)
        self.outer_pmc, self.outer_mapping = \
            PMCSplitting.restrictPMC(self.pmc, outer_intervals)
    def __str__(self):
        return "Splitting of %s with local intervals %s.\n" % (self.pmc,
                                                               self.intervals)
    def __eq__(self, other):
        return self.pmc == other.pmc and self.intervals == other.intervals
    def __ne__(self, other):
        return not (self == other)
    @memorizeHash
    def __hash__(self):
        return hash(("PMCSplitting", self.pmc, self.intervals))
    def restrictStrandDiagramLocal(self, sd):
        """Returns the local strand diagram that is the restriction of sd to
        local_pmc.
        """
        # Memorize within sd.
        if not hasattr(sd, "restrictLocal"):
            sd.restrictLocal = dict()
        if self not in sd.restrictLocal:
            rv = PMCSplitting.restrictStrandDiagram(
                self.pmc, sd, self.local_pmc, self.local_mapping)
            sd.restrictLocal[self] = rv
            return rv
        else:
            return sd.restrictLocal[self]
    def restrictStrandDiagramOuter(self, sd):
        """Returns the local strand diagram that is the restriction of sd to
        outer_pmc.
        """
        # Memorize within sd.
        if not hasattr(sd, "restrictOuter"):
            sd.restrictOuter = dict()
        if self not in sd.restrictOuter:
            rv = PMCSplitting.restrictStrandDiagram(
                self.pmc, sd, self.outer_pmc, self.outer_mapping)
            sd.restrictOuter[self] = rv
            return rv
        else:
            return sd.restrictOuter[self]
    def restrictIdempotentLocal(self, idem):
        """Returns the local idempotent that is the restriction of idem to
        local_pmc.
        """
        return PMCSplitting.restrictIdempotent(
            self.pmc, idem, self.local_pmc, self.local_mapping)
    def restrictIdempotentOuter(self, idem):
        """Returns the local idempotent that is the restriction of idem to
        outer_pmc.
        """
        return PMCSplitting.restrictIdempotent(
            self.pmc, idem, self.outer_pmc, self.outer_mapping)
    def joinStrandDiagram(self, sd1, sd2):
        """Joins two strand diagrams. sd1 is in local_pmc and sd2 is in
        outer_pmc.
        Returns None if these two local strand diagrams cannot be joined.
        Otherwise returns the strand diagram in a full PMC.
        """
        assert sd1.parent.local_pmc == self.local_pmc
        assert sd2.parent.local_pmc == self.outer_pmc
        # Check idempotent is OK. Create joined left_idem.
        left_idem = []
        # Index 1 refers to the local side, index 2 to the outer side.
        mappings = {1 : self.local_mapping, 2 : self.outer_mapping}
        local_pmcs = {1 : self.local_pmc, 2 : self.outer_pmc}
        local_left_idem = {1 : sd1.left_idem, 2 : sd2.left_idem}
        for pairid in range(self.pmc.num_pair):
            p, q = self.pmc.pairs[pairid]
            # A full pair is in the joined idempotent if either of its points
            # restricts to an occupied pair on its side.
            for i in (1, 2):
                if p in mappings[i]:
                    has_idem_p = (local_pmcs[i].pairid[mappings[i][p]]
                                  in local_left_idem[i])
                if q in mappings[i]:
                    has_idem_q = (local_pmcs[i].pairid[mappings[i][q]]
                                  in local_left_idem[i])
            if has_idem_p or has_idem_q:
                left_idem.append(pairid)
        # Construct inverse mapping from points in local_pmc to points in pmc.
        inv_mappings = {1 : {}, 2 : {}}
        for pt, local_pt in list(mappings[1].items()):
            inv_mappings[1][local_pt] = pt
        for pt, local_pt in list(mappings[2].items()):
            inv_mappings[2][local_pt] = pt
        # Create joined strands.
        # First create list of local strands in sorted order in original pmc.
        local_strands = {1 : sd1.strands, 2 : sd2.strands}
        all_local_strands = []
        for i in (1, 2):
            for start, end in local_strands[i]:
                if start in local_pmcs[i].endpoints:
                    # Strand starts at the boundary of the local PMC; sort it
                    # by the full-pmc position just inside the boundary.
                    assert start+1 not in local_pmcs[i].endpoints
                    start_pos = inv_mappings[i][start + 1]
                else:
                    start_pos = inv_mappings[i][start]
                all_local_strands.append((start_pos, (start, end), i))
        all_local_strands = sorted(all_local_strands)
        # First check that every loose end is matched. Otherwise return.
        for strands_id in range(len(all_local_strands)):
            start_pos, (start, end), pmc_id = all_local_strands[strands_id]
            if start in local_pmcs[pmc_id].endpoints:
                # A strand entering from the boundary must continue a strand
                # that exited to the boundary on the other side.
                if strands_id == 0:
                    return None
                prev_pos, (prev_start, prev_end), prev_pmc_id = \
                    all_local_strands[strands_id - 1]
                if prev_end not in local_pmcs[prev_pmc_id].endpoints:
                    return None
                # Check boundaries match in one of the two directions
                if prev_pmc_id == pmc_id:
                    return None
                if inv_mappings[pmc_id][start+1] - 1 != \
                   inv_mappings[prev_pmc_id][prev_end-1]:
                    return None
            if end in local_pmcs[pmc_id].endpoints:
                if strands_id == len(all_local_strands) - 1:
                    return None
                next_pos, (next_start, next_end), next_pmc_id = \
                    all_local_strands[strands_id + 1]
                if next_start not in local_pmcs[next_pmc_id].endpoints:
                    return None
        # Having made sure that every loose end is closed, we can simply take
        # the sequence of non-endpoints.
        all_strand_boundaries = []
        for start_pos, (start, end), pmc_id in all_local_strands:
            for pt in (start, end):
                if pt not in local_pmcs[pmc_id].endpoints:
                    all_strand_boundaries.append(inv_mappings[pmc_id][pt])
        # Now, simply take pairs:
        start_pts = all_strand_boundaries[::2]
        end_pts = all_strand_boundaries[1::2]
        # Two strands may not start (or end) on the two points of one pair.
        for pt in start_pts:
            if self.pmc.otherp[pt] in start_pts:
                return None
        for pt in end_pts:
            if self.pmc.otherp[pt] in end_pts:
                return None
        strands = list(zip(start_pts, end_pts))
        if not Strands(self.pmc, strands).leftCompatible(left_idem):
            return None
        return self.pmc.getAlgebra(idem_size = len(left_idem),
                                   mult_one = True).getStrandDiagram(
                                       tuple(left_idem), tuple(strands))
    def joinIdempotent(self, idem1, idem2):
        """Join two local idempotents. """
        # Idempotents are joined as (strand-free) strand diagrams.
        return self.joinStrandDiagram(
            idem1.toAlgElt(), idem2.toAlgElt()).left_idem
    @staticmethod
    def complementIntervals(pmc, intervals):
        """Given a full PMC and a list of intervals, return the list of
        intervals that forms the complement.
        """
        result = []
        if len(intervals) == 0:
            result.append((0, pmc.n - 1))
            return result
        if intervals[0][0] != 0:
            result.append((0, intervals[0][0] - 1))
        if intervals[-1][1] != pmc.n - 1:
            result.append((intervals[-1][1] + 1, pmc.n - 1))
        for i in range(len(intervals) - 1):
            # Consider this case later
            assert intervals[i][1] != intervals[i+1][0], \
                "Intervals with no points in the middle is not implemented."
            result.append((intervals[i][1] + 1, intervals[i+1][0] - 1))
        return sorted(result)
    @staticmethod
    def restrictPMC(pmc, intervals):
        """Given a full PMC and a list of intervals, return a pair
        (local_pmc, mapping) where
        - local_pmc is the local PMC that is the restriction of pmc to the
        intervals.
        - mapping is a dictionary mapping from points in pmc to points in
        local_pmc.
        Example:
        PMCSplitting.restrictPMC(PMC([(0, 2),(1, 3)]), [(0, 2)])
          => (LocalPMC(4, [(0, 2), (1,)], [3]), {0:0, 1:1, 2:2})
        """
        # For each interval, add the appropriate endpoints. Fill in endpoints
        # and mapping first.
        num_local_points = 0
        endpoints = []
        mapping = {}
        for start, end in intervals:
            if start > end:
                continue
            length = end - start + 1
            # Boundary endpoints are added only where the interval does not
            # abut the boundary of the full PMC.
            if start != 0:
                endpoints.append(num_local_points)
                num_local_points += 1
            for pt in range(start, end+1):
                mapping[pt] = num_local_points
                num_local_points += 1
            if end != pmc.n - 1:
                endpoints.append(num_local_points)
                num_local_points += 1
        # Now compute the matching.
        matching = []
        for p, q in pmc.pairs:
            if p in mapping and q in mapping:
                matching.append((mapping[p], mapping[q]))
            elif p in mapping and q not in mapping:
                matching.append((mapping[p],))
            elif p not in mapping and q in mapping:
                matching.append((mapping[q],))
        return (LocalPMC(num_local_points, matching, endpoints), mapping)
    @staticmethod
    def restrictStrandDiagram(pmc, sd, local_pmc, mapping):
        """Restrict the given strand diagram to the local_pmc, using mapping as
        the dictionary from points in pmc to points in local_pmc.
        """
        assert sd.parent.pmc == pmc
        # First construct the left idempotent.
        local_left_idem = []
        for (start, end) in sd.strands:
            if start in mapping:
                local_left_idem.append(local_pmc.pairid[mapping[start]])
        for pairid in sd.double_hor:
            p, q = pmc.pairs[pairid]
            if p in mapping:
                local_left_idem.append(local_pmc.pairid[mapping[p]])
            elif q in mapping:
                local_left_idem.append(local_pmc.pairid[mapping[q]])
        # Next, construct strands. For each strand in sd, construct zero or more
        # child strands.
        local_strands = []
        for start, end in sd.strands:
            # Whether to extend the previous item on local_strands. Otherwise,
            # will add new local_strand when needed.
            extend_prev = False
            for pt in range(start, end):
                if pt in mapping:
                    local_pt = mapping[pt]
                    # The interval (pt, pt+1) corresponds to
                    # (local_pt, local_pt+1) in the local PMC. Note local_pt+1
                    # may be an endpoint.
                    if extend_prev:
                        prev_start, prev_end = local_strands[-1]
                        assert prev_end == local_pt
                        local_strands[-1] = (prev_start, local_pt+1)
                    else:
                        if pt != start:
                            # Strand re-enters the local PMC from the boundary.
                            assert local_pt - 1 in local_pmc.endpoints
                            local_strands.append((local_pt-1, local_pt+1))
                        else:
                            local_strands.append((local_pt, local_pt+1))
                    extend_prev = True
                    # Turn off extend_prev when endpoint in local_pmc is
                    # reached.
                    if local_pt + 1 in local_pmc.endpoints:
                        extend_prev = False
            # Special case at the end.
            if end - 1 not in mapping and end in mapping:
                local_end = mapping[end]
                assert local_end - 1 in local_pmc.endpoints
                local_strands.append((local_end - 1, local_end))
        return LocalStrandDiagram(local_pmc.getAlgebra(),
                                  local_left_idem, local_strands)
    @staticmethod
    def restrictIdempotent(pmc, idem, local_pmc, mapping):
        """Restrict the given idempotent to the local_pmc, using mapping as the
        dictionary from points in pmc to points in local_pmc.
        """
        local_idem = []
        for pairid in idem:
            p, q = pmc.pairs[pairid]
            if p in mapping:
                local_idem.append(local_pmc.pairid[mapping[p]])
            elif q in mapping:
                local_idem.append(local_pmc.pairid[mapping[q]])
        return LocalIdempotent(local_pmc, local_idem)
| 30,724
| 37.991117
| 83
|
py
|
bfh_python
|
bfh_python-master/involutive.py
|
"""
Created on Wed May 31 11:22:54 2017
@author: lipshitz
"""
from utility import SummableDict, F2, fracToInt, ACTION_LEFT, ACTION_RIGHT
from algebra import Element, E0
from dstructure import MorDtoDGenerator, DGenerator, SimpleDStructure
from algebra import SimpleChainComplex, SimpleGenerator, SimpleChainMorphism, Generator
from dastructure import SimpleDAStructure, SimpleDAGenerator, identityDA, DATensorDGenerator, augmentationDA
from pmc import StrandDiagram
from grading import SmallGradingGroup, SimpleDbGradingSet, SimpleDbGradingSetElement
from braid import Braid, BraidCap, readBridgePresentation
from arcslideda import ArcslideDA
def applyDMor(f,x):
    """Evaluate a type D morphism on a generator.

    f is an Element (a linear combination of morphism generators) and x is a
    DGenerator; the result is the Element sum of coeff * gen.apply(x).
    """
    result = E0
    for gen, coeff in list(f.items()):
        result += coeff * gen.apply(x)
    return result
def composeMor(f,g, parent=None):
    """Return the composition f o g of two type D morphism Elements
    (obtained, for example, from prev_meaning).

    parent, if given, is passed through to compose() as the parent of the
    composed morphism generators.
    """
    total = E0
    for gmor, gcoeff in list(g.items()):
        for fmor, fcoeff in list(f.items()):
            total += (fcoeff * gcoeff) * fmor.compose(gmor, parent)
    return total
def chordPairs(pmc, mult_one = True):
    """List of chord pairs (sigma, rho) as in the differential on CFDD(Id).

    For each chord (i, j) and each idempotent I occupying i but not j, pair
    the strand diagram sigma = (I, i->j) with rho = (comp(right idem), i->j).
    """
    result = []
    algebra = pmc.getAlgebra(mult_one = mult_one)
    for i in range(pmc.n):
        for j in range(i+1, pmc.n):
            for I in pmc.getIdempotents():
                # All point positions occupied by the pairs in I.
                occupied = [pt for pair_id in I
                            for pt in I.pmc.pairs[pair_id]]
                if i in occupied and j not in occupied:
                    sigma = StrandDiagram(algebra, I, [(i, j)])
                    J = sigma.right_idem.comp()
                    rho = StrandDiagram(algebra, J, [(i, j)])
                    result.append((sigma, rho))
    return result
def azDA(pmc,mult_one = True):
    """The type DA module associated to the Auroux-Zarev piece.

    Input: the alpha pointed matched circle. Generators are indexed by strand
    diagrams of pmc; delta has three kinds of terms, constructed below.
    """
    algebra = pmc.getAlgebra(mult_one = mult_one)
    answer = SimpleDAStructure(F2, algebra, algebra)
    #Compute the generators
    for a in pmc.getStrandDiagrams(answer.algebra1):
        gen = SimpleDAGenerator(answer, a.left_idem.comp(), a.right_idem, a)
        answer.addGenerator(gen)
    #Terms in the differential coming from the differential on the algebra:
    for x in answer.generators:
        dx = x.name.diff()
        for y in list(dx.keys()):
            ygen = SimpleDAGenerator(answer, y.left_idem.comp(), y.right_idem, y)
            answer.addDelta(x,ygen,x.idem1.toAlgElt(algebra),list(),dx[y])
    #Terms coming from multiplying by algebra elements on the right
    for x in answer.generators:
        xa = x.name
        could_multiply = algebra.getGeneratorsForIdem(left_idem = xa.right_idem)
        for b in could_multiply:
            if not b.isIdempotent():
                xab = xa*b
                for y in list(xab.keys()):
                    ygen = SimpleDAGenerator(answer,y.left_idem.comp(), y.right_idem, y)
                    answer.addDelta(x,ygen,x.idem1.toAlgElt(algebra),[b,],xab[y])
    #Terms coming from differential on CFDD(Id):
    chord_pairs = chordPairs(pmc, mult_one)
    for (sigma, rho) in chord_pairs:
        for x in algebra.getGeneratorsForIdem(left_idem = rho.right_idem):
            xgen = SimpleDAGenerator(answer,x.left_idem.comp(), x.right_idem, x)
            rhox = rho*x
            for y in list(rhox.keys()):
                ygen = SimpleDAGenerator(answer, y.left_idem.comp(), y.right_idem, y)
                # D-side output sigma, no A-side inputs.
                answer.addDelta(xgen, ygen, sigma, list(),rhox[y])
    return answer
def tensorDAid(P,Q,M,MP,MQ,morcx,f):
    """Input:
    --simple type D structures P and Q
    --a simple type DA structure M
    --the tensor products M\\boxtimes P and M\\boxtimes Q
    --A morphism f in Mor(P,Q).
    Returns: Id\\boxtimes f, where Id is the identity morphism of M.

    morcx is the morphism complex in which the resulting MorDtoDGenerator
    terms live.
    """
    answer = E0
    def search(start_gen, cur_dgen, cur_coeffs_a, inTarget = False):
        """Searching for an arrow in the box tensor product.
        - start_gen: starting generator in the box tensor product. The
        resulting arrow will start from here.
        - cur_dgen: current location in the type D structure.
        - cur_coeffs_a: current list of A-side inputs to the type DA
        structure (or alternatively, list of algebra outputs produced by
        the existing path through the type D structure).
        - inTarget: whether f has already been applied along this path
        (before applying f we walk in P, afterwards in Q).
        """
        # NOTE: this local `answer` shadows the outer accumulator; each call
        # returns its own contribution, summed by the caller.
        answer = E0
        start_dagen, start_dgen = start_gen
        cur_delta = M.delta(start_dagen, cur_coeffs_a)
        for (coeff_d, gen_to), ring_coeff in list(cur_delta.items()):
            if inTarget:
                # Completed arrow: record it as a term of Id \boxtimes f.
                startelt = DATensorDGenerator(MP,start_dagen,start_dgen)
                endelt = DATensorDGenerator(MQ,gen_to,cur_dgen)
                morph = MorDtoDGenerator(morcx, startelt, coeff_d, endelt)
                assert morph.source.parent == MP and morph.target.parent == MQ
                answer += 1*morph
        if M.deltaPrefixNS(start_dagen, cur_coeffs_a):
            # Only extend the path if the A-side inputs so far can still be a
            # prefix of a nontrivial delta of M.
            if not inTarget:
                for (coeff_out, dgen_to), ring_coeff in list(P.delta(cur_dgen).items()):
                    answer += search(start_gen, dgen_to, cur_coeffs_a + (coeff_out,), inTarget=False)
                for (coeff_out, dgen_to), ring_coeff in list(applyDMor(f,cur_dgen).items()): #APPLY f
                    answer += search(start_gen, dgen_to, cur_coeffs_a + (coeff_out,), inTarget=True)
            if inTarget:
                for (coeff_out, dgen_to), ring_coeff in list(Q.delta(cur_dgen).items()):
                    answer += search(start_gen, dgen_to, cur_coeffs_a + (coeff_out,), inTarget=True)
        return answer
    for x in MP.getGenerators():
        #Invoke search here with starting data coming from x.
        dagen, dgen = x
        answer += search(x, dgen, ())
    return answer
def mappingConeD(f, returnDicts = False):
    """Return the mapping cone of a morphism f (Element class) from one
    SimpleDStructure to another.

    If returnDicts is True, also return the translation dictionaries
    (cone, from_to_new, to_to_new, new_to_old) between generators of the
    source/target and generators of the cone.
    """
    #Extract some basic info from f
    # NOTE: assumes every generator of f has the same source/target parents;
    # the last key examined wins.
    for f0 in list(f.keys()):
        P = f0.source.parent #Source of f
        Q = f0.target.parent #Target of f
    answer = SimpleDStructure(F2,P.algebra)
    from_to_new = dict()  # generators of P -> generators of the cone
    to_to_new = dict()    # generators of Q -> generators of the cone
    new_to_old = dict()   # cone generators -> original generators
    #Add the generators
    for x in P.getGenerators():
        newx = DGenerator(answer, x.idem)
        from_to_new[x] = newx
        new_to_old[newx] = x
        answer.addGenerator(newx)
    for x in Q.getGenerators():
        newx = DGenerator(answer, x.idem)
        to_to_new[x] = newx
        new_to_old[newx] = x
        answer.addGenerator(newx)
    #Add the differential on P
    for x in P.getGenerators():
        dx = P.delta(x)
        for ay in list(dx.keys()):
            answer.addDelta(from_to_new[x],from_to_new[ay[1]],ay[0],dx[ay])
    # Add the differential on Q
    for x in Q.getGenerators():
        dx = Q.delta(x)
        for ay in list(dx.keys()):
            answer.addDelta(to_to_new[x],to_to_new[ay[1]],ay[0],dx[ay])
    # Add the off-diagonal part of the cone differential, given by f itself.
    for f0 in list(f.keys()):
        answer.addDelta(from_to_new[f0.source],to_to_new[f0.target],f0.coeff,f[f0])
    if returnDicts:
        return (answer, from_to_new, to_to_new, new_to_old)
    return answer
def isQIDmor(f):
    """Decide whether the type D morphism f is a quasi-isomorphism.

    f is a quasi-isomorphism exactly when it is a chain map (the cone passes
    testDelta) and its mapping cone becomes empty after tensoring with the
    augmentation DA bimodule and simplifying.
    """
    cone = mappingConeD(f)
    # If d^2 != 0 on the cone, f was not a chain map in the first place.
    if not cone.testDelta():
        return False
    aug = augmentationDA(cone.algebra.pmc)
    simplified_cone = aug.tensorD(cone)
    simplified_cone.simplify()
    return len(simplified_cone) == 0
def findQI(dMors):
    """Return the first element of dMors which is a quasi-isomorphism.

    Raises ValueError if no candidate is a quasi-isomorphism. (The previous
    `assert False` is stripped when Python runs with -O, which would have
    made the function silently return None.)
    """
    for f in dMors:
        if isQIDmor(f):
            return f
    raise ValueError("No quasi-isomorphism found among the candidates.")
def involutiveCx(P,Q, sanityTests = False, verbose = False):
    """Returns the (simplified) mapping cone complex computing involutive
    Floer homology of Y.
    Input: P, Q: type D modules for handlebodies so that CF^(Y) = H_*(Mor(P,Q))
    """
    #How this works:
    #Compute:
    #0. MP = azDA(pmc).tensorD(P), MQ = azDA(pmc).tensorD(Q)
    #1. A graded homotopy equivalence PhiPinv from P to MP
    #2. A graded homotopy equivalence PhiQ from MQ to Q
    #3. P.morToD(Q).simplify(find_homology_basis = True). Call result PQsimp
    #Let iota be the composition PQsimp -> (MP).morToD(MQ) -> P.morToD(Q)
    #given by f -> g = tensorDAid(P,Q,MP,MQ,f) -> PhiQ\circ g\circ PhiPinv
    #Compute iota on a basis of PQsimp. Result is a morphism of chain complexes.
    #Take mapping cone of Id+iota and take homology
    pmc = P.algebra.pmc
    M = azDA(pmc)
    if verbose:
        print("Lengths of (P,Q,M) are "+repr((len(P.getGenerators()),len(Q.getGenerators()),len(M.getGenerators()))))
    MP = M.tensorD(P) #This seems not to give something homotopy equivalent to P for Orland's example.
    if verbose:
        print("MP has length "+repr(len(MP.getGenerators())))
    MQ = M.tensorD(Q)
    if verbose:
        print("MQ has length "+repr(len(MQ.getGenerators())))
    PtoMPcx = P.morToD(MP) #This step tends to be very slow.
    if verbose:
        print("Computed PtoMPcx. Number of generators:"+repr(len(PtoMPcx.getGenerators())))
    PtoMQcx = P.morToD(MQ)
    if verbose:
        print("Computed PtoMQcx. Number of generators:"+repr(len(PtoMQcx.getGenerators())))
    MQtoQcx = MQ.morToD(Q)
    if verbose:
        print("Computed MQtoQcx. Number of generators:"+repr(len(MQtoQcx.getGenerators())))
    PtoMPcx.simplify(find_homology_basis = True) #This step also slow.
    if verbose:
        print("Simplified PtoMPcx. Number of generators: "+repr(len(PtoMPcx.getGenerators())))
    MQtoQcx.simplify(find_homology_basis = True)
    if verbose:
        print("Simplified MQtoQcx. Number of generators: "+repr(len(MQtoQcx.getGenerators())))
    # Homology classes of the simplified morphism complexes provide candidate
    # homotopy equivalences; pick ones that are quasi-isomorphisms.
    PhiPinv = findQI([x.prev_meaning for x in PtoMPcx.getGenerators()])
    if verbose:
        print("Found quasi-isomorphism PhiPinv")
    PhiQ = findQI([x.prev_meaning for x in MQtoQcx.getGenerators()])
    if verbose:
        print("Found quasi-isomorphism PhiQ")
    if sanityTests:
        assert isQIDmor(PhiPinv)
        assert isQIDmor(PhiQ)
    PQ = P.morToD(Q)
    MPtoMQcx = MP.morToD(MQ) #Presumably this is the limiting step.
    if verbose:
        print("Computed PQ, MPtoMQcx. Number of generators of (PQ, MPtoMQcx):"+repr((len(PQ.getGenerators()),len(MPtoMQcx.getGenerators()))))
    PQ.simplify(find_homology_basis = True)
    if verbose:
        print("Simplified PQ. Number of generators: "+repr(len(PQ.getGenerators())))
    PQtarg = P.morToD(Q)
    # Build iota generator by generator: tensor with the identity of M, then
    # conjugate by the homotopy equivalences.
    iota = SimpleChainMorphism(PQ,PQtarg)
    for gen_from in PQ.generators:
        gf_tens_id = tensorDAid(P,Q,M,MP,MQ,MPtoMQcx,gen_from.prev_meaning)
        if sanityTests:
            assert MPtoMQcx.diffElt(gf_tens_id) == E0
        comp1 = composeMor(gf_tens_id,PhiPinv, parent=PtoMQcx)
        if sanityTests:
            assert PtoMQcx.diffElt(comp1) == E0
        composedMap = composeMor(PhiQ,comp1, parent=PQtarg)
        if sanityTests:
            assert PQtarg.diffElt(composedMap) == E0
        for gen_to in list(composedMap.keys()):
            iota.addMorphism(gen_from, gen_to, composedMap[gen_to])
    if verbose:
        print("Computed iota")
    # The identity map PQ -> PQtarg, transported through prev_meaning.
    Id = SimpleChainMorphism(PQ,PQtarg)
    for gen_from in PQ.generators:
        for gen_to in list(gen_from.prev_meaning.keys()):
            new_gen_to = MorDtoDGenerator(PQtarg,gen_to.source,gen_to.coeff,gen_to.target)
            Id.addMorphism(gen_from, new_gen_to, gen_from.prev_meaning[gen_to])
    if sanityTests:
        #Sanity checks; should always pass
        assert Id.isQI()
        assert iota.isQI()
    idPlusIota = Id.sum(iota)
    if verbose:
        print("Computed Id + iota")
    mappingcone = idPlusIota.mappingConeCx()
    if verbose:
        print("Computed mapping cone.")
    mappingcone.simplify()
    return mappingcone
def invOfDCov(str_input, sanityTests=False, verbose=False):
    """Compute HF^ and HFI^ of the branched double cover of given knot.

    str_input has the same form as readBridgePresentation (examples can be
    found in data/input_12_FL.txt). The diagram is split after the full
    braid word.
    """
    num_crossings = len(readBridgePresentation(str_input).braid_word)
    return invOfDCovSplit(str_input, num_crossings,
                          sanityTests=sanityTests, verbose=verbose)
def invOfDCovSplit(str_input, split_index, sanityTests=False, verbose=False):
    """Compute HF^ and HFI^ of the branched double cover of given knot,
    splitting the diagram after split_index crossings to compute the involutive
    complex.
    Input has the same form as readBridgePresentation (which can be found in
    data/input_12_FL.txt).

    Returns the simplified involutive complex from involutiveCx.
    """
    bridgePres = readBridgePresentation(str_input)
    braidGrp = Braid(bridgePres.num_strands)
    slides = braidGrp.getArcslides(bridgePres.braid_word)
    slidesInv = [x.inverse() for x in slides]
    slidesDA = [ArcslideDA(x) for x in slides]
    slidesInvDA = [ArcslideDA(x) for x in slidesInv]
    slidesDA.reverse()
    # (Removed unused local `answer` present in the original.)
    cupD = BraidCap(bridgePres.start).openCap()
    capD = BraidCap(bridgePres.end).openCap()
    P = cupD
    Q = capD
    # Apply the first split_index (inverse) slides to the cup side...
    for slideDA in slidesInvDA[:split_index]:
        P = slideDA.tensorD(P)
        P.simplify()
    # ...and the remaining slides to the cap side.
    for slideDA in slidesDA[:len(slidesDA)-split_index]:
        Q = slideDA.tensorD(Q)
        Q.simplify()
    cx = P.morToD(Q)
    cx.simplify()
    if verbose:
        print("HF = "+repr(len(cx))+". Working on IHF next.")
    invcx = involutiveCx(P,Q, sanityTests=sanityTests, verbose=verbose)
    return invcx
def checkSplits(str_input):
    """Attempt to find best place to split up a bridge diagram for involutive
    computation.

    For each split index i, builds P (cup side, first i inverse slides) and Q
    (cap side, remaining slides), prints and collects
    (i, |P|, |Q|, |Mor(P,Q)| before simplify, |Mor(P,Q)| after simplify).
    """
    bridgePres = readBridgePresentation(str_input)
    braidGrp = Braid(bridgePres.num_strands)
    slides = braidGrp.getArcslides(bridgePres.braid_word)
    slidesInv = [x.inverse() for x in slides]
    slidesDA = [ArcslideDA(x) for x in slides]
    slidesInvDA = [ArcslideDA(x) for x in slidesInv]
    slidesDA.reverse()
    answer = list()
    cupD = BraidCap(bridgePres.start).openCap()
    capD = BraidCap(bridgePres.end).openCap()
    for i in range(len(bridgePres.braid_word)):
        P = cupD
        Q = capD
        for slideDA in slidesInvDA[:i]:
            P = slideDA.tensorD(P)
            P.simplify()
        for slideDA in slidesDA[:len(slidesDA)-i]:
            Q = slideDA.tensorD(Q)
            Q.simplify()
        cx = P.morToD(Q)
        cxlen = len(cx)
        cx.simplify()
        print((i,len(P.getGenerators()),len(Q.getGenerators()),cxlen,len(cx)))
        answer.append((i,len(P.getGenerators()),len(Q.getGenerators()),cxlen,len(cx)))
    return answer
| 14,774
| 41.335244
| 141
|
py
|
bfh_python
|
bfh_python-master/arcslideda.py
|
"""Producing type DA structures for arcslides, using local actions."""
from algebra import TensorGenerator
from dastructure import DAStructure, SimpleDAGenerator
from extendbyid import ExtendedDAStructure, LocalDAStructure
from hdiagram import getArcslideDiagram
from localpmc import LocalIdempotent, LocalStrandAlgebra, PMCSplitting
from pmc import Strands, StrandDiagram
from utility import memorize, subset
from utility import ACTION_LEFT, ACTION_RIGHT, F2
import itertools
class ArcslideDA(ExtendedDAStructure):
"""Responsible for producing a type DA structure for an arcslide, using
local actions.
"""
    def __init__(self, slide):
        """Specifies the arcslide to use. slide should be of type Arcslide.
        In addition to recording slide, construct the following:
        local_pmc1, mapping1 - restriction of starting pmc to location of slide.
        outer_pmc1, outer_mapping1 - complement of slide in starting pmc.
        local_pmc2, mapping2, outer_pmc2, outer_mapping2
          - same, but for ending pmc.
        Moreover, find pattern_fun and translator as appropriate for the case at
        hand.
        """
        self.slide = slide
        self.pmc1, self.pmc2 = slide.start_pmc, slide.end_pmc
        n = self.pmc1.n
        b1, c1, c2 = slide.b1, slide.c1, slide.c2
        b1p, c1p, c2p = [slide.to_r[p] for p in (b1, c1, c2)]
        # Note intervals (start, end) with start > end are ignored.
        # patterns_base specifies one of the four base patterns of arcslides.
        # translator gives the mapping between points in the base pattern and
        # points in the pattern at hand.
        # The case split below is on the direction of the slide (downward /
        # upward) and the relative positions of b1, c1, c2; translator is None
        # when the local pattern already matches the base pattern.
        if b1 == c1 + 1:  # downward
            if c2 == c1 + 2:  # short underslide downward
                local_cut1, local_cut2 = ([(c1, c2)], [(c1p, c2p)])
                patterns_fun = ArcslideDA._short_underslide_down
                if c1 == 0:
                    translator = ([-1, 0, 1, 2, 3], [-1, 0, 1, 2, 3])
                elif c2 == n - 1:
                    translator = ([0, 1, 2, 3, -1], [0, 1, 2, 3, -1])
                else:
                    translator = None
            elif c2 > c1:  # general underslide downward
                local_cut1, local_cut2 = (
                    [(c1, b1), (c2, c2)], [(c1p, c1p), (b1p, c2p)])
                patterns_fun = ArcslideDA._general_underslide_down
                if c1 == 0:
                    translator = ([-1, 0, 1, 2, 3, 4, 5],
                                  [-1, 0, 1, 2, 3, 4, 5])
                elif c2 == n - 1:
                    translator = ([0, 1, 2, 3, 4, 5, -1],
                                  [0, 1, 2, 3, 4, 5, -1])
                else:
                    translator = None
            else:  # c2 < c1, general overslide downward
                local_cut1, local_cut2 = (
                    [(c2, c2), (c1, b1)], [(b1p, c2p), (c1p, c1p)])
                patterns_fun = ArcslideDA._general_underslide_down
                if c2 == 0 and b1 == n - 1:
                    translator = ([2, 3, 4, -1, -1, 0, 1],
                                  [3, 4, -1, -1, 0, 1, 2])
                elif c2 == 0:
                    translator = ([2, 3, 4, 5, -1, 0, 1],
                                  [3, 4, 5, -1, 0, 1, 2])
                elif b1 == n - 1:
                    translator = ([3, 4, 5, -1, 0, 1, 2],
                                  [4, 5, -1, 0, 1, 2, 3])
                else:
                    translator = ([3, 4, 5, 6, 0, 1, 2], [4, 5, 6, 0, 1, 2, 3])
        elif b1 == c1 - 1:  # upward
            if c2 == c1 - 2:  # short underslide upward
                local_cut1, local_cut2 = ([(c2, c1)], [(c2p, c1p)])
                patterns_fun = ArcslideDA._short_underslide_up
                if c2 == 0:
                    translator = ([-1, 0, 1, 2, 3], [-1, 0, 1, 2, 3])
                elif c1 == n - 1:
                    translator = ([0, 1, 2, 3, -1], [0, 1, 2, 3, -1])
                else:
                    translator = None
            elif c2 < c1:  # general underslide upward
                local_cut1, local_cut2 = (
                    [(c2, c2), (b1, c1)], [(c2p, b1p), (c1p, c1p)])
                patterns_fun = ArcslideDA._general_underslide_up
                if c2 == 0:
                    translator = ([-1, 0, 1, 2, 3, 4, 5],
                                  [-1, 0, 1, 2, 3, 4, 5])
                elif c1 == n - 1:
                    translator = ([0, 1, 2, 3, 4, 5, -1],
                                  [0, 1, 2, 3, 4, 5, -1])
                else:
                    translator = None
            else:  # c2 > c1, general overslide upward
                local_cut1, local_cut2 = (
                    [(b1, c1), (c2, c2)], [(c1p, c1p), (c2p, b1p)])
                patterns_fun = ArcslideDA._general_underslide_up
                if b1 == 0 and c2 == n - 1:
                    translator = ([3, 4, -1, -1, 0, 1, 2],
                                  [2, 3, 4, -1, -1, 0, 1])
                elif b1 == 0:
                    translator = ([3, 4, 5, -1, 0, 1, 2],
                                  [2, 3, 4, 5, -1, 0, 1])
                elif c2 == n - 1:
                    translator = ([4, 5, -1, 0, 1, 2, 3],
                                  [3, 4, 5, -1, 0, 1, 2])
                else:
                    translator = ([4, 5, 6, 0, 1, 2, 3], [3, 4, 5, 6, 0, 1, 2])
        else:
            # All cases are covered. Should not happen.
            raise NotImplementedError(
                "This slide pattern is not yet implemented.")
        self.patterns_fun = patterns_fun
        self.translator = translator
        # Necesssary to get local DA structure.
        self.splitting1 = PMCSplitting(self.pmc1, local_cut1)
        self.splitting2 = PMCSplitting(self.pmc2, local_cut2)
        self.local_pmc1 = self.splitting1.local_pmc
        self.local_pmc2 = self.splitting2.local_pmc
        self.mapping1 = self.splitting1.local_mapping
        self.mapping2 = self.splitting2.local_mapping
        # Required so the left to right transition on the outside can proceed.
        assert self.splitting1.outer_pmc == self.splitting2.outer_pmc
        # Initiate the ExtendedDAStructure
        ExtendedDAStructure.__init__(
            self, self.getLocalDAStructure(), self.splitting1, self.splitting2)
        # With generators set, add grading. Any generator can serve as base_gen
        for gen in self.generators:
            base_gen = gen
            break
        self.registerHDiagram(getArcslideDiagram(self.slide), base_gen)
def getLocalDAStructure(self, seeds_only = False):
"""Returns the local type DA structure associated to slide. If
seeds_only is set to True, get a local DA structure with incomplete
da_action, that can be completed using the autocompleteda module.
"""
# Compute the set of arrow patterns
patterns_raw = self.patterns_fun(seeds_only = seeds_only)
if self.translator is not None:
patterns_raw = ArcslideDA._restrict_local_arrows(
patterns_raw, self.translator[0], self.translator[1])
arrow_patterns = {}
for pattern in patterns_raw:
coeffs_a = []
for i in range(len(pattern)-1):
coeffs_a.append(self.local_pmc2.sd(pattern[i]))
coeffs_a = tuple(coeffs_a)
if coeffs_a not in arrow_patterns:
arrow_patterns[coeffs_a] = []
arrow_patterns[coeffs_a].append(self.local_pmc1.sd(pattern[-1]))
# Now start construction of the local DA structure.
alg1 = LocalStrandAlgebra(F2, self.local_pmc1)
alg2 = LocalStrandAlgebra(F2, self.local_pmc2)
local_dastr = LocalDAStructure(F2, alg1, alg2)
# Mappings between local starting and ending PMC.
slide = self.slide
local_to_r = dict()
for i in range(slide.start_pmc.n):
if i in self.mapping1:
# to_r[i] must be in mapping2
local_to_r[self.mapping1[i]] = self.mapping2[slide.to_r[i]]
local_pair_to_r = dict()
for i in range(self.local_pmc1.n):
if i not in self.local_pmc1.endpoints:
local_pair_to_r[self.local_pmc1.pairid[i]] \
= self.local_pmc2.pairid[local_to_r[i]]
b1, c1 = self.slide.b1, self.slide.c1
local_b1, local_c1 = self.mapping1[b1], self.mapping1[c1]
b_pair1 = self.local_pmc1.pairid[local_b1]
c_pair1 = self.local_pmc1.pairid[local_c1]
# Compute the set of local generators. This includes all
# (l_idem, r_idem) where l_idem = r_idem (under the usual identification
# of pairs), or where l_idem has the c_pair and r_idem has the b_pair.
da_idems = []
num_pair = self.local_pmc1.num_pair
for idem in subset(list(range(num_pair))):
da_idems.append((list(idem), [local_pair_to_r[p] for p in idem]))
for idem in subset([p for p in range(num_pair)
if p != b_pair1 and p != c_pair1]):
da_idems.append((list(idem) + [c_pair1],
[local_pair_to_r[p]
for p in (list(idem) + [b_pair1])]))
for i in range(len(da_idems)):
l_idem, r_idem = da_idems[i]
local_dastr.addGenerator(SimpleDAGenerator(
local_dastr, LocalIdempotent(self.local_pmc1, l_idem),
LocalIdempotent(self.local_pmc2, r_idem), "%d" % i))
mod_gens = local_dastr.getGenerators()
# After having added all generators, create u_map:
local_dastr.auto_u_map()
# Add arrows according to arrow_pattern.
for coeffs_a in list(arrow_patterns.keys()):
if len(coeffs_a) == 1 and coeffs_a[0].isIdempotent():
continue
for coeff_d in arrow_patterns[coeffs_a]:
for x, y in itertools.product(mod_gens, mod_gens):
if DAStructure.idemMatchDA(x, y, coeff_d, coeffs_a):
local_dastr.addDelta(x, y, coeff_d, coeffs_a, 1)
return local_dastr
@staticmethod
def _restrict_local_arrows(patterns, point_map_d, point_map_a):
"""Given a list of patterns (in the format of patterns_raw) for more
general local case, restrict to a more special local case using a
mapping from points in the general local PMC to a special local PMC.
point_map_d and point_map_a specifies the point mappings on the D-side
and the A-side.
This function operates entirely by translation using the given point
map, it does not know about formats for local PMCs or local strand
diagrams.
"""
def translate(lst, mapping):
"""lst consists of either integers or pairs of integers. Translate
according to mapping. If any of the translated value is -1, return
None. Otherwise return the translated list.
"""
result = []
for entry in lst:
if isinstance(entry, int):
result.append(mapping[entry])
if result[-1] == -1:
return None
else: # entry must be a pair
result.append((mapping[entry[0]], mapping[entry[1]]))
if result[-1][0] == -1 or result[-1][1] == -1:
return None
return result
new_patterns = []
for pattern in patterns:
new_pattern = [translate(pattern_d, point_map_a)
for pattern_d in pattern[0:-1]]
new_pattern.append(translate(pattern[-1], point_map_d))
if all([entry != None for entry in new_pattern]):
new_patterns.append(new_pattern)
return new_patterns
# The next series of functions specify the local arrows. The format is as
# follows:
# All but the last element of the tuple is a list to be passed to the sd()
# function of the A-side local PMC, specifying the A-side inputs. The last
# element of the tuple is a list to be passed to the sd() function of the
# D-side local PMC, specifying the D-side output.
    @staticmethod
    def _short_underslide_down(seeds_only = False):
        """Short underslide going down, in the middle of PMC.

        Returns a list of arrow patterns. In each tuple, all but the last
        element are lists to be passed to sd() of the A-side local PMC (the
        A-side inputs), and the last element is a list to be passed to sd()
        of the D-side local PMC (the D-side output). If seeds_only is True,
        return only the initial patterns and seeds, to be completed using
        the autocompleteda module.
        """
        # Local PMC is 0*-1-2-3-4*, with 1 and 3 paired.
        patterns_raw = [
            #### Initial patterns
            ([(1, 2)],),
            ([], []), ([1], [1]), ([2], [2]), ([2], [1]), ([1, 2], [1, 2]),
            ([(2, 3)], [1]),
            ([(1, 2)], [(1, 3)]),
            ([(1, 3)], [(1, 3)]),
            ([2, (1, 3)], [2, (1, 3)]),
            ([(1, 2),(2, 3)], [(1, 2),(2, 3)]),
            ([(2, 3)], [(1, 2)], [(2, 3)]),
            ([(2, 3)], [(1, 3)], [(2, 3)]),
            #### Seeds for top
            ([(3, 4)], [(3, 4)]),
            ([2, (3, 4)], [2, (3, 4)]),
            #### Seeds for bottom
            ([(0, 1)], [(0, 1)]),
            ([2, (0, 1)], [2, (0, 1)]),
        ]
        if seeds_only:
            return patterns_raw
        patterns_raw += [
            # From seeds for top
            ([(2, 3), (3, 4)], [(1, 2)], [(1, 2), (2, 4)]),
            ([2, (3, 4)], [(2, 3)], [(1, 3)], [1, (2, 4)]),
            ([(2, 3)], [(1, 4)], [(2, 4)]),
            ([(2, 4)], [(3, 4)]),
            ([2, (3, 4)], [(2, 3)], [(1, 2)], [1, (2, 4)]),
            ([(1, 2), (2, 4)], [(1, 2), (2, 4)]),
            ([2, (1, 4)], [2, (1, 4)]),
            ([(1, 4)], [(1, 4)]),
            # From seeds for bottom
            ([(2, 3)], [(0, 2), (3, 4)], [(0, 1), (2, 4)]),
            ([(0, 2)], [(0, 3)]),
            ([(0, 1), (1, 4)], [(0, 1), (1, 4)]),
            ([(0, 1), (2, 4)], [(1, 2)], [1, (0, 4)]),
            ([2, (0, 1)], [(2, 3), (3, 4)], [(1, 2)], [1, (0, 4)]),
            ([2, (0, 1)], [(2, 3), (3, 4)], [(1, 2)], [(0, 1), (2, 4)]),
            ([1, (0, 2), (2, 4)], [1, (0, 2), (2, 4)]),
            ([2, (0, 1), (3, 4)], [2, (1, 3)], [1, 2, (0, 4)]),
            ([2, (0, 3), (3, 4)], [2, (0, 1), (1, 4)]),
            ([(3, 4)], [(0, 2)], [1, (0, 4)]),
            ([(0, 3)], [(0, 3)]),
            ([(0, 1), (2, 4)], [(1, 2)], [(0, 1), (2, 4)]),
            ([(0, 2), (3, 4)], [(0, 1), (1, 4)]),
            ([(2, 3)], [(0, 3), (3, 4)], [(0, 1), (2, 4)]),
            ([(2, 3), (3, 4)], [(0, 1), (1, 2)], [(0, 2), (2, 3), (3, 4)]),
            ([(0, 1), (2, 4)], [(1, 3)], [(0, 1), (2, 4)]),
            ([(0, 2), (2, 4)], [(0, 2), (2, 4)]),
            ([(2, 3)], [(0, 1), (1, 2)], [2, (3, 4)], [(0, 1), (2, 4)]),
            ([(2, 3)], [(0, 1), (1, 2)], [(0, 2), (2, 3)]),
            ([2, (3, 4)], [2, (0, 3)], [1, 2, (0, 4)]),
            ([2, (0, 1)], [(2, 3), (3, 4)], [(0, 1), (3, 4)]),
            ([(2, 4)], [(0, 3)], [(0, 1), (2, 4)]),
            ([(0, 1), (2, 4)], [(0, 1), (3, 4)]),
            ([(2, 4)], [(0, 2)], [(0, 1), (2, 4)]),
            ([(0, 3), (3, 4)], [(0, 1), (1, 4)]),
            ([(0, 2), (2, 3), (3, 4)], [(0, 2), (2, 3), (3, 4)]),
            ([(2, 4)], [(0, 2)], [1, (0, 4)]),
            ([(2, 4)], [(0, 3)], [1, (0, 4)]),
            ([(0, 1), (3, 4)], [(0, 1), (3, 4)]),
            ([2, (0, 3)], [2, (0, 3)]),
            ([2, (0, 1), (1, 4)], [2, (0, 1), (1, 4)]),
            ([(2, 3)], [(0, 1), (1, 2)], [(2, 3), (3, 4)], [(0, 1), (2, 4)]),
            ([2, (0, 1)], [1, (0, 2)]),
            ([(2, 3)], [(0, 1), (1, 4)], [(0, 1), (2, 4)]),
            ([(3, 4)], [(0, 3)], [1, (0, 4)]),
            ([(0, 4)], [(0, 4)]),
            ([(0, 1), (1, 2)], [2, (3, 4)], [(0, 1), (1, 4)]),
            ([(2, 3), (3, 4)], [(1, 2)], [2, (0, 1)], [1, (0, 2), (2, 4)]),
            ([2, (0, 3), (3, 4)], [1, (0, 2), (2, 4)]),
            ([2, (0, 1)], [2, (3, 4)], [(0, 1), (3, 4)]),
            ([2, (0, 4)], [2, (0, 4)]),
            ([2, (0, 1)], [(2, 3), (3, 4)], [(1, 3)], [(0, 1), (2, 4)]),
            ([(0, 1), (1, 2)], [(2, 3), (3, 4)], [(0, 1), (1, 4)]),
            ([(0, 1), (1, 2), (2, 4)], [(0, 1), (1, 2), (2, 4)]),
            ([1, (2, 4)], [(0, 1), (1, 2)], [1, (0, 2), (2, 4)]),
            ([(0, 1), (3, 4)], [(1, 2)], [1, (0, 4)]),
            ([(0, 1), (3, 4)], [(1, 3)], [1, (0, 4)]),
            ([(0, 1), (2, 4)], [(1, 3)], [1, (0, 4)]),
            ([2, (0, 1), (3, 4)], [2, (0, 1), (3, 4)]),
            ([2, (0, 1)], [(2, 3), (3, 4)], [(1, 3)], [1, (0, 4)]),
            ([(0, 2), (2, 3)], [(0, 2), (2, 3)]),
        ]
        return patterns_raw
    @staticmethod
    def _general_underslide_down(seeds_only = False):
        """Underslide of length >= 3 going down, in the middle of PMC.

        Returns a list of arrow patterns. In each tuple, all but the last
        element are lists to be passed to sd() of the A-side local PMC (the
        A-side inputs), and the last element is a list to be passed to sd()
        of the D-side local PMC (the D-side output). If seeds_only is True,
        return only the initial patterns and seeds, to be completed using
        the autocompleteda module.
        """
        # Local PMC at left (D-side) is 0*-1-2-3*, 4*-5-6*, with 1 and 5 paired.
        # Local PMC at right (A-side) is 0*-1-2*, 3*-4-5-6*, with 1 and 5
        # paired.
        patterns_raw = [
            #### Initial patterns
            ([(1, 2)],),
            ([], []), ([1], [1]), ([4], [2]), ([4], [1]), ([1, 4], [1, 2]),
            ([(4, 5)], [1]),
            #### Seeds for top
            ([(5, 6)], [(5, 6)]),
            ([4, (5, 6)], [2, (5, 6)]),
            #### Seeds for bottom
            ([(0, 1)], [(0, 1)]),
            ([4, (0, 1)], [2, (0, 1)]),
            #### Seeds for upper middle
            ([(3, 4)], [(4, 5)]),
            #### Seeds for lower middle
            ([4, (1, 2)], [1, (2, 3)]),
        ]
        if seeds_only:
            return patterns_raw
        patterns_raw += [
            # From seeds for top
            ([(4, 6)], [(5, 6)]),
            # From seeds for bottom
            ([4, (0, 1)], [(4, 5), (5, 6)], [(0, 1), (5, 6)]),
            ([4, (0, 1), (5, 6)], [2, (0, 1), (5, 6)]),
            ([4, (0, 1)], [4, (5, 6)], [(0, 1), (5, 6)]),
            ([(0, 1), (4, 6)], [(0, 1), (5, 6)]),
            ([4, (0, 1)], [1, (0, 2)]),
            ([(0, 1), (5, 6)], [(0, 1), (5, 6)]),
            # From seed for upper middle
            ([(3, 4), (4, 6)], [(1, 2), (4, 6)]),
            ([(0, 1), (3, 4)], [(0, 2), (4, 5)]),
            ([(3, 5)], [(4, 5)]),
            ([(3, 4), (4, 6)], [4, (0, 1)], [1, (0, 2), (4, 6)]),
            ([(3, 4), (4, 5)], [4, (0, 1), (5, 6)], [1, (0, 2), (4, 6)]),
            ([(0, 1), (3, 4)], [4, (5, 6)], [(0, 1), (4, 6)]),
            ([1, (3, 4)], [(4, 5), (5, 6)], [1, (4, 6)]),
            ([4, (0, 1), (3, 6)], [2, (0, 1), (4, 6)]),
            ([(0, 1), (3, 4)], [(4, 5), (5, 6)], [(0, 1), (4, 6)]),
            ([(3, 6)], [(4, 6)]),
            ([4, (3, 6)], [2, (4, 6)]),
            ([(3, 4), (5, 6)], [(4, 5), (5, 6)]),
            ([4, (0, 1), (3, 6)], [1, (0, 2), (4, 6)]),
            ([(4, 5)], [(3, 5), (5, 6)], [1, (4, 6)]),
            ([(0, 1), (3, 4), (5, 6)], [(0, 2), (4, 5), (5, 6)]),
            ([4, (3, 5)], [2, (4, 5)]),
            ([(0, 1), (3, 6)], [(0, 1), (4, 6)]),
            ([(4, 6)], [(3, 4)], [1, (4, 6)]),
            ([1, (3, 4)], [(1, 2), (4, 5)]),
            ([(4, 6)], [(3, 5)], [1, (4, 6)]),
            ([1, (3, 4)], [4, (5, 6)], [1, (4, 6)]),
            ([1, (3, 4)], [4, (0, 1), (5, 6)], [1, (0, 2), (4, 6)]),
            ([1, (3, 6)], [1, (4, 6)]),
            ([(3, 5), (5, 6)], [(4, 5), (5, 6)]),
            ([(0, 1), (3, 4), (4, 6)], [(0, 2), (4, 5), (5, 6)]),
            ([(3, 4), (4, 5)], [(1, 2), (4, 5)]),
            ([4, (0, 1)], [(3, 4), (4, 5), (5, 6)], [(0, 2), (4, 5), (5, 6)]),
            ([(4, 5)], [(3, 4), (5, 6)], [1, (4, 6)]),
            ([4, (3, 5), (5, 6)], [2, (4, 5), (5, 6)]),
            ([1, 4, (3, 6)], [1, 2, (4, 6)]),
            # From seed for lower middle
            ([(4, 5)], [(0, 2), (3, 4), (5, 6)], [1, (0, 3), (4, 6)]),
            ([(4, 6)], [(3, 5)], [(1, 2)], [(2, 3), (4, 6)]),
            ([(1, 2), (3, 6)], [(1, 3), (4, 6)]),
            ([(0, 2)], [(0, 3)]),
            ([(4, 5), (5, 6)], [(1, 2)], [(2, 3), (5, 6)]),
            ([1, 4, (0, 2), (3, 6)], [1, 2, (0, 3), (4, 6)]),
            ([(3, 4), (4, 5)], [4, (0, 1), (1, 2)], [4, (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([4, (0, 1), (1, 2)], [4, (5, 6)], [(0, 1), (2, 3), (5, 6)]),
            ([(0, 1), (1, 2), (3, 4)], [4, (5, 6)], [(0, 1), (1, 3), (4, 6)]),
            ([(0, 1), (1, 2), (4, 6)], [(0, 1), (2, 3), (5, 6)]),
            ([1, (0, 2)], [1, (0, 3)]),
            ([(4, 5), (5, 6)], [(1, 2), (3, 5)], [(2, 3), (4, 5), (5, 6)]),
            ([(1, 2), (4, 6)], [(3, 4)], [1, (2, 3), (4, 6)]),
            ([(3, 4), (4, 5)], [4, (1, 2)], [(2, 3), (4, 5)]),
            ([4, (1, 2), (3, 6)], [2, (1, 3), (4, 6)]),
            ([(1, 2), (3, 5)], [(1, 3), (4, 5)]),
            ([(1, 2), (3, 4), (4, 6)], [(1, 2), (2, 3), (4, 6)]),
            ([4, (0, 1), (1, 2)], [(4, 5), (5, 6)], [(0, 1), (2, 3), (5, 6)]),
            ([4, (1, 2), (3, 5)], [2, (1, 3), (4, 5)]),
            ([(4, 6)], [(0, 2), (3, 4)], [1, (0, 3), (4, 6)]),
            ([1, (3, 4)], [(0, 2), (4, 5), (5, 6)], [1, (0, 3), (4, 6)]),
            ([(4, 5)], [(1, 2), (3, 5)], [(2, 3), (4, 5)]),
            ([(0, 2), (3, 6)], [(0, 3), (4, 6)]),
            ([(1, 2), (4, 6)], [(3, 5)], [1, (2, 3), (4, 6)]),
            ([4, (0, 1), (1, 2)], [(3, 4), (4, 5), (5, 6)], [(0, 2), (2, 3), (4, 5), (5, 6)]),
            ([(0, 2), (4, 5)], [(3, 5), (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(1, 2), (3, 4), (4, 5)], [(1, 2), (2, 3), (4, 5)]),
            ([(1, 2), (4, 5)], [1, (2, 3)]),
            ([(0, 2), (3, 4), (5, 6)], [(0, 3), (4, 5), (5, 6)]),
            ([(4, 5)], [(0, 1), (1, 2), (3, 4)], [4, (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(0, 2), (4, 5)], [(3, 5), (5, 6)], [1, (0, 3), (4, 6)]),
            ([(3, 4), (4, 5)], [(4, 5), (5, 6)], [(1, 2)], [(2, 3), (4, 6)]),
            ([(0, 2), (4, 6)], [(0, 3), (5, 6)]),
            ([1, (3, 4), (4, 6)], [(1, 2), (4, 5)], [1, (2, 3), (4, 6)]),
            ([(3, 4), (4, 5)], [(1, 2), (4, 6)], [(2, 3), (4, 6)]),
            ([4, (0, 1)], [(1, 2), (3, 4), (4, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([4, (0, 1), (1, 2)], [2, (0, 1), (1, 3)]),
            ([(0, 1), (1, 2), (3, 4), (4, 6)], [(0, 2), (2, 3), (4, 5), (5, 6)]),
            ([(0, 1), (1, 2)], [(0, 1), (1, 3)]),
            ([1, (4, 6)], [(1, 2), (3, 5)], [1, (2, 3), (4, 6)]),
            ([1, (4, 6)], [(1, 2), (3, 4)], [1, (2, 3), (4, 6)]),
            ([(0, 2), (4, 5)], [(3, 4), (5, 6)], [1, (0, 3), (4, 6)]),
            ([(4, 5)], [(0, 1), (1, 2), (3, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([4, (0, 2)], [(0, 1), (2, 3)]),
            ([(0, 1), (4, 6)], [(1, 2), (3, 5)], [(0, 1), (2, 3), (4, 6)]),
            ([(0, 2), (3, 5)], [(0, 3), (4, 5)]),
            ([(0, 1), (3, 4), (4, 6)], [(1, 2), (4, 5)], [(0, 1), (2, 3), (4, 6)]),
            ([1, (0, 2), (3, 4)], [(4, 5), (5, 6)], [1, (0, 3), (4, 6)]),
            ([(4, 5)], [(3, 5), (5, 6)], [(0, 1), (1, 2)], [(0, 1), (2, 3), (4, 6)]),
            ([1, 4, (0, 2)], [1, 2, (0, 3)]),
            ([(3, 4), (4, 6)], [(4, 5)], [(0, 1), (1, 2)], [(0, 1), (2, 3), (4, 6)]),
            ([(0, 2), (4, 6)], [(3, 5)], [(0, 1), (2, 3), (4, 6)]),
            ([4, (0, 2), (5, 6)], [(0, 1), (2, 3), (5, 6)]),
            ([4, (0, 2)], [1, (0, 3)]),
            ([(3, 4), (4, 5)], [(0, 2), (4, 5), (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(0, 2), (3, 4), (4, 6)], [(0, 1), (1, 3), (4, 6)]),
            ([(0, 2), (3, 4), (4, 5)], [4, (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(4, 5)], [(0, 1), (1, 2), (3, 4)], [(0, 2), (2, 3), (4, 5)]),
            ([4, (0, 1), (1, 2)], [1, (0, 2), (2, 3)]),
            ([(1, 2), (3, 4)], [(1, 3), (4, 5)]),
            ([4, (0, 2), (3, 5)], [2, (0, 3), (4, 5)]),
            ([(3, 4), (4, 5)], [(1, 2), (4, 5)], [(2, 3), (4, 5)]),
            ([(3, 4), (4, 6)], [(4, 5)], [(1, 2)], [(2, 3), (4, 6)]),
            ([(0, 2), (4, 5)], [1, (0, 3)]),
            ([(4, 5), (5, 6)], [(1, 2), (3, 4)], [(2, 3), (4, 5), (5, 6)]),
            ([(0, 2), (4, 5), (5, 6)], [(0, 1), (2, 3), (5, 6)]),
            ([(4, 5)], [(3, 5), (5, 6)], [(1, 2)], [(2, 3), (4, 6)]),
            ([(0, 2), (4, 5)], [(0, 1), (2, 3)]),
            ([(4, 6)], [(0, 2), (3, 5)], [(0, 1), (2, 3), (4, 6)]),
            ([(3, 4), (4, 6)], [(0, 2), (4, 5)], [(0, 1), (2, 3), (4, 6)]),
            ([1, (3, 4), (4, 6)], [4, (1, 2)], [1, (2, 3), (4, 6)]),
            ([(3, 4), (4, 5)], [(4, 5), (5, 6)], [(0, 1), (1, 2)], [(0, 1), (2, 3), (4, 6)]),
            ([(3, 4), (4, 5)], [(0, 1), (1, 2), (4, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(3, 4), (4, 5)], [4, (0, 1), (1, 2)], [(0, 2), (2, 3), (4, 5)]),
            ([(0, 1), (3, 4), (4, 6)], [4, (1, 2)], [1, (0, 3), (4, 6)]),
            ([(1, 2), (3, 4), (4, 6)], [4, (0, 1)], [1, (0, 2), (2, 3), (4, 6)]),
            ([(4, 5)], [(1, 2)], [(2, 3)]),
            ([1, (0, 2), (3, 6)], [1, (0, 3), (4, 6)]),
            ([4, (1, 2)], [2, (1, 3)]),
            ([(3, 4), (4, 5)], [4, (0, 1), (1, 2)], [(4, 5), (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(0, 2), (3, 4), (4, 5)], [(0, 2), (2, 3), (4, 5)]),
            ([(4, 5)], [(0, 1), (1, 2), (3, 4)], [(4, 5), (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([4, (0, 1), (1, 2), (3, 6)], [2, (0, 1), (1, 3), (4, 6)]),
            ([(3, 4), (4, 5), (5, 6)], [4, (1, 2)], [(2, 3), (4, 5), (5, 6)]),
            ([(4, 6)], [(0, 2), (3, 5)], [1, (0, 3), (4, 6)]),
            ([4, (0, 1), (1, 2), (3, 6)], [1, (0, 2), (2, 3), (4, 6)]),
            ([(1, 2), (4, 5)], [(3, 4), (5, 6)], [1, (2, 3), (4, 6)]),
            ([(4, 5)], [(1, 2), (3, 6)], [(2, 3), (4, 6)]),
            ([4, (0, 2), (5, 6)], [2, (0, 3), (5, 6)]),
            ([4, (0, 2)], [2, (0, 3)]),
            ([(0, 2), (5, 6)], [(0, 3), (5, 6)]),
            ([(1, 2), (4, 5)], [(3, 5), (5, 6)], [1, (2, 3), (4, 6)]),
            ([(0, 1), (4, 6)], [(1, 2), (3, 4)], [1, (0, 3), (4, 6)]),
            ([4, (0, 2), (3, 5), (5, 6)], [2, (0, 3), (4, 5), (5, 6)]),
            ([(0, 1), (4, 6)], [(1, 2), (3, 5)], [1, (0, 3), (4, 6)]),
            ([4, (0, 2), (3, 6)], [2, (0, 3), (4, 6)]),
            ([1, (0, 2), (3, 4)], [4, (5, 6)], [1, (0, 3), (4, 6)]),
            ([(0, 1), (3, 4), (4, 6)], [(1, 2), (4, 5)], [1, (0, 3), (4, 6)]),
            ([(0, 2), (3, 4)], [(0, 3), (4, 5)]),
            ([(0, 1), (1, 2), (3, 6)], [(0, 1), (1, 3), (4, 6)]),
            ([(0, 2), (3, 4), (4, 5), (5, 6)], [(0, 2), (2, 3), (4, 5), (5, 6)]),
            ([(4, 5)], [(1, 2), (3, 4)], [(2, 3), (4, 5)]),
            ([(1, 2)], [(1, 3)]),
            ([(0, 2), (3, 5), (5, 6)], [(0, 3), (4, 5), (5, 6)]),
            ([(1, 2), (3, 4), (4, 5)], [4, (0, 1), (5, 6)], [1, (0, 2), (2, 3), (4, 6)]),
            ([(3, 4), (4, 5), (5, 6)], [4, (0, 1), (1, 2)], [(0, 2), (2, 3), (4, 5), (5, 6)]),
            ([1, (3, 4)], [4, (0, 2), (5, 6)], [1, (0, 3), (4, 6)]),
            ([(0, 2), (3, 4), (4, 6)], [(4, 5)], [(0, 1), (2, 3), (4, 6)]),
            ([(4, 5)], [(0, 1), (1, 2)], [(0, 1), (2, 3)]),
            ([(4, 5)], [(0, 2), (3, 5), (5, 6)], [1, (0, 3), (4, 6)]),
            ([4, (0, 2)], [(3, 4), (4, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(4, 5), (5, 6)], [(0, 1), (1, 2)], [(0, 1), (2, 3), (5, 6)]),
            ([(4, 5)], [(0, 2), (3, 5), (5, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(0, 2), (4, 6)], [(3, 5)], [1, (0, 3), (4, 6)]),
            ([(1, 2), (4, 6)], [(2, 3), (5, 6)]),
            ([(0, 1), (1, 2), (3, 4)], [(4, 5), (5, 6)], [(0, 1), (1, 3), (4, 6)]),
            ([(0, 2), (4, 6)], [(3, 4)], [1, (0, 3), (4, 6)]),
            ([(4, 6)], [(3, 5)], [(0, 1), (1, 2)], [(0, 1), (2, 3), (4, 6)]),
            ([(3, 4), (4, 5), (5, 6)], [(1, 2), (4, 5)], [(2, 3), (4, 5), (5, 6)]),
            ([4, (0, 2), (3, 6)], [(0, 1), (2, 3), (4, 6)]),
            ([(4, 5), (5, 6)], [(0, 1), (1, 2), (3, 4)], [(0, 2), (2, 3), (4, 5), (5, 6)]),
        ]
        return patterns_raw
    @staticmethod
    def _short_underslide_up(seeds_only = False):
        """Short underslide going up, in the middle of PMC.

        Returns a list of arrow patterns. In each tuple, all but the last
        element are lists to be passed to sd() of the A-side local PMC (the
        A-side inputs), and the last element is a list to be passed to sd()
        of the D-side local PMC (the D-side output). If seeds_only is True,
        return only the initial patterns and seeds, to be completed using
        the autocompleteda module.
        """
        # Local PMC is 0*-1-2-3-4*, with 1 and 3 paired.
        patterns_raw = [
            #### Initial patterns
            ([(2, 3)],),
            ([], []), ([1], [1]), ([2], [2]), ([2], [1]), ([1, 2], [1, 2]),
            ([(1, 2)], [1]),
            ([(2, 3)], [(1, 3)]),
            ([(1, 3)], [(1, 3)]),
            ([2, (1, 3)], [2, (1, 3)]),
            ([(1, 2),(2, 3)], [(1, 2),(2, 3)]),
            ([(2, 3)], [(1, 2)], [(1, 2)]),
            ([(1, 3)], [(1, 2)], [(1, 2)]),
            #### Seeds for top
            ([(3, 4)], [(3, 4)]),
            ([2, (3, 4)], [2, (3, 4)]),
            #### Seeds for bottom
            ([(0, 1)], [(0, 1)]),
            ([2, (0, 1)], [2, (0, 1)]),
        ]
        if seeds_only:
            return patterns_raw
        patterns_raw += [
            # From seeds for top
            ([(2, 3), (3, 4)], [(1, 2)], [(1, 2), (2, 4)]),
            ([(2, 4)], [(1, 4)]),
            ([(1, 4)], [(1, 4)]),
            ([2, (1, 4)], [2, (1, 4)]),
            ([2, (3, 4)], [1, (2, 4)]),
            ([(1, 2), (2, 4)], [(1, 2), (2, 4)]),
            # From seeds for bottom
            ([(1, 3)], [1, (0, 2)], [1, (0, 2)]),
            ([(1, 3)], [(0, 2), (3, 4)], [1, (0, 4)]),
            ([(2, 3)], [(0, 2), (3, 4)], [(0, 2), (3, 4)]),
            ([(0, 2), (3, 4)], [(0, 1), (3, 4)]),
            ([(1, 4)], [(0, 1)], [1, (0, 4)]),
            ([(0, 1), (1, 2), (2, 4)], [(0, 1), (1, 2), (2, 4)]),
            ([(0, 1), (1, 2)], [(2, 3), (3, 4)], [(1, 2)], [(0, 2), (3, 4)]),
            ([(1, 3)], [(0, 2), (3, 4)], [(0, 2), (3, 4)]),
            ([(0, 2)], [(0, 1)]),
            ([1, (0, 2), (2, 4)], [1, (0, 2), (2, 4)]),
            ([(0, 1), (1, 4)], [(1, 2)], [(0, 2), (3, 4)]),
            ([(0, 3)], [(0, 3)]),
            ([2, (0, 1), (1, 4)], [1, (0, 2), (2, 4)]),
            ([2, (0, 1), (1, 4)], [2, (0, 3), (3, 4)]),
            ([(0, 2), (2, 3), (3, 4)], [(0, 2), (2, 3), (3, 4)]),
            ([(0, 1), (2, 4)], [(1, 2)], [(0, 2), (3, 4)]),
            ([(2, 3), (3, 4)], [1, (0, 2)], [1, (0, 2), (2, 4)]),
            ([(0, 1), (2, 4)], [(0, 3), (3, 4)]),
            ([2, (1, 3)], [2, (0, 1), (3, 4)], [1, 2, (0, 4)]),
            ([(2, 3)], [(0, 1), (1, 2)], [(0, 1), (1, 2)]),
            ([(0, 2), (2, 4)], [(0, 2), (2, 4)]),
            ([(2, 4)], [(0, 1)], [1, (0, 4)]),
            ([(0, 1), (1, 4)], [(0, 3), (3, 4)]),
            ([(2, 4)], [(0, 2)], [1, (0, 4)]),
            ([2, (0, 1)], [(2, 3), (3, 4)], [(1, 2)], [(0, 2), (3, 4)]),
            ([(1, 3)], [(0, 1), (3, 4)], [1, (0, 4)]),
            ([(2, 3)], [(0, 1), (3, 4)], [1, (0, 4)]),
            ([(0, 1), (1, 2)], [(2, 3), (3, 4)], [(0, 3), (3, 4)]),
            ([2, (0, 3), (3, 4)], [2, (0, 3), (3, 4)]),
            ([(1, 4)], [(0, 2)], [(0, 2), (3, 4)]),
            ([(0, 1), (1, 2)], [2, (3, 4)], [(0, 1), (3, 4)]),
            ([(2, 3), (3, 4)], [(0, 1), (1, 2)], [(0, 1), (1, 2), (2, 4)]),
            ([(0, 1), (3, 4)], [(0, 1), (3, 4)]),
            ([2, (0, 3)], [2, (0, 3)]),
            ([(0, 3), (3, 4)], [(1, 2)], [(0, 2), (3, 4)]),
            ([(1, 4)], [(0, 2)], [1, (0, 4)]),
            ([(0, 4)], [(0, 4)]),
            ([2, (1, 4)], [2, (0, 1)], [1, 2, (0, 4)]),
            ([(2, 3)], [1, (0, 2)], [1, (0, 2)]),
            ([2, (0, 4)], [2, (0, 4)]),
            ([2, (0, 1)], [2, (3, 4)], [(0, 1), (3, 4)]),
            ([(2, 3)], [(0, 2), (3, 4)], [1, (0, 4)]),
            ([(0, 3)], [(1, 2)], [(0, 2)]),
            ([2, (0, 1)], [(2, 3), (3, 4)], [(0, 3), (3, 4)]),
            ([(1, 3)], [(0, 1), (1, 2)], [(0, 1), (1, 2)]),
            ([(2, 4)], [(0, 2)], [(0, 2), (3, 4)]),
            ([(0, 3), (3, 4)], [(0, 3), (3, 4)]),
            ([2, (0, 1), (3, 4)], [2, (0, 1), (3, 4)]),
            ([(0, 2), (2, 3)], [(0, 2), (2, 3)]),
        ]
        return patterns_raw
    @staticmethod
    def _general_underslide_up(seeds_only = False):
        """Underslide of length >= 3 going up, in the middle of PMC.

        Returns a list of arrow patterns. In each tuple, all but the last
        element are lists to be passed to sd() of the A-side local PMC (the
        A-side inputs), and the last element is a list to be passed to sd()
        of the D-side local PMC (the D-side output). If seeds_only is True,
        return only the initial patterns and seeds, to be completed using
        the autocompleteda module.
        """
        # Local PMC at left (D-side) is 0*-1-2*, 3*-4-5-6*, with 1 and 5 paired.
        # Local PMC at right (A-side) is 0*-1-2-3*, 4*-5-6*, with 1 and 5
        # paired.
        patterns_raw = [
            #### Initial patterns
            ([(4, 5)],),
            ([], []), ([1], [1]), ([2], [4]), ([2], [1]), ([1, 2], [1, 4]),
            ([(1, 2)], [1]),
            #### Seeds for top
            ([(5, 6)], [(5, 6)]),
            ([2, (5, 6)], [4, (5, 6)]),
            #### Seeds for bottom
            ([(0, 1)], [(0, 1)]),
            ([2, (0, 1)], [4, (0, 1)]),
            #### Seed for upper middle
            ([2, (4, 5)], [1, (3, 4)]),
            #### Seed for lower middle
            ([(2, 3)], [(1, 2)]),
        ]
        if seeds_only:
            return patterns_raw
        patterns_raw += [
            # From seeds for top
            ([2, (5, 6)], [1, (4, 6)]),
            # From seeds for bottom
            ([2, (0, 1)], [2, (5, 6)], [(0, 1), (5, 6)]),
            ([(0, 2), (5, 6)], [(0, 1), (5, 6)]),
            ([2, (0, 1), (5, 6)], [4, (0, 1), (5, 6)]),
            ([(0, 2)], [(0, 1)]),
            ([(0, 1), (1, 2)], [2, (5, 6)], [(0, 1), (5, 6)]),
            ([(0, 1), (5, 6)], [(0, 1), (5, 6)]),
            # From seed for upper middle
            ([2, (4, 5)], [4, (3, 5)]),
            ([1, (4, 6)], [1, (3, 6)]),
            ([(0, 2), (4, 5), (5, 6)], [(0, 1), (3, 4), (5, 6)]),
            ([(4, 5)], [(3, 5)]),
            ([(0, 1), (1, 2), (4, 6)], [(0, 1), (3, 4), (5, 6)]),
            ([2, (4, 6)], [4, (3, 6)]),
            ([2, (0, 1)], [2, (4, 5), (5, 6)], [(0, 1), (3, 4), (5, 6)]),
            ([(4, 5), (5, 6)], [(1, 2)], [(3, 4), (5, 6)]),
            ([(1, 2), (4, 6)], [(3, 4), (5, 6)]),
            ([(0, 1), (4, 6)], [(0, 1), (3, 6)]),
            ([1, 2, (4, 6)], [1, 4, (3, 6)]),
            ([(4, 5)], [(1, 2)], [(3, 4)]),
            ([(1, 2), (4, 5)], [1, (3, 4)]),
            ([2, (0, 1), (4, 6)], [4, (0, 1), (3, 6)]),
            ([2, (4, 6)], [1, (3, 6)]),
            ([(0, 1), (1, 2)], [2, (4, 5), (5, 6)], [(0, 1), (3, 4), (5, 6)]),
            ([(0, 2), (4, 5)], [(0, 1), (3, 4)]),
            ([2, (4, 6)], [(3, 4), (5, 6)]),
            ([(4, 5), (5, 6)], [(3, 5), (5, 6)]),
            ([(4, 6)], [(3, 6)]),
            ([2, (4, 5), (5, 6)], [1, (3, 4), (4, 6)]),
            ([(1, 2), (4, 6)], [1, (3, 6)]),
            ([2, (0, 1), (4, 6)], [(0, 1), (3, 4), (5, 6)]),
            ([2, (4, 5), (5, 6)], [4, (3, 5), (5, 6)]),
            ([(0, 2), (4, 6)], [(0, 1), (3, 6)]),
            ([(4, 5)], [(0, 1), (1, 2)], [(0, 1), (3, 4)]),
            ([(4, 5), (5, 6)], [(0, 1), (1, 2)], [(0, 1), (3, 4), (5, 6)]),
            # From seed for lower middle
            ([2, (0, 3), (4, 5), (5, 6)], [4, (0, 2), (3, 5), (5, 6)]),
            ([2, (0, 3), (5, 6)], [4, (0, 2), (5, 6)]),
            ([(2, 3), (5, 6)], [(0, 1)], [1, (0, 2), (4, 6)]),
            ([(1, 3), (4, 5)], [(1, 2)], [(1, 2), (3, 4)]),
            ([(0, 2), (4, 5), (5, 6)], [(1, 2), (2, 3)], [(0, 2), (3, 4), (5, 6)]),
            ([(1, 3)], [(0, 2), (4, 5)], [1, (0, 2), (3, 4)]),
            ([2, (4, 5)], [(0, 1), (1, 2), (2, 3)], [(0, 1), (1, 2), (3, 4)]),
            ([2, (1, 3)], [2, (0, 1), (4, 6)], [1, 4, (0, 2), (3, 6)]),
            ([(0, 3), (4, 5)], [(0, 2), (3, 5)]),
            ([(0, 2), (2, 3)], [2, (4, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(1, 3), (4, 5)], [(0, 2), (5, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(2, 3), (4, 5)], [(0, 1), (1, 2)], [(0, 1), (1, 2), (3, 4)]),
            ([(0, 1), (1, 2), (2, 3)], [2, (5, 6)], [(0, 1), (1, 2), (4, 6)]),
            ([(2, 3), (4, 5)], [(0, 2), (5, 6)], [1, (0, 2), (3, 6)]),
            ([2, (1, 3), (4, 6)], [2, (0, 1)], [1, 4, (0, 2), (3, 6)]),
            ([(0, 1), (1, 2), (4, 6)], [(1, 2), (2, 3)], [(0, 2), (3, 4), (5, 6)]),
            ([1, 2, (0, 3)], [1, 4, (0, 2)]),
            ([(0, 1), (1, 3), (4, 6)], [(1, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (0, 3), (4, 6)], [4, (0, 2), (3, 6)]),
            ([(4, 5)], [(1, 3)], [(0, 2)], [(0, 2), (3, 4)]),
            ([(2, 3)], [(0, 2), (4, 6)], [1, (0, 2), (3, 6)]),
            ([(0, 1), (2, 3)], [(5, 6)], [(0, 2), (5, 6)]),
            ([(0, 1), (1, 2)], [2, (4, 5), (5, 6)], [(1, 2), (2, 3)], [(0, 2), (3, 4), (5, 6)]),
            ([(0, 1), (1, 3)], [(1, 2), (4, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(2, 3)], [(0, 1)], [1, (0, 2)]),
            ([2, (0, 1), (1, 3), (4, 6)], [1, (0, 2), (3, 4), (4, 6)]),
            ([(1, 3), (4, 6)], [(1, 2), (3, 6)]),
            ([(1, 2), (4, 6)], [(0, 2), (2, 3)], [(0, 2), (3, 4), (5, 6)]),
            ([(1, 3), (4, 5)], [(0, 1), (1, 2)], [(0, 1), (1, 2), (3, 4)]),
            ([1, (4, 6)], [(0, 1), (1, 3)], [1, (0, 2), (3, 6)]),
            ([(1, 2), (2, 3), (4, 6)], [(1, 2), (3, 4), (4, 6)]),
            ([2, (0, 1)], [(2, 3), (4, 5), (5, 6)], [(1, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (0, 3), (4, 5)], [4, (0, 2), (3, 5)]),
            ([2, (0, 1), (1, 3)], [1, 2, (4, 6)], [1, 4, (0, 2), (3, 6)]),
            ([(1, 2)], [(0, 2), (2, 3), (4, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(4, 5)], [(0, 1), (1, 2)], [(1, 2), (2, 3)], [(0, 2), (3, 4)]),
            ([2, (1, 3), (4, 5)], [2, (0, 1), (5, 6)], [1, 4, (0, 2), (3, 6)]),
            ([(4, 5), (5, 6)], [(0, 1), (1, 3)], [(1, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (1, 3)], [4, (1, 2)]),
            ([1, (2, 3)], [(1, 2), (4, 5)]),
            ([2, (4, 5)], [(1, 2), (2, 3)], [(1, 2), (3, 4)]),
            ([(0, 2), (2, 3), (4, 5), (5, 6)], [(0, 1), (1, 2), (3, 4), (4, 6)]),
            ([(2, 3)], [(0, 2)], [1, (0, 2)]),
            ([(0, 2), (4, 5)], [(1, 2), (2, 3)], [(0, 2), (3, 4)]),
            ([(2, 3)], [(0, 1), (4, 6)], [1, (0, 2), (3, 6)]),
            ([1, (0, 2), (2, 3)], [2, (5, 6)], [1, (0, 2), (4, 6)]),
            ([(4, 5), (5, 6)], [(1, 2)], [(0, 2), (2, 3)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (4, 5), (5, 6)], [(1, 2), (2, 3)], [(1, 2), (3, 4), (4, 6)]),
            ([(1, 2), (4, 5)], [1, (0, 2), (2, 3)], [1, (0, 2), (3, 4)]),
            ([(0, 1), (1, 3)], [(1, 2), (4, 6)], [1, (0, 2), (3, 6)]),
            ([(0, 1), (1, 3)], [(1, 2), (4, 5)], [1, (0, 2), (3, 4)]),
            ([(0, 1), (2, 3)], [(4, 5), (5, 6)], [(1, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([(1, 3), (4, 5)], [(0, 1), (5, 6)], [1, (0, 2), (3, 6)]),
            ([(2, 3), (4, 5), (5, 6)], [(0, 1), (1, 2)], [(0, 1), (1, 2), (3, 4), (4, 6)]),
            ([(4, 5)], [(1, 2)], [(0, 2), (2, 3)], [(0, 2), (3, 4)]),
            ([(1, 3), (4, 6)], [(0, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([(0, 2), (2, 3)], [(0, 2), (4, 5)]),
            ([1, (0, 3)], [1, (0, 2)]),
            ([(4, 5), (5, 6)], [(1, 3)], [(0, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([(1, 3), (4, 5)], [(1, 2), (3, 5)]),
            ([(2, 3), (5, 6)], [(1, 2), (4, 6)]),
            ([2, (1, 3), (4, 5)], [4, (1, 2), (3, 5)]),
            ([2, (0, 1)], [2, (4, 5), (5, 6)], [(1, 2), (2, 3)], [(0, 2), (3, 4), (5, 6)]),
            ([(2, 3), (4, 5)], [(0, 1), (5, 6)], [1, (0, 2), (3, 6)]),
            ([(0, 1), (2, 3)], [(1, 2), (4, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(2, 3), (4, 6)], [(0, 1)], [1, (0, 2), (3, 6)]),
            ([(0, 3), (4, 5), (5, 6)], [(1, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (0, 3), (4, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(1, 2), (4, 5)], [(0, 2), (2, 3), (5, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (4, 5), (5, 6)], [(0, 1), (1, 2), (2, 3)], [(0, 1), (1, 2), (3, 4), (4, 6)]),
            ([(0, 1), (2, 3)], [(0, 2), (4, 5)]),
            ([(2, 3), (4, 6)], [(0, 2)], [1, (0, 2), (3, 6)]),
            ([(2, 3), (4, 5)], [(1, 2)], [(1, 2), (3, 4)]),
            ([(0, 1), (1, 2)], [(2, 3), (4, 5), (5, 6)], [(0, 2), (3, 5), (5, 6)]),
            ([(0, 1), (1, 3)], [1, (4, 6)], [1, (0, 2), (3, 6)]),
            ([(0, 1), (2, 3), (4, 6)], [(0, 2), (3, 5), (5, 6)]),
            ([2, (1, 3), (4, 6)], [4, (1, 2), (3, 6)]),
            ([(1, 3)], [(1, 2)]),
            ([2, (0, 3)], [4, (0, 2)]),
            ([1, (2, 3)], [(0, 1), (5, 6)], [1, (0, 2), (4, 6)]),
            ([2, (0, 1)], [(1, 2), (2, 3), (4, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(0, 2), (2, 3), (5, 6)], [2, (4, 5)], [1, (0, 2), (3, 4), (4, 6)]),
            ([(0, 1), (1, 2), (2, 3)], [2, (4, 5), (5, 6)], [(0, 1), (1, 2), (3, 4), (4, 6)]),
            ([(0, 1), (1, 2)], [(2, 3), (5, 6)], [(0, 2), (5, 6)]),
            ([(2, 3), (5, 6)], [(0, 2)], [1, (0, 2), (4, 6)]),
            ([(4, 5)], [(0, 1), (1, 3)], [(1, 2)], [(0, 2), (3, 4)]),
            ([(0, 3), (4, 5), (5, 6)], [(0, 2), (3, 5), (5, 6)]),
            ([2, (0, 1), (5, 6)], [(1, 2), (2, 3), (4, 5)], [1, (0, 2), (3, 4), (4, 6)]),
            ([(0, 3), (5, 6)], [(0, 2), (5, 6)]),
            ([(0, 1), (2, 3), (5, 6)], [(0, 2), (4, 5), (5, 6)]),
            ([1, 2, (4, 6)], [2, (0, 1), (1, 3)], [1, 4, (0, 2), (3, 6)]),
            ([(0, 2), (2, 3), (4, 6)], [(0, 2), (3, 5), (5, 6)]),
            ([(1, 3), (4, 6)], [(0, 2)], [1, (0, 2), (3, 6)]),
            ([(1, 3), (4, 5)], [(0, 2), (5, 6)], [1, (0, 2), (3, 6)]),
            ([(0, 2), (2, 3), (5, 6)], [(0, 2), (4, 5), (5, 6)]),
            ([(1, 2), (4, 5)], [(0, 1), (1, 2), (2, 3)], [(0, 1), (1, 2), (3, 4)]),
            ([2, (4, 5)], [1, (0, 2), (2, 3)], [1, (0, 2), (3, 4)]),
            ([(1, 3)], [(0, 2), (4, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([(1, 3)], [(0, 2)], [1, (0, 2)]),
            ([(2, 3), (4, 5), (5, 6)], [(1, 2)], [(1, 2), (3, 4), (4, 6)]),
            ([(0, 1), (2, 3), (5, 6)], [(1, 2), (4, 5)], [1, (0, 2), (3, 4), (4, 6)]),
            ([(0, 1), (1, 3)], [(0, 1), (1, 2)]),
            ([2, (0, 1), (1, 3), (4, 6)], [4, (0, 2), (3, 5), (5, 6)]),
            ([(0, 2), (2, 3), (4, 5)], [2, (5, 6)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (0, 3), (4, 5), (5, 6)], [1, (0, 2), (3, 4), (4, 6)]),
            ([2, (0, 1)], [(2, 3), (5, 6)], [(0, 2), (5, 6)]),
            ([(0, 1), (2, 3)], [(4, 5), (5, 6)], [(0, 2), (3, 5), (5, 6)]),
            ([(0, 2), (2, 3), (4, 5)], [(0, 2), (3, 4), (4, 5)]),
            ([(1, 3)], [(0, 2), (4, 6)], [1, (0, 2), (3, 6)]),
            ([(1, 3), (4, 6)], [(0, 1)], [1, (0, 2), (3, 6)]),
            ([(0, 3), (4, 5)], [(1, 2)], [(0, 2), (3, 4)]),
            ([(2, 3)], [(0, 2), (4, 5)], [1, (0, 2), (3, 4)]),
            ([(0, 3)], [(0, 2)]),
            ([(0, 3), (4, 6)], [(0, 2), (3, 6)]),
            ([(0, 1), (1, 2), (2, 3), (4, 6)], [(0, 1), (1, 2), (3, 4), (4, 6)]),
            ([(2, 3), (4, 5)], [(1, 2), (3, 5)]),
            ([(1, 2), (2, 3), (4, 5)], [(1, 2), (3, 4), (4, 5)]),
            ([(0, 1), (1, 3)], [(1, 2)], [1, (0, 2)]),
            ([(0, 1), (1, 2)], [(2, 3), (4, 5), (5, 6)], [(1, 2)], [(0, 2), (3, 4), (5, 6)]),
            ([(0, 1), (1, 3), (4, 6)], [(0, 2), (3, 5), (5, 6)]),
            ([(1, 2)], [(0, 1), (2, 3)], [1, (0, 2)]),
            ([(1, 3)], [(0, 1), (4, 6)], [1, (0, 2), (3, 6)]),
            ([(2, 3), (4, 5)], [1, (0, 2)], [1, (0, 2), (3, 4)]),
            ([(2, 3), (4, 6)], [(1, 2), (3, 6)]),
            ([2, (0, 1), (1, 3)], [4, (0, 1), (1, 2)]),
            ([(4, 5), (5, 6)], [(0, 1), (1, 2)], [(1, 2), (2, 3)], [(0, 2), (3, 4), (5, 6)]),
            ([2, (0, 1)], [(2, 3), (4, 5), (5, 6)], [(0, 2), (3, 5), (5, 6)]),
            ([2, (5, 6)], [(0, 2), (2, 3), (4, 5)], [1, (0, 2), (3, 4), (4, 6)]),
            ([(1, 2), (4, 5)], [(1, 2), (2, 3)], [(1, 2), (3, 4)]),
            ([(0, 1), (1, 2)], [1, (2, 3)], [1, (0, 2)]),
            ([(1, 3), (4, 5)], [1, (0, 2)], [1, (0, 2), (3, 4)]),
            ([1, (2, 3)], [(0, 2), (5, 6)], [1, (0, 2), (4, 6)]),
            ([(1, 2), (2, 3)], [(1, 2), (4, 5)]),
        ]
        return patterns_raw
| 43,764
| 50.977435
| 96
|
py
|
bfh_python
|
bfh_python-master/dehntwistda.py
|
"""Producing type DA structures for Dehn twists, using local actions."""
from algebra import CobarAlgebra, TensorDGAlgebra, TensorGenerator, \
TensorStarGenerator
from algebra import E0
from autocompleteda import autoCompleteDA, autoCompleteMorphism
from dastructure import DAStructure, MorDAtoDAGenerator, SimpleDAGenerator
from extendbyid import ExtendedDAStructure, LocalDAStructure, \
LocalMorDAtoDAComplex
from extendbyid import identityDALocal
from localpmc import LocalIdempotent, LocalStrandAlgebra, PMCSplitting
from pmc import linearPMC
from utility import memorize, subset
from utility import F2, NEG, POS
import ast
import itertools
class AntiBraidDA(ExtendedDAStructure):
    """Responsible for producing a type DA structure for the anti-braid
    resolution (admissible case only), using local actions.
    """
    def __init__(self, genus, c_pair):
        """Specifies genus of the starting PMC and the ID of the pair of
        anti-braid resolution.
        """
        self.genus = genus
        self.c_pair = c_pair
        self.pmc = linearPMC(genus)
        self.n = 4 * genus
        self.c1, self.c2 = self.pmc.pairs[c_pair]
        # The pair is degenerate when it sits at an end of the linear PMC,
        # leaving only one position between c1 and c2 (otherwise two).
        if self.c2 == self.c1 + 3:
            self.is_degenerate = False
        else:
            assert self.c2 == self.c1 + 2
            assert self.c1 == 0 or self.c2 == self.n - 1
            self.is_degenerate = True
        if self.is_degenerate:
            # One position between c1 and c2, called p
            self.p = self.c1 + 1
            self.p_pair = self.pmc.pairid[self.p]
        else:
            # Two positions between c1 and c2, for (d)own and (u)p
            self.d = self.c1 + 1
            self.u = self.c1 + 2
            self.d_pair = self.pmc.pairid[self.d]
            self.u_pair = self.pmc.pairid[self.u]
        # Necessary to get local DA structure.
        self.splitting = PMCSplitting(self.pmc, [(self.c1, self.c2)])
        self.local_pmc = self.splitting.local_pmc
        self.mapping = self.splitting.local_mapping
        # Local DA Structure
        self.local_da = self.getLocalDAStructure()
        ### Uncomment to use autocompleteda to construct arrows from seeds.
        # autoCompleteDA(self.local_da, ([]))
        # Initiate the ExtendedDAStructure
        ExtendedDAStructure.__init__(self, self.local_da,
                                     self.splitting, self.splitting)

    def getLocalDAStructure(self):
        """Returns the local type DA structure associated to the anti-braid
        resolution.
        """
        if self.is_degenerate:
            if self.c1 == 0:
                patterns_raw = self._get_patterns_bottom()
            else:
                assert self.c2 == self.n - 1
                patterns_raw = self._get_patterns_top()
        else:
            patterns_raw = self._get_patterns_middle()
        # Index the raw patterns by (start class, end class, A-side inputs);
        # the value is the list of D-side outputs sharing that key.
        arrow_patterns = {}
        for pattern in patterns_raw:
            start_class, end_class = pattern[0], pattern[1]
            coeffs_a = []
            for i in range(2, len(pattern)-1):
                coeffs_a.append(self.local_pmc.sd(pattern[i]))
            key = (start_class, end_class, tuple(coeffs_a))
            if key not in arrow_patterns:
                arrow_patterns[key] = []
            arrow_patterns[key].append(self.local_pmc.sd(pattern[-1]))
        # Now start construction of the local DA structure.
        alg = LocalStrandAlgebra(F2, self.local_pmc)
        local_c_pair = 0
        # Compute the set of local generators. Generators of class 0 has
        # idempotents (l_idem, r_idem) where l_idem has the c_pair and
        # r_idem has one of the u or d pairs (with the rest being the same).
        # Generators of class 1 and 2 has l_idem = r_idem such that c_pair
        # is in both.
        if self.is_degenerate:
            single_idems = [1]  # local p_pair
            da_idems_0 = [([0], [1])]
        else:  # Non-degenerate case
            single_idems = [1, 2]  # local d_pair and local u_pair
            da_idems_0 = [([0], [1]), ([0], [2]),
                          ([0, 1], [1, 2]), ([0, 2], [1, 2])]  # class 0
        local_da = LocalDAStructure(
            F2, alg, alg, single_idems1 = single_idems,
            single_idems2 = single_idems)
        for i in range(len(da_idems_0)):  # class 0
            l_idem, r_idem = da_idems_0[i]
            local_da.addGenerator(SimpleDAGenerator(
                local_da, LocalIdempotent(self.local_pmc, l_idem),
                LocalIdempotent(self.local_pmc, r_idem), "0_%d" % i))
        all_idems = subset(list(range(self.local_pmc.num_pair)))  # class 1 and 2
        for i in range(len(all_idems)):
            idem = LocalIdempotent(self.local_pmc, all_idems[i])
            if local_c_pair in idem:
                local_da.addGenerator(
                    SimpleDAGenerator(local_da, idem, idem, "1_%d" % i))
                local_da.addGenerator(
                    SimpleDAGenerator(local_da, idem, idem, "2_%d" % i))
        mod_gens = local_da.getGenerators()
        # Have to take care of u_map. It is sufficient to know that u_map must
        # preserve class of generators.
        for i in range(len(single_idems)):
            idem = single_idems[i]
            for local_gen in mod_gens:
                idem1, idem2 = local_gen.idem1, local_gen.idem2
                if idem in idem1 and idem in idem2:
                    # local_gen is eligible for u_maps[i]
                    target_idem1 = idem1.removeSingleHor([idem])
                    target_idem2 = idem2.removeSingleHor([idem])
                    target_gen = [target for target in mod_gens
                                  if target.idem1 == target_idem1 and
                                  target.idem2 == target_idem2 and
                                  target.name[0] == local_gen.name[0]]
                    assert len(target_gen) == 1
                    local_da.add_u_map(i, local_gen, target_gen[0])
        # Check all u_map has been filled.
        local_da.auto_u_map()
        # Add arrows according to arrow_pattern.
        for key in list(arrow_patterns.keys()):
            start_class, end_class, coeffs_a = key
            if len(coeffs_a) == 1 and coeffs_a[0].isIdempotent():
                continue
            for coeff_d in arrow_patterns[key]:
                used = False
                for x, y in itertools.product(mod_gens, mod_gens):
                    if x.name[0] == "%d" % start_class and \
                       y.name[0] == "%d" % end_class and \
                       DAStructure.idemMatchDA(x, y, coeff_d, coeffs_a):
                        local_da.addDelta(x, y, coeff_d, coeffs_a, 1)
                        used = True
                if not used:
                    print("Warning: unused arrow: %s %s" % (coeffs_a, coeff_d))
        return local_da

    def _get_patterns_middle(self):
        """Returns the local patterns, read from the data file."""
        # Local PMC is 0*-1-2-3-4-5*, with 1 and 4 paired.
        # Use a with-statement so the data file is always closed (the
        # original left the handle open until garbage collection).
        with open("antibraid_arrows.data", "r") as input_patterns:
            patterns_raw = ast.literal_eval(input_patterns.read())
        return patterns_raw

    def _get_patterns_bottom(self):
        """Hard-coded local patterns for the degenerate case at the bottom."""
        # Local PMC is 0-1-2-3*, with 0 and 2 paired.
        patterns_raw = [
            # Initial patterns
            (1, 2, [0]), (1, 2, [0, 1]),
            (0, 2, [(1, 2)], [0]),
            (1, 0, [(0, 1)], [0]),
            (1, 2, [(0, 2)], [0]),
            (1, 2, [1, (0, 2)], [0, 1]),
            # Seed for the middle regions
            (0, 0, [(0, 2)]),
            # Added for the middle regions
            (2, 2, [(0, 2)]),
            (2, 2, [(0, 1), (1, 2)], [(0, 1), (1, 2)]),
            (1, 1, [(0, 2)]),
            (1, 1, [1, (0, 2)]),
            (1, 2, [(0, 1), (1, 2)], [(0, 1), (1, 2)], [(0, 1), (1, 2)]),
            (2, 1, [(0, 1), (1, 2)]),
            (1, 1, [(0, 1), (1, 2)], [(0, 1), (1, 2)]),
            (2, 2, [1, (0, 2)]),
        ]
        return patterns_raw

    def _get_patterns_top(self):
        """Patterns for the degenerate case at the top, obtained by shifting
        the bottom patterns up by one position.
        """
        # Local PMC is 0*-1-2-3, with 1 and 3 paired.
        # Simply add one to everything in _get_patterns_bottom
        def translate(pattern):
            """Shift every point (or interval endpoint) up by one."""
            result = []
            for entry in pattern:
                if isinstance(entry, int):
                    result.append(entry + 1)
                else:
                    result.append((entry[0] + 1, entry[1] + 1))
            return result
        patterns_raw = []
        for arrow_pattern in self._get_patterns_bottom():
            patterns_raw.append(
                arrow_pattern[:2] + tuple([translate(pattern)
                                           for pattern in arrow_pattern[2:]]))
        return patterns_raw
class DehnSurgeryDA(object):
    """Responsible for computing the type DA morphism of a Dehn surgery, between
    the identity and anti-braid type DA bimodules.
    """
    def __init__(self, genus, c_pair, orientation):
        """Specifies genus of the starting pmc, id of the pair of Dehn twist,
        and orientation of the twist (POS or NEG).
        """
        self.genus = genus
        self.orientation = orientation
        self.start_pmc = linearPMC(genus)
        self.end_pmc = self.start_pmc
        self.n = 4 * genus
        self.c1, self.c2 = self.start_pmc.pairs[c_pair]
        self.c_pair = c_pair
        # Degenerate pairs sit at an end of the PMC (see AntiBraidDA).
        if self.c2 == self.c1 + 3:
            self.is_degenerate = False
        else:
            assert self.c2 == self.c1 + 2
            assert self.c1 == 0 or self.c2 == self.n - 1
            self.is_degenerate = True
        if not self.is_degenerate:
            # Two positions between c1 and c2, for (d)own and (u)p
            self.d = self.c1 + 1
            self.u = self.c1 + 2
        self.splitting = PMCSplitting(self.start_pmc, [(self.c1, self.c2)])
        self.local_pmc = self.splitting.local_pmc

    def __eq__(self, other):
        return self.genus == other.genus and self.c_pair == other.c_pair and \
            self.orientation == other.orientation

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(("DehnSurgeryDA", self.genus, self.c_pair,
                     self.orientation))

    @memorize
    def getMappingCone(self):
        """Returns the full (extended) mapping cone of the surgery morphism."""
        return ExtendedDAStructure(
            self.getLocalMappingCone(), self.splitting, self.splitting)

    @memorize
    def getLocalMappingCone(self):
        """Returns the mapping cone of the local morphism."""
        morphism = self.getLocalMorphism()
        morphism_cx = morphism.getElt().parent
        return morphism_cx.getMappingCone(morphism)

    @memorize
    def getLocalMorphism(self):
        """Returns the morphism (element of MorDAtoDAComplex, consisting of
        MorDAtoDAGenerators) between identity and anti-braid corresponding to
        this Dehn surgery.
        """
        id_local_da = identityDALocal(self.local_pmc)
        ab_local_da = AntiBraidDA(self.genus, self.c_pair).getLocalDAStructure()
        # A negative twist gives a morphism from identity to anti-braid; a
        # positive twist goes the other way.
        if self.orientation == NEG:
            source = id_local_da
            target = ab_local_da
        else:
            source = ab_local_da
            target = id_local_da
        source_gens = source.getGenerators()
        target_gens = target.getGenerators()
        morphism_cx = LocalMorDAtoDAComplex(F2, source, target)
        alg = self.local_pmc.getAlgebra()
        cobar_alg = CobarAlgebra(alg)
        # NOTE(review): tensor_alg appears unused below; kept in case its
        # construction has needed side effects -- confirm.
        tensor_alg = TensorDGAlgebra((alg, cobar_alg))
        morphism = E0
        if self.is_degenerate:
            if self.c1 == 0:
                patterns_raw = self._get_patterns_bottom()
            else:
                assert self.c2 == self.n - 1
                patterns_raw = self._get_patterns_top()
        else:
            patterns_raw = self._get_patterns_middle()
        # Index the raw patterns by (start class, end class, A-side inputs).
        arrow_patterns = dict()
        for pattern in patterns_raw:
            start_class, end_class = pattern[0], pattern[1]
            coeffs_a = []
            for i in range(2, len(pattern)-1):
                coeffs_a.append(self.local_pmc.sd(pattern[i]))
            key = (start_class, end_class, tuple(coeffs_a))
            if key not in arrow_patterns:
                arrow_patterns[key] = []
            arrow_patterns[key].append(self.local_pmc.sd(pattern[-1]))
        # Add arrows according to arrow_pattern.
        for key in list(arrow_patterns.keys()):
            s_class, e_class, coeffs_a = key
            if len(coeffs_a) == 1 and coeffs_a[0].isIdempotent():
                continue
            for coeff_d in arrow_patterns[key]:
                used = False
                for x, y in itertools.product(source_gens, target_gens):
                    # A class of -1 in a pattern matches generators of any
                    # class.
                    if (s_class == -1 or x.name[0] == "%d" % s_class) and \
                       (e_class == -1 or y.name[0] == "%d" % e_class) and \
                       DAStructure.idemMatchDA(x, y, coeff_d, coeffs_a):
                        morphism += 1 * MorDAtoDAGenerator(
                            morphism_cx, coeff_d, coeffs_a, x, y)
                        used = True
                if not used:
                    print("Warning: unused arrow: %s %s" % (coeffs_a, coeff_d))
        ### Uncomment to use autocompleteda to construct arrows from seeds.
        # autoCompleteMorphism(source, target, morphism)
        return morphism

    def _get_patterns_middle(self):
        """Returns the local patterns, read from the data file matching this
        twist's orientation.
        """
        # Local PMC is 0*-1-2-3-4-5*, with 1 and 4 paired.
        # Use a with-statement so the data file is always closed (the
        # original left the handle open until garbage collection).
        if self.orientation == NEG:
            filename = "dehntwist_neg_arrows.data"
        else:
            filename = "dehntwist_pos_arrows.data"
        with open(filename, "r") as input_patterns:
            patterns_raw = ast.literal_eval(input_patterns.read())
        return patterns_raw

    def _get_patterns_bottom(self):
        """Hard-coded local patterns for the degenerate case at the bottom."""
        # Local PMC is 0-1-2-3*, with 0 and 2 paired.
        if self.orientation == NEG:
            patterns_raw = [
                # Seeds
                (-1, 0, [(1, 2)]),
                # Added
                (-1, 2, [0, 1]),
                (-1, 1, [1, (0, 2)]),
                (-1, 2, [(1, 2), (2, 3)], [0, (1, 3)]),
                (-1, 0, [1, (2, 3)], [0, (1, 3)]),
                (-1, 1, [(0, 2)]),
                (-1, 2, [0]),
            ]
        else:  # self.orientation == POS
            patterns_raw = [
                # Seeds
                (0, -1, [(0, 1)]),
                # Added
                (1, -1, [(0, 1), (1, 2)], [(1, 2), (2, 3)], [(1, 2), (2, 3)]),
                (1, -1, [1, (0, 3)], [1, (2, 3)]),
                (1, -1, [(0, 1), (1, 2)], [0, (1, 3)], [0, (1, 3)]),
                (2, -1, [1, (0, 3)], [1, (0, 3)]),
                (1, -1, [(0, 2)], [0]),
                (2, -1, [1, (0, 2)], [1, (0, 2)]),
                (0, -1, [(1, 2)], [0]),
                (1, -1, [0, (1, 3)], [(0, 2)], [(1, 2), (2, 3)]),
                (1, -1, [(0, 1), (1, 3)], [(1, 2)], [(1, 2), (2, 3)]),
                (1, -1, [1, (0, 2)], [0, 1]),
                (2, -1, [(0, 3)], [(0, 3)]),
                (1, -1, [(0, 1), (1, 2)], [(0, 1), (1, 2)], [(0, 1), (1, 2)]),
                (2, -1, [(0, 2)], [(0, 2)]),
                (0, -1, [(1, 3)], [(2, 3)]),
                (1, -1, [(0, 3)], [(2, 3)]),
                (1, -1, [(0, 1), (1, 2)], [(0, 1), (1, 3)], [(0, 1), (1, 3)]),
                (2, -1, [(0, 1)], [(0, 1)]),
            ]
        return patterns_raw

    def _get_patterns_top(self):
        """Hard-coded local patterns for the degenerate case at the top
        (positive orientation only).
        """
        # Local PMC is 0*-1-2-3, with 1 and 3 paired.
        if self.orientation == NEG:
            # This case doesn't work.
            assert False
        else:  # self.orientation == POS
            patterns_raw = [
                # Seeds
                (0, -1, [(1, 2)]),
                # Added
                (1, -1, [(0, 1), (1, 2)], [1, (0, 2)]),
                (1, -1, [1]),
                (0, -1, [2, (0, 1)], [1, (0, 2)]),
                (2, -1, [(1, 3)]),
                (1, -1, [1, 2]),
                (2, -1, [2, (1, 3)]),
            ]
        return patterns_raw
| 15,939
| 38.068627
| 81
|
py
|
bfh_python
|
bfh_python-master/minusalg.py
|
"""Strand algebra for the minus theory."""
from algebra import Generator, SimpleChainComplex
from algebra import E0
from pmc import PMC, Strands, StrandAlgebra, StrandDiagram
from pmc import splitPMC
from utility import memorize
from utility import F2
class MinusStrands(Strands):
    """The corresponding Strands class for the strand algebra in the minus
    theory. The main difference with Strands in pmc.py is that the moving
    strands can go through the basepoint, hence the starting point may be
    greater than the ending point (or equal, in the case where the moving strand
    covers the entire PMC).
    """
    def __init__(self, pmc, data):
        # Currently only supporting torus case
        assert pmc == splitPMC(1)
        self.pmc = pmc
        # NOTE(review): Strands.__init__ is intentionally not called; the
        # strand data appears to be stored by the (presumably tuple-based)
        # parent's __new__, since `for st in self` below iterates the
        # strands -- confirm against pmc.Strands.
        # Compute multiplicity.
        # multiplicity[pos] counts how many strands cover the interval
        # starting at position pos, with wrap-around through the basepoint.
        self.multiplicity = [0] * self.pmc.n
        for st in self:
            assert len(st) == 2
            start, end = st[0], st[1]
            # A strand with end <= start wraps past the basepoint; unroll it
            # by a full period before accumulating multiplicities mod n.
            if end <= start:
                end += self.pmc.n
            for pos in range(start, end):
                self.multiplicity[pos % self.pmc.n] += 1
class MinusStrandDiagram(StrandDiagram):
    """The corresponding StrandDiagram class for the strand algebra in the
    minus theory.
    """
    def __init__(self, parent, left_idem, strands, right_idem = None):
        """Be sure to use MinusStrands to convert strands."""
        # Coerce raw strand data into MinusStrands so basepoint-wrapping
        # strands get the right multiplicities.
        if not isinstance(strands, MinusStrands):
            strands = MinusStrands(parent.pmc, strands)
        StrandDiagram.__init__(self, parent, left_idem, strands, right_idem)
class MinusStrandAlgebra(StrandAlgebra):
    """The corresponding StrandAlgebra class for the strand algebra in the
    minus theory (strands may wrap through the basepoint, see MinusStrands).
    """
    def __init__(self, ring, pmc):
        """Specifies the PMC. Assume multiplicity one and middle idempotent
        size.
        """
        # idem_size is pmc.genus (the "middle" size); mult_one = True means
        # every interval may be covered at most once.
        StrandAlgebra.__init__(self, ring, pmc, pmc.genus, mult_one = True)
    @memorize
    def getGenerators(self):
        """Returns all generators: one idempotent diagram per idempotent,
        plus every diagram with a single moving strand (which may wrap past
        the basepoint).
        """
        # Only implemented this case
        assert self.pmc == splitPMC(1)
        n = 4
        # NOTE(review): generators get a freshly constructed algebra as
        # parent instead of self; this relies on algebra equality being
        # structural -- confirm intended.
        algebra = MinusStrandAlgebra(F2, self.pmc)
        result = []
        idems = self.pmc.getIdempotents(algebra.idem_size)
        for idem in idems:
            result.append(MinusStrandDiagram(algebra, idem, []))
        # Only one strand
        for start in range(n):
            for end in range(n):
                strands = MinusStrands(self.pmc, [(start, end)])
                for l_idem in idems:
                    if strands.leftCompatible(l_idem):
                        result.append(
                            MinusStrandDiagram(algebra, l_idem, strands))
        return result
    @memorize
    def diff(self, gen):
        # Differential is zero in the torus algebra.
        return E0
    @memorize
    def multiply(self, gen1, gen2):
        """Concatenation product of two minus strand diagrams.

        Returns E0 (zero) when the product vanishes (idempotent mismatch,
        multiplicity > 1, or non-concatenable strands), NotImplemented for
        foreign operand types.
        """
        if not isinstance(gen1, MinusStrandDiagram):
            return NotImplemented
        if not isinstance(gen2, MinusStrandDiagram):
            return NotImplemented
        assert gen1.parent == self and gen2.parent == self, \
            "Algebra not compatible."
        if gen1.right_idem != gen2.left_idem:
            return E0
        # Enforce the multiplicity one condition
        total_mult = [m1+m2 for m1, m2 in zip(gen1.multiplicity,
                                              gen2.multiplicity)]
        if not all([x <= 1 for x in total_mult]):
            return E0
        pmc = gen1.pmc
        new_strands = []
        # Keep track of which strands at right are not yet used.
        strands_right = list(gen2.strands)
        for sd in gen1.strands:
            # Match a left strand's endpoint against right strands that start
            # on the same pair; there is at most one by multiplicity-one.
            mid_idem = pmc.pairid[sd[1]]
            possible_match = [sd2 for sd2 in strands_right
                              if pmc.pairid[sd2[0]] == mid_idem]
            if len(possible_match) == 0:
                new_strands.append(sd)
            else: # len(possible_match) == 1
                sd2 = possible_match[0]
                if sd2[0] != sd[1]:
                    # Same pair but different point: not concatenable.
                    return E0
                else:
                    # Concatenate the two strands into one.
                    new_strands.append((sd[0], sd2[1]))
                    strands_right.remove(sd2)
        new_strands.extend(strands_right)
        mult_term = MinusStrandDiagram(self, gen1.left_idem, new_strands,
                                       gen2.right_idem)
        # No problem with double crossing in the multiplicity one case.
        return mult_term.elt()
def minusSD(pmc, data):
    """Simple way to obtain a minus strand diagram. Each element of data is
    either an integer or a pair. An integer specifies a double horizontal at
    this position (and its paired position). A pair (p, q) specifies a strand
    from p to q.
    """
    parent = MinusStrandAlgebra(F2, pmc)
    # One entry of data per occupied pair in the left idempotent.
    assert parent.idem_size == len(data)
    left_idem = []
    strands = []
    for d in data:
        if isinstance(d, int):
            # Double horizontal: occupies the pair but contributes no strand.
            left_idem.append(pmc.pairid[d])
        else:
            # Moving strand from d[0] to d[1].
            left_idem.append(pmc.pairid[d[0]])
            strands.append(d)
    return MinusStrandDiagram(parent, left_idem, strands)
def getHalfIdComplex():
    """Returns the chain complex underlying the proposed quasi-inverse of the
    type DD bimodule for half-identity.

    NOTE(review): despite the docstring, the function currently prints and
    simplifies each partial complex but has no return statement (returns
    None) -- confirm intended.
    """
    class LargeComplexGenerator(Generator, tuple):
        """A generator of the large chain complex. Specified by two strand
        diagrams in the given PMC.
        """
        def __new__(cls, parent, sd_left, sd_right):
            return tuple.__new__(cls, (sd_left, sd_right))
        def __init__(self, parent, sd_left, sd_right):
            """Specifies the two strand diagrams."""
            # Note tuple initialization is automatic
            Generator.__init__(self, parent)
    # Dictionary mapping total multiplicity profile to chain complexes.
    partial_cxs = dict()
    pmc = splitPMC(1)
    alg = MinusStrandAlgebra(F2, pmc)
    # Find the set of generators.
    alg_gens = alg.getGenerators()
    for gen_left in alg_gens:
        for gen_right in alg_gens:
            # Pair diagrams whose left idempotents are complementary; group
            # the pairs by the total multiplicity of the two factors.
            if gen_left.getLeftIdem() == gen_right.getLeftIdem().comp():
                total_mult = [a + b for a, b in zip(
                    gen_left.multiplicity, gen_right.multiplicity)]
                total_mult = tuple(total_mult)
                if total_mult not in partial_cxs:
                    partial_cxs[total_mult] = SimpleChainComplex(F2)
                cur_gen = LargeComplexGenerator(
                    partial_cxs[total_mult], gen_left, gen_right)
                partial_cxs[total_mult].addGenerator(cur_gen)
    def hasDifferential(gen_from, gen_to):
        """Determine whether there is a differential between two generators
        of the large chain complex.
        """
        left_from, right_from = gen_from
        left_to, right_to = gen_to
        mult_left_from, mult_left_to = \
            left_from.multiplicity, left_to.multiplicity
        diff = [a-b for a, b in zip(mult_left_from, mult_left_to)]
        if all([n == 0 for n in diff]):
            # Equal multiplicity: the differential comes from the algebra
            # differential on one of the two factors. (Falls through to
            # return None, i.e. falsy, if neither case applies.)
            if left_from == left_to and right_to in right_from.diff():
                return True
            if right_from == right_to and left_from in left_to.diff():
                return True
        elif all([n == 0 or n == 1 for n in diff]):
            # Multiplicity drops by one on exactly one length-one interval:
            # check whether multiplying by that moving strand relates the
            # generators on both sides.
            pos_one = [i for i in range(len(diff)) if diff[i] == 1]
            if len(pos_one) != 1:
                return False
            start, end = pos_one[0], (pos_one[0]+1)%4
            st_move = MinusStrands(pmc, [(start, end)])
            if not st_move.rightCompatible(left_to.getLeftIdem()):
                return False
            left_move = MinusStrandDiagram(
                alg, None, st_move, left_to.getLeftIdem())
            if not st_move.rightCompatible(right_from.getLeftIdem()):
                return False
            right_move = MinusStrandDiagram(
                alg, None, st_move, right_from.getLeftIdem())
            return left_move * left_to == 1*left_from and \
                right_move * right_from == 1*right_to
        else:
            return False
    # Compute differentials
    for total_mult, cx in list(partial_cxs.items()):
        gens = cx.getGenerators()
        for gen_from in gens:
            for gen_to in gens:
                if hasDifferential(gen_from, gen_to):
                    cx.addDifferential(gen_from, gen_to, 1)
        print(total_mult, cx)
        cx.checkDifferential()
        cx.simplify()
| 8,385
| 36.106195
| 80
|
py
|
bfh_python
|
bfh_python-master/utility.py
|
"""Various utilities useful for the project."""
from math import gcd
from numbers import Number
def memorize(function):
    """Function decorator: cache the return values of this function.

    Results are keyed on the positional arguments plus the keyword items,
    so all arguments must be hashable. NotImplemented results are never
    cached. Based on: Daniel Lawrence, A simple example using a python
    cache decorator.
    """
    cache = {}
    _missing = object()  # sentinel distinct from every cached value
    def wrapper(*args, **kwargs):
        # Will throw TypeError if the key is not hashable.
        key = args + tuple(kwargs.items())
        cached = cache.get(key, _missing)
        if cached is not _missing:
            return cached
        result = function(*args, **kwargs)
        # Don't keep NotImplemented values.
        if result is not NotImplemented:
            cache[key] = result
        return result
    return wrapper
def memorizeHash(function):
    """Decorator for hash functions: cache the hash value on the object
    itself, in the attribute _hash_val.
    """
    def wrapper(*args):
        obj = args[0]
        try:
            return obj._hash_val
        except AttributeError:
            obj._hash_val = function(*args)
            return obj._hash_val
    return wrapper
def trace(function):
    """Decorator that prints the arguments and return value of every call."""
    def wrapper(*args):
        print("\nInputs:", args)
        result = function(*args)
        print("Output:", result)
        return result
    return wrapper
def tolist(obj):
    """Force obj into a list: lists pass through unchanged, anything else
    (including tuples) is wrapped in a singleton list.
    """
    return obj if isinstance(obj, list) else [obj]
def flatten(lst):
    """Flatten a once-nested list.

    Implemented with a comprehension: the previous sum(lst, []) copied the
    accumulator on every step, which is quadratic in the output length.
    """
    return [elt for sublist in lst for elt in sublist]
def fracToInt(frac):
    """Convert frac (which must have integer value regardless of type)
    to an int. Fraction inputs must have denominator one.
    """
    if not isinstance(frac, int):
        assert frac.denominator == 1
        return frac.numerator
    return frac
def find(lst, item):
    """Find the location of the first occurrence of item in lst. Returns -1 if
    item does not exist in lst.

    Works on any sequence; uses enumerate instead of indexing by
    range(len(lst)).
    """
    for index, elt in enumerate(lst):
        if elt == item:
            return index
    return -1
def sumColumns(matrix, num_col):
    """matrix is a list of lists of length num_col. Sum up by column,
    returning a list of num_col column totals.
    """
    totals = [0] * num_col
    for row in matrix:
        assert len(row) == num_col
        for col, value in enumerate(row):
            totals[col] += value
    return totals
def subset(elts):
    """Returns the list of subsets of the given set (each element is a tuple).

    The ordering is deterministic: subsets not containing the first element
    come before those that do, recursively (callers index into the result).
    """
    if not elts:
        return [()]
    first, rest = elts[0], elts[1:]
    rest_subsets = subset(rest)
    return rest_subsets + [(first,) + sub for sub in rest_subsets]
def _dictAddTo(dict1, dict2):
    """Add dict2 onto dict1 in place. If dict2 is a list, add each element of
    dict2 in place.

    The integer 0 stands for the zero (empty) dictionary throughout: dict1
    may be 0, and zero summands in dict2 are skipped. Entries whose value
    becomes 0 are deleted, so a zero sum is the empty dictionary. Returns
    the modified dict1 (or the first nonzero summand if dict1 was 0).
    """
    # Work on copies of the nonzero summands so they are never mutated.
    dict2 = [curdict.copy() for curdict in tolist(dict2) if curdict != 0]
    if dict1 == 0:
        if len(dict2) == 0:
            return dict1
        else:
            # Adopt the first summand (a copy) as the accumulator.
            dict1 = dict2[0]
            dict2 = dict2[1:]
    for curdict in dict2:
        assert type(dict1) == type(curdict), "Incompatible types: %s, %s" % \
            (str(type(dict1)), str(type(curdict)))
        for k, v in list(curdict.items()):
            if k in dict1:
                dict1[k] += v
                # Throw away entries that cancel to zero.
                if dict1[k] == 0:
                    del dict1[k]
            else:
                dict1[k] = v
    return dict1
def _dictMult(dict1, scalar):
"""Return a new dictionary with same type as self, the same keys, and
each value multiplied by scalar.
"""
if not isinstance(scalar, Number):
return NotImplemented
result = type(dict1)((k, scalar * v) for k, v in list(dict1.items()) if
scalar * v != 0)
return result
class SummableDict(dict):
    """A dictionary type that supports sums and multiplication by scalar. Works
    in the same way as a free module with generators as keys and coefficients
    as values. Zeroes are automatically thrown away.
    """
    def __add__(self, other):
        # Accumulate into a fresh dictionary so neither operand is mutated.
        return _dictAddTo(type(self)(), [self, other])
    def __iadd__(self, other):
        # In-place sum; returns self (possibly emptied of zero entries).
        return _dictAddTo(self, other)
    def __sub__(self, other):
        return _dictAddTo(type(self)(), [self, -1*other])
    def __isub__(self, other):
        return _dictAddTo(self, -1*other)
    def accumulate(self, lst):
        """Similar to +=, except returns the sum."""
        return _dictAddTo(self, lst)
    def __mul__(self, other):
        # Scalar multiplication only; NotImplemented for non-Number scalars.
        return _dictMult(self, other)
    def __rmul__(self, other):
        return _dictMult(self, other)
    def __eq__(self, other):
        """Comparison to 0 is a test for empty dictionary."""
        if isinstance(other, int) and other == 0:
            return len(self) == 0
        else:
            return dict.__eq__(self, other)
    def __ne__(self, other):
        return not (self == other)
    def copy(self):
        """Copy function should preserve type."""
        return type(self)(self)
    def translateKey(self, key_map):
        """Translate keys of this dictionary using the dictionary key_map. All
        keys in self must appear in key_map and will be replaced by the
        corresponding value.
        """
        return type(self)([(key_map[k], v) for k, v in list(self.items())])
    def getElt(self):
        """Returns an arbitrary key from this dictionary. Must be non-empty."""
        return next(iter(self))
class Ring(object):
    """Abstract base class for rings. Subclasses implement convert()."""
    def convert(self, data):
        """Try to convert data to an element of this ring."""
        raise NotImplementedError("convert function not specified for ring.")
class RingElement(Number):
    """Base class for elements of a ring. Deriving from Number lets ring
    elements pass isinstance(x, Number) checks used for scalars.
    """
    pass
class ModNRing(Ring):
    """The ring Z/nZ."""
    def __init__(self, n):
        """Creates the ring Z/nZ; self.zero and self.one are cached
        elements.
        """
        self.n = n
        self.zero = self.convert(0)
        self.one = self.convert(1)
    def add(self, elt1, elt2):
        """Add two elements (ints are converted first). Returns
        NotImplemented if either operand cannot be converted.
        """
        elt1, elt2 = self.convert(elt1), self.convert(elt2)
        if elt1 is NotImplemented or elt2 is NotImplemented:
            return NotImplemented
        return ModNElement(self, (elt1.val+elt2.val)%self.n)
    def multiply(self, elt1, elt2):
        """Multiply two elements; same conversion rules as add."""
        elt1, elt2 = self.convert(elt1), self.convert(elt2)
        if elt1 is NotImplemented or elt2 is NotImplemented:
            return NotImplemented
        return ModNElement(self, (elt1.val*elt2.val)%self.n)
    def __eq__(self, other):
        # Guard against unrelated types: the original read other.n
        # unconditionally and raised AttributeError when comparing with
        # anything that is not a ModNRing.
        if not isinstance(other, ModNRing):
            return NotImplemented
        return self.n == other.n
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash((self.n, "ModNRing"))
    def convert(self, data):
        """Try to convert data to an element of this ring."""
        if isinstance(data, ModNElement) and data.parent == self:
            return data
        if isinstance(data, int):
            return ModNElement(self, data % self.n)
        return NotImplemented
class ModNElement(RingElement):
    """An element in a ring Z/nZ."""
    def __init__(self, parent, val):
        # parent is the ModNRing this element belongs to; val is in [0, n).
        self.parent = parent
        self.val = val
    def __str__(self):
        return str(self.val)
    def __repr__(self):
        return str(self.val)
    def __add__(self, other):
        return self.parent.add(self, other)
    def __radd__(self, other):
        return self.parent.add(self, other)
    def __mul__(self, other):
        return self.parent.multiply(self, other)
    def __rmul__(self, other):
        return self.parent.multiply(self, other)
    def __eq__(self, other):
        """Can compare to integer 0 or 1."""
        if isinstance(other, int) and (other == 0 or other == 1):
            return self.val == other
        if isinstance(other, ModNElement):
            return self.val == other.val
        # The original read other.val unconditionally, raising
        # AttributeError for unrelated types (including ints other than
        # 0/1); delegate to the other operand instead.
        return NotImplemented
    def __hash__(self):
        # Defining __eq__ suppressed the inherited __hash__, making
        # instances unhashable. Restore a hash consistent with __eq__
        # (hash(int) == int for small ints, so elements equal to 0/1 also
        # hash equal to them).
        return hash(self.val)
    def invertible(self):
        """Returns whether this element is invertible in the ring."""
        return gcd(self.val, self.parent.n) == 1
    def inverse(self):
        """Returns the inverse of this element. Must be invertible"""
        # Currently only implemented for n = 2
        assert self.parent.n == 2
        return self
class Integer(Ring):
    """The ring Z."""
    def convert(self, data):
        """Try to convert data to an element of this ring."""
        # Only genuine ints are accepted; plain ints double as elements of Z.
        assert isinstance(data, int)
        return data
class IntegerElement(RingElement, int):
    """An element in a ring Z."""
    def __new__(cls, parent, val):
        # int is immutable, so the value must be fixed in __new__.
        return int.__new__(cls, val)
    def __init__(self, parent, val):
        # val is already stored by int.__new__; only record the parent ring.
        self.parent = parent
class NamedObject(object):
    """Provides functionality for an object to be described by name. If this is
    listed as a parent class, an object will use name in equality comparisons,
    hash functions, and string outputs.
    """
    def __init__(self, name):
        # name may be any hashable, comparable value (typically a string).
        self.name = name
    def __eq__(self, other):
        # NOTE(review): duck-typed -- comparison with objects lacking a
        # name attribute raises AttributeError; confirm callers rely on
        # cross-type comparison by name.
        return self.name == other.name
    def __ne__(self, other):
        return not (self == other)
    def __lt__(self, other):
        return self.name < other.name
    def __le__(self,other):
        return self.name <= other.name
    def __gt__(self, other):
        return self.name > other.name
    def __ge__(self, other):
        return self.name >= other.name
    @memorizeHash
    def __hash__(self):
        # Cached on the instance via memorizeHash (stored in _hash_val).
        return hash(self.name)
    def __str__(self):
        return str(self.name)
    def __repr__(self):
        return str(self.name)
class MorObject(object):
    """If this is listed as a parent class, an object will be treated as a
    generator of some morphism complex. It will have source, coeff, and
    target fields. These will be used for equality comparisons, hash
    functions, and string outputs.
    """
    def __init__(self, source, coeff, target):
        # Represents the elementary morphism source -> coeff * target.
        self.source = source
        self.coeff = coeff
        self.target = target
    def __eq__(self, other):
        return self.source == other.source and self.coeff == other.coeff \
            and self.target == other.target
    def __ne__(self, other):
        return not (self == other)
    @memorizeHash
    def __hash__(self):
        # Cached on the instance via memorizeHash (stored in _hash_val).
        return hash((self.source, self.coeff, self.target))
    def __str__(self):
        return "%s->%s*%s" % \
            (str(self.source), str(self.coeff), str(self.target))
    def __repr__(self):
        return str(self)
def safeMultiply(a, b):
    """Safely multiply the two sides using __mul__ and __rmul__. Return
    NotImplemented if both fail (raise TypeError or return NotImplemented).
    """
    prod = NotImplemented
    try:
        prod = a.__mul__(b)
    except TypeError:
        pass
    if prod is not NotImplemented:
        return prod
    try:
        return b.__rmul__(a)
    except TypeError:
        return NotImplemented
# Most commonly used rings: Z/2Z and Z
F2 = ModNRing(2)
ZZ = Integer()
# Constants for positive and negative orientation
POS, NEG = 1, -1
# Constants for left and right action
ACTION_LEFT, ACTION_RIGHT = 0, 1
def sideStr(side):
    """Human-readable name ("LEFT"/"RIGHT") for an action-side constant."""
    if side == ACTION_LEFT: return "LEFT"
    else: return "RIGHT"
def oppSide(side):
    """Returns the opposite action side."""
    if side == ACTION_LEFT: return ACTION_RIGHT
    else: return ACTION_LEFT
# Constant for controlling amount of printing
PRINT_PROGRESS = 1
# How much assertions do you want to do? Higher value means more checks and
# slower program
ASSERT_LEVEL = 0
# For each grading group and grading set object, its type attribute is one of
# these
BIG_GRADING, SMALL_GRADING = 0, 1
DEFAULT_GRADING = SMALL_GRADING
def grTypeStr(gr_type):
    """Human-readable name ("big"/"small") for a grading-type constant."""
    if gr_type == BIG_GRADING: return "big"
    else: return "small"
# Whether to use multiplicity-one algebra (or the full algebra)
MULT_ONE = True
| 11,442
| 27.046569
| 80
|
py
|
bfh_python
|
bfh_python-master/linalg.py
|
"""Linear algebra, including integral linear algebra."""
from fractions import Fraction
from utility import fracToInt, memorize
class RowSystem(object):
    """Manage a list of row vectors of integers. Find both integer and rational
    linear combinations of these vectors that sum to zero or another row
    vector.
    """
    def __init__(self, vecs):
        """vecs is a nonempty list of vectors. Each element of vecs is a vector
        of integers (rational inputs are possible, but they must have integer
        value, and will be converted to integers).
        """
        assert len(vecs) > 0
        self.ori_vecs = [list(vec) for vec in vecs]
        self.ori_vecs = [[fracToInt(n) for n in vec]
                         for vec in self.ori_vecs]
        self.num_row = len(self.ori_vecs)
        self.num_col = len(self.ori_vecs[0])
        self._rowReduce()
        self.num_reduced_row = len(self.reduced_vecs)
        assert self.num_reduced_row == len(self.pivot)
    def __str__(self):
        result = "Row system with rows:"
        for vec in self.ori_vecs:
            result += "\n"+"\t".join(["%d" % n for n in vec])
        return result
    def __repr__(self):
        return str(self)
    def _rowReduce(self):
        """Perform integral row reduction. Produces three lists of vectors for
        this object:
        *. self.reduced_vecs: list of reduced vectors. These forms a basis for
        the subspace spanned by the original vectors, in the echelon form.
        *. self.reduced_comb: each row corresponds to a reduced vector, it
        expresses the reduced vector as a linear combination of the original
        vectors (note this is not necessarily unique).
        *. self.zero_comb: each row corresponds to a (integer) linear relation
        between the original vectors.
        Also produces self.pivot, which maps row number in reduced_vecs to the
        position of pivot in that row (first nonzero entry).
        """
        reduced_vecs = [list(vec) for vec in self.ori_vecs]
        # Expresses each row of reduced_vecs as a linear combination of rows
        # in ori_vecs. Initially this is just the identity.
        combs = [[0]*self.num_row for i in range(self.num_row)]
        for i in range(self.num_row):
            combs[i][i] = 1
        def swap_row(a, b):
            """Swaps row #a and #b of reduced_vecs."""
            for i in range(self.num_col):
                reduced_vecs[a][i], reduced_vecs[b][i] = \
                    reduced_vecs[b][i], reduced_vecs[a][i]
            for i in range(self.num_row):
                combs[a][i], combs[b][i] = combs[b][i], combs[a][i]
        def multiply_row(a, factor):
            """Multiply row #a by factor."""
            for i in range(self.num_col):
                reduced_vecs[a][i] *= factor
            for i in range(self.num_row):
                combs[a][i] *= factor
        def add_multiple(add_from, add_to, factor):
            """Add factor*(row #add_from) onto row #add_to."""
            for i in range(self.num_col):
                reduced_vecs[add_to][i] += (factor * reduced_vecs[add_from][i])
            for i in range(self.num_row):
                combs[add_to][i] += (factor * combs[add_from][i])
        cur_row, cur_col = 0, 0
        self.pivot = []
        while cur_row < self.num_row and cur_col < self.num_col:
            # Find an entry with minimum nonzero absolute value in this
            # column, at or below cur_row
            min_val, min_row = 0, -1
            for row in range(cur_row, self.num_row):
                cur_val = reduced_vecs[row][cur_col]
                if cur_val != 0 and (min_val == 0 or abs(cur_val) < min_val):
                    min_val, min_row = abs(cur_val), row
            # If all entries in this column at or below cur_row are zero, move
            # to next column
            if min_val == 0:
                cur_col += 1
                continue
            # Otherwise, swap minimum row onto cur_row, and correct for signs
            swap_row(cur_row, min_row)
            if reduced_vecs[cur_row][cur_col] < 0:
                multiply_row(cur_row, -1)
            assert min_val == reduced_vecs[cur_row][cur_col]
            # Reduce the remaining rows
            all_gcd = True
            for row in range(cur_row+1, self.num_row):
                factor = -reduced_vecs[row][cur_col] // min_val
                add_multiple(cur_row, row, factor)
                if reduced_vecs[row][cur_col] != 0:
                    all_gcd = False
            # If min_val is the gcd of remaining values in the column (so all
            # other values in the column are now zero), we are done. Otherwise,
            # we need to repeat this with a smaller min_val.
            if all_gcd:
                self.pivot.append(cur_col)
                cur_row += 1
        # At this point, all rows of self.reduced_vecs at or below cur_row
        # should be zero.
        self.reduced_vecs = reduced_vecs[0:cur_row]
        self.reduced_comb = combs[0:cur_row]
        self.zero_comb = combs[cur_row:]
    def getZeroComb(self):
        """Returns a list of linear relations between the original vectors."""
        return self.zero_comb
    def vecReduce(self, vec, use_rational = False):
        """Reduce the given vector to a standard form. If use_rational is set
        to True, then rational multiples of the original vectors are allowed.
        Otherwise only integer multiples are allowed to reduce the vector.
        If use_rational is set to false, only integer values are allowed
        in vec.
        Returns a tuple (comb, reduced_vec), where comb is a vector of length
        num_row, specifying the linear combination of original vectors that,
        when subtracted, will yield the standard form reduced_vec.
        """
        if not use_rational:
            vec = [fracToInt(n) for n in vec]
        comb = [0] * self.num_row
        cur_row = 0
        for col in range(self.num_col):
            if vec[col] == 0:
                continue
            # Advance to the reduced row whose pivot is at this column, if
            # there is one; otherwise this column cannot be reduced.
            while cur_row < self.num_reduced_row and self.pivot[cur_row] < col:
                cur_row += 1
            if cur_row >= self.num_reduced_row or self.pivot[cur_row] != col:
                continue
            pivot_val = self.reduced_vecs[cur_row][col]
            assert pivot_val > 0
            if use_rational:
                factor = Fraction(vec[col]) / pivot_val
            else:
                factor = vec[col] // pivot_val # integer division
            # Subtract factor * (reduced row) and record the corresponding
            # combination of original vectors.
            for i in range(self.num_col):
                vec[i] -= self.reduced_vecs[cur_row][i] * factor
            for i in range(self.num_row):
                comb[i] += self.reduced_comb[cur_row][i] * factor
            if use_rational:
                assert vec[col] == 0
            else:
                # Python's floor division guarantees the remainder lies in
                # [0, pivot_val)
                assert vec[col] >= 0 and vec[col] < pivot_val
        return (comb, vec)
    @memorize
    def reduceProfile(self, use_rational = False):
        """Returns a vector of length num_col, indicating at each position, the
        range of possible values returned by vec_reduce. A value of 0 means any
        integer is possible. Otherwise, a value of n > 0 indicates the range
        [0, n).
        """
        profile = [0] * self.num_col
        for row in range(self.num_reduced_row):
            col = self.pivot[row]
            profile[col] = self.reduced_vecs[row][col]
            assert profile[col] > 0
            # With rational combinations every pivot column reduces to zero.
            if use_rational:
                profile[col] = 1
        return profile
    def shortForm(self, vec, use_rational = False):
        """Call vecReduce, but collect only those entries in the standard form
        that contain information (not always zero).
        """
        comb, reduced_vec = self.vecReduce(vec, use_rational)
        profile = self.reduceProfile(use_rational)
        # Entries with profile value 1 are always zero after reduction, so
        # they carry no information and are dropped.
        result = [reduced_vec[i]
                  for i in range(self.num_col) if profile[i] != 1]
        return result
    def getComb(self, vec, use_rational = False):
        """Write the given vec as a linear combination of the original vectors.
        Note the answer is not necessarily uniquely specified. Return None if
        this is impossible.
        """
        comb, reduced_vec = self.vecReduce(vec, use_rational)
        if all([n == 0 for n in reduced_vec]):
            return comb
        else:
            return None
class F2RowSystem(object):
    """Linear algebra over the field F2, with vectors stored as lists of
    0/1 integers (the F2 class from utility.py is not involved here).
    """
    def __init__(self, vecs):
        """Initialize from vecs, a nonempty matrix with 0/1 entries."""
        assert len(vecs) > 0
        self.ori_vecs = [list(vec) for vec in vecs]
        self.num_row = len(self.ori_vecs)
        self.num_col = len(self.ori_vecs[0])
        self._rowReduce()
        self.num_reduced_row = len(self.reduced_vecs)
        assert self.num_reduced_row == len(self.pivot)

    def __str__(self):
        lines = ["Row system with rows:"]
        for vec in self.ori_vecs:
            lines.append("\t".join(["%d" % n for n in vec]))
        return "\n".join(lines)

    def __repr__(self):
        return str(self)

    def _rowReduce(self):
        """Gaussian elimination over F2. Fills in self.reduced_vecs (the
        echelon rows), self.pivot (pivot column of each reduced row),
        self.reduced_comb (how each reduced row combines the originals),
        and self.zero_comb (relations among the originals).
        """
        reduced = [list(vec) for vec in self.ori_vecs]
        # combs starts as the identity: row i is originally 1 * ori_vecs[i].
        combs = [[0] * self.num_row for _ in range(self.num_row)]
        for i in range(self.num_row):
            combs[i][i] = 1

        def swap_rows(a, b):
            # Exchange rows a and b of both matrices.
            reduced[a], reduced[b] = reduced[b], reduced[a]
            combs[a], combs[b] = combs[b], combs[a]

        def add_row(src, dst):
            # Add row src onto row dst (mod 2) in both matrices.
            for i in range(self.num_col):
                reduced[dst][i] = (reduced[dst][i] + reduced[src][i]) % 2
            for i in range(self.num_row):
                combs[dst][i] = (combs[dst][i] + combs[src][i]) % 2

        self.pivot = []
        cur_row, cur_col = 0, 0
        while cur_row < self.num_row and cur_col < self.num_col:
            # Look for a 1 in this column, at or below cur_row.
            pivot_row = -1
            for row in range(cur_row, self.num_row):
                if reduced[row][cur_col] == 1:
                    pivot_row = row
                    break
            if pivot_row == -1:
                # Column is zero below cur_row; try the next column.
                cur_col += 1
                continue
            # Move the pivot row into position, then clear the rows below.
            swap_rows(cur_row, pivot_row)
            for row in range(cur_row + 1, self.num_row):
                if reduced[row][cur_col] == 1:
                    add_row(cur_row, row)
            self.pivot.append(cur_col)
            cur_row += 1
            cur_col += 1
        # All rows from cur_row down are now zero; the corresponding comb
        # rows are relations among the original vectors.
        self.reduced_vecs = reduced[:cur_row]
        self.reduced_comb = combs[:cur_row]
        self.zero_comb = combs[cur_row:]

    def getZeroComb(self):
        """Return the list of linear relations between the original vectors."""
        return self.zero_comb

    def vecReduce(self, vec):
        """Reduce vec (modified in place) to a standard form.

        Returns a tuple (comb, reduced_vec), where comb is a vector of
        length num_row, specifying the linear combination of original
        vectors that, when subtracted, yields reduced_vec.
        """
        comb = [0] * self.num_row
        cur_row = 0
        # Pivots occur in increasing column order, so cur_row only advances.
        for col in range(self.num_col):
            if vec[col] == 0:
                continue
            while cur_row < self.num_reduced_row and self.pivot[cur_row] < col:
                cur_row += 1
            if cur_row >= self.num_reduced_row or self.pivot[cur_row] != col:
                continue
            assert self.reduced_vecs[cur_row][col] == 1
            # Add the pivot row (mod 2), recording the combination used.
            for i in range(self.num_col):
                vec[i] = (vec[i] + self.reduced_vecs[cur_row][i]) % 2
            for i in range(self.num_row):
                comb[i] = (comb[i] + self.reduced_comb[cur_row][i]) % 2
        return (comb, vec)

    def getComb(self, vec):
        """Express vec as a linear combination of the original vectors,
        returning the coefficient list, or None if vec is not in their span.
        The answer is not necessarily unique.
        """
        comb, remainder = self.vecReduce(vec)
        if any(n != 0 for n in remainder):
            return None
        return comb
| 13,142
| 39.070122
| 79
|
py
|
bfh_python
|
bfh_python-master/dstructuretest.py
|
"""Unit test for dstructure.py"""
from dstructure import *
import unittest
class DStructureTest(unittest.TestCase):
    """Tests for type D structures."""

    def testDStructure(self):
        # Build a one-generator type D structure over the genus-1 split PMC
        # and verify its differential.
        pmc = splitPMC(1)
        dstr = SimpleDStructure(F2, pmc.getAlgebra())
        gen_x = SimpleDGenerator(dstr, pmc.idem([0]), "x")
        dstr.addGenerator(gen_x)
        dstr.addDelta(gen_x, gen_x, pmc.sd([(0,2)]), 1)
        expected = 1*TensorGenerator((pmc.sd([(0,2)]), gen_x), dstr.AtensorM)
        self.assertEqual(gen_x.delta(), expected)
        dstr.reindex()
        self.assertEqual(len(dstr), 1)

    def testCommonDStructures(self):
        # d^2 = 0 must hold for the standard handlebody type D structures
        # in genus 1 through 3.
        for make_dstr in (zeroTypeD, infTypeD, platTypeD):
            for genus in range(1, 4):
                make_dstr(genus).testDelta()

    def testMorToD(self):
        # Morphism complexes between standard genus-2 structures.
        self.assertEqual(len(zeroTypeD(2).morToD(zeroTypeD(2))), 4)
        self.assertEqual(len(infTypeD(2).morToD(infTypeD(2))), 4)
        cx_mixed = zeroTypeD(2).morToD(infTypeD(2))
        self.assertEqual(len(cx_mixed), 9)
        cx_mixed.simplify()
        self.assertEqual(len(cx_mixed), 1)
        # Morphism complexes for the torus (genus 1).
        zeroTypeD(1).morToD(zeroTypeD(1))
        infTypeD(1).morToD(infTypeD(1))
        cx_zero_inf = zeroTypeD(1).morToD(infTypeD(1))
        infTypeD(1).morToD(zeroTypeD(1))
        self.assertEqual(cx_zero_inf.getGradingInfo(), {(1, 2) : 1})

    def testCommonDStrMatchDiagram(self):
        # Constructing these standard structures should succeed.
        infTypeD(2)
        zeroTypeD(2)
        platTypeD(1)
        platTypeD(2)

    def testAdmDStructures(self):
        # Admissible versions of the zero-framed handlebody.
        zeroTypeDAdm(1)
        zeroTypeDAdm(2)
        dstr_genus3 = zeroTypeDAdm(3)
        dstr_genus3.simplify()

    def testDual(self):
        dual_plain = zeroTypeD(1).dual()
        dual_plain.testDelta()
        dual_adm = zeroTypeDAdm(1).dual()
        dual_adm.testDelta()
        dual_plain.checkGrading()
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 2,592
| 32.24359
| 60
|
py
|
bfh_python
|
bfh_python-master/ddstructuretest.py
|
"""Unit test for ddstructure.py"""
from ddstructure import *
from dstructure import infTypeD, zeroTypeD
from pmc import PMC
from pmc import linearPMC, splitPMC
from utility import DEFAULT_GRADING, SMALL_GRADING
import unittest
class DDStructureTest(unittest.TestCase):
    """Tests for type DD structures."""

    def testCommonIdentityDD(self):
        identity = identityDD(splitPMC(2))
        self.assertEqual(len(identity), 6)
        self.assertTrue(identity.testDelta())

    def testMorToD(self):
        # Pairing the identity DD with a standard type D structure via the
        # morphism complex should recover a one-generator structure.
        identity = identityDD(splitPMC(2))
        for d_structure in (infTypeD(2), zeroTypeD(2)):
            mor = identity.morToD(d_structure)
            self.assertTrue(mor.testDelta())
            mor.simplify()
            self.assertEqual(len(mor), 1)

    def testToDStructure(self):
        dstr = identityDD(linearPMC(1)).toDStructure()
        hochschild = dstr.morToD(dstr)
        self.assertEqual(len(hochschild), 18)
        hochschild.simplify(find_homology_basis = True)
        self.assertEqual(len(hochschild), 4)
        meaning_len = sorted(len(gen.prev_meaning)
                             for gen in hochschild.getGenerators())
        self.assertEqual(meaning_len, [1,1,1,2])

    def testIdentityMatchDiagram(self):
        for pmc in [splitPMC(2), PMC([(0,2),(1,6),(3,5),(4,7)])]:
            for idem_size in range(5):
                ddstr = identityDD(pmc, idem_size)
                # Special check for the identity diagram: all gradings
                # should be zero.
                if DEFAULT_GRADING == SMALL_GRADING:
                    for gen in ddstr.generators:
                        self.assertEqual(ddstr.grading[gen],
                                         ddstr.gr_set.zero())

    def testDual(self):
        pmc = PMC([(0,2),(1,6),(3,5),(4,7)])
        dual = identityDD(pmc).dual()
        self.assertEqual(dual.algebra1, pmc.getAlgebra())
        self.assertEqual(dual.algebra2, pmc.opp().getAlgebra())

    def testHochschildCohomology(self):
        def simplified_cochains(ddstr):
            # Hochschild cochain complex of ddstr, after simplification.
            cx = ddstr.hochschildCochains()
            cx.simplify()
            return cx
        # HH^* of A in the genus-1 case has rank 4.
        self.assertEqual(len(simplified_cochains(identityDD(splitPMC(1)))), 4)
        # Rank 1 in the extremal strands grading.
        self.assertEqual(
            len(simplified_cochains(identityDD(splitPMC(2), 0))), 1)
        # Rank should be independent of the PMC (equals 16).
        for pmc in [splitPMC(2), linearPMC(2), PMC([(0,2),(1,6),(3,5),(4,7)])]:
            self.assertEqual(len(simplified_cochains(identityDD(pmc))), 16)
class DDMorphismTest(unittest.TestCase):
    """Tests for morphisms between type DD structures."""
    def testMappingCone(self):
        # Build two one-generator DD structures over the genus-1 split PMC
        # and a closed morphism between them; check that the mapping cone of
        # the morphism has the expected generators and differentials.
        pmc = splitPMC(1)
        alg = pmc.getAlgebra()
        tensor_alg = TensorDGAlgebra((alg, alg))
        ddstr1 = SimpleDDStructure(F2, alg, alg)
        ddstr2 = SimpleDDStructure(F2, alg, alg)
        gens = dict()
        gens["x"] = SimpleDDGenerator(ddstr1, pmc.idem([0]), pmc.idem([0]), "x")
        gens["y"] = SimpleDDGenerator(ddstr2, pmc.idem([1]), pmc.idem([1]), "y")
        ddstr1.addGenerator(gens["x"])
        ddstr2.addGenerator(gens["y"])
        ddstr1.addDelta(
            gens["x"], gens["x"], pmc.sd([(0, 2)]), pmc.sd([(0, 2)]), 1)
        ddstr2.addDelta(
            gens["y"], gens["y"], pmc.sd([(1, 3)]), pmc.sd([(1, 3)]), 1)
        morphism_cx = MorDDtoDDComplex(F2, ddstr1, ddstr2)
        # The morphism is a sum of two elementary morphisms x -> (a, a') y.
        morphism = 1*MorDDtoDDGenerator(
            morphism_cx, gens["x"],
            TensorGenerator((pmc.sd([(0, 1)]), pmc.sd([(0, 1)])), tensor_alg),
            gens["y"])
        morphism += 1*MorDDtoDDGenerator(
            morphism_cx, gens["x"],
            TensorGenerator((pmc.sd([(2, 3)]), pmc.sd([(2, 3)])), tensor_alg),
            gens["y"])
        # Need diff of morphism to be zero for the mapping cone to satisfy
        # d^2 = 0.
        self.assertEqual(morphism.diff(), 0)
        mapping_cone = morphism_cx.getMappingCone(morphism)
        self.assertEqual(len(mapping_cone), 2)
        # Generators of the cone are named S_<source> and T_<target>.
        mc_gens = dict()
        for gen in mapping_cone.getGenerators():
            mc_gens[gen.name] = gen
        self.assertTrue("S_x" in mc_gens and "T_y" in mc_gens)
        self.assertEqual(len(mapping_cone.delta(mc_gens["S_x"])), 3)
        self.assertEqual(len(mapping_cone.delta(mc_gens["T_y"])), 1)
        self.assertTrue(mapping_cone.testDelta())
    def testMultiply(self):
        # Composition of morphisms of DD structures; the two orders of
        # composition give different (nonzero) results.
        pmc = splitPMC(2)
        alg = pmc.getAlgebra()
        tensor_alg = TensorDGAlgebra((alg, alg))
        ddstr = SimpleDDStructure(F2, alg, alg)
        gens = dict()
        gens["x"] = SimpleDDGenerator(ddstr, pmc.idem([0]), pmc.idem([0]), "x")
        gens["y"] = SimpleDDGenerator(ddstr, pmc.idem([1]), pmc.idem([1]), "y")
        ddstr.addGenerator(gens["x"])
        ddstr.addGenerator(gens["y"])
        morphism_cx = MorDDtoDDComplex(F2, ddstr, ddstr)
        morphism1 = 1*MorDDtoDDGenerator(
            morphism_cx, gens["x"],
            TensorGenerator((pmc.sd([(0, 1)]), pmc.sd([(0, 1)])), tensor_alg),
            gens["y"])
        morphism1 += 1*MorDDtoDDGenerator(
            morphism_cx, gens["x"],
            TensorGenerator((pmc.sd([(2, 3)]), pmc.sd([(2, 3)])), tensor_alg),
            gens["y"])
        morphism2 = 1*MorDDtoDDGenerator(
            morphism_cx, gens["y"],
            TensorGenerator((pmc.sd([(1, 2)]), pmc.sd([(1, 2)])), tensor_alg),
            gens["x"])
        self.assertEqual(len(morphism1 * morphism2), 1)
        self.assertEqual(len(morphism2 * morphism1), 1)
        self.assertNotEqual(morphism1 * morphism2, morphism2 * morphism1)
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 5,899
| 37.562092
| 80
|
py
|
bfh_python
|
bfh_python-master/localpmctest.py
|
"""Unit test for localpmc.py"""
from localpmc import *
from pmc import splitPMC
import unittest
class LocalPMCTest(unittest.TestCase):
    """Tests for the LocalPMC class: derived pairing data and enumeration of
    local strand diagrams.
    """
    def testLocalPMC(self):
        # Checks the derived fields (num_pair, otherp, pairid, pairs) for
        # three local PMCs of different shapes. In otherp/pairid, -1 marks
        # endpoints or unpaired positions.
        # One piece: (0-1-2-3*), with 0 paired with 2. Appears in short
        # underslide at bottom of full PMC.
        pmc1 = LocalPMC(4, [(0, 2),(1,)], [3])
        self.assertEqual(pmc1.num_pair, 2)
        self.assertEqual(pmc1.otherp, [2, 1, 0, -1])
        self.assertEqual(pmc1.pairid, [0, 1, 0, -1])
        self.assertEqual(pmc1.pairs, [(0, 2),(1,)])
        # One piece: (0*-1-2-3), with 1 paired with 3. Appears in short
        # underslide at top of full PMC.
        pmc2 = LocalPMC(4, [(1, 3),(2,)], [0])
        self.assertEqual(pmc2.num_pair, 2)
        self.assertEqual(pmc2.otherp, [-1, 3, 2, 1])
        self.assertEqual(pmc2.pairid, [-1, 0, 1, 0])
        self.assertEqual(pmc2.pairs, [(1, 3),(2,)])
        # Two pieces: (0*-1-2-3*) (4*-5-6*), with 2 paired with 5. Appears in
        # general arcslides.
        pmc3 = LocalPMC(7, [(1,),(2, 5)], [0, 3, 4, 6])
        self.assertEqual(pmc3.num_pair, 2)
        self.assertEqual(pmc3.otherp, [-1, 1, 5, -1, -1, 2, -1])
        self.assertEqual(pmc3.pairid, [-1, 0, 1, -1, -1, 1, -1])
        self.assertEqual(pmc3.pairs, [(1,),(2, 5)])
    def testGetLocalStrandDiagrams(self):
        # Tests number of local strand diagrams.
        pmc1 = LocalPMC(4, [(0, 2),(1,)], [3])
        self.assertEqual(len(pmc1.getStrandDiagrams()), 17)
        pmc2 = LocalPMC(4, [(1, 3),(2,)], [0])
        self.assertEqual(len(pmc2.getStrandDiagrams()), 17)
        pmc3 = LocalPMC(5, [(1, 3),(2,)], [0, 4])
        self.assertEqual(len(pmc3.getStrandDiagrams()), 41)
        pmc4 = LocalPMC(7, [(1,),(2, 5)], [0, 3, 4, 6])
        self.assertEqual(len(pmc4.getStrandDiagrams()), 78)
class LocalStrandsTest(unittest.TestCase):
    """Tests for LocalStrands: interval multiplicities and propagating a left
    idempotent to the right.
    """
    def setUp(self):
        # One piece: (0*-1-2-3-4*), with 1 paired with 3. Appears in short
        # underslide in the middle of PMC.
        self.pmc1 = LocalPMC(5, [(1, 3),(2,)], [0, 4])
    def testConstructStrands(self):
        # multiplicity[i] counts how many strands cover interval (i, i+1).
        strands1 = LocalStrands(self.pmc1, [(0, 1)])
        self.assertEqual(strands1.multiplicity, [1, 0, 0, 0])
        strands2 = LocalStrands(self.pmc1, [(1, 4)])
        self.assertEqual(strands2.multiplicity, [0, 1, 1, 1])
    def testPropagateRight(self):
        strands1 = LocalStrands(self.pmc1, [(0, 1)])
        strands2 = LocalStrands(self.pmc1, [(2, 4)])
        # Each case is (strands, left idempotent, expected right idempotent);
        # None marks an incompatible combination.
        test_cases = [
            # Introduce pair (1, 3).
            (strands1, [], [0]),
            # Does not interfere with the single point (2,) that already exists.
            (strands1, [1], [0, 1]),
            # However, returns None (not compatible) if pair (1, 3) already
            # exists.
            (strands1, [0], None),
            # Removes single point (2,)
            (strands2, [1], []),
            # Does not interfere with pair (1, 3)
            (strands2, [0, 1], [0]),
            # Returns none if single point (2,) does not appear in left_idem
            (strands2, [0], None),
        ]
        for strand, left_idem, right_idem in test_cases:
            if right_idem is None:
                self.assertEqual(strand.propagateRight(left_idem), None)
            else:
                self.assertEqual(strand.propagateRight(left_idem),
                                 LocalIdempotent(self.pmc1, right_idem))
class LocalStrandDiagramTest(unittest.TestCase):
    """Tests for local strand diagrams: multiplication, differential,
    anti-differential and factorization.
    """
    def testMultiply(self):
        # 0*-1-2-3-4*, with 1 and 3 paired
        pmc1 = LocalPMC(5, [(1, 3),(2,)], [0, 4])
        sd1 = pmc1.sd([(0, 1)])
        sd2 = pmc1.sd([(1, 2)])
        sd3 = pmc1.sd([2, (0, 1)])
        sd4 = pmc1.sd([(0, 2)])
        sd5 = pmc1.sd([(0, 1),(1, 2)])
        sd6 = pmc1.sd([(3, 4)])
        # Products are nonzero exactly when endpoints/idempotents match.
        self.assertEqual(sd1 * sd2, 1*sd4)
        self.assertEqual(sd2 * sd1, 0)
        self.assertEqual(sd2 * sd3, 1*sd5)
        self.assertEqual(sd3 * sd4, 0)
        self.assertEqual(sd1 * sd6, 0)
        # 0-1-2-3*-4*-5-6-7, with 0 and 2, 5 and 7 paired.
        pmc2 = LocalPMC(8, [(0, 2),(1,),(5, 7),(6,)], [3, 4])
        sd1 = pmc2.sd([(0, 3)])
        sd2 = pmc2.sd([(4, 6)])
        sd3 = pmc2.sd([(0, 3),(4, 6)])
        self.assertEqual(sd1 * sd2, 1*sd3)
    def testDiff(self):
        # 0*-1-2-3-4*, with 1 and 3 paired
        pmc1 = LocalPMC(5, [(1, 3),(2,)], [0, 4])
        sd1 = pmc1.sd([(0, 4)])
        sd2 = pmc1.sd([1, (0, 4)])
        sd3 = pmc1.sd([2, (0, 4)])
        sd4 = pmc1.sd([(0, 1),(1, 4)])
        sd5 = pmc1.sd([(0, 2),(2, 4)])
        sd6 = pmc1.sd([(0, 3),(3, 4)])
        # The differential resolves crossings of a moving strand with a
        # horizontal strand.
        self.assertEqual(sd1.diff(), 0)
        self.assertEqual(sd2.diff(), 1*sd4 + 1*sd6)
        self.assertEqual(sd3.diff(), 1*sd5)
    def testAntiDiff(self):
        # 0*-1-2-3-4*, with 1 and 3 paired
        pmc1 = LocalPMC(5, [(1, 3),(2,)], [0, 4])
        sd1 = pmc1.sd([(0, 1),(1, 2)])
        self.assertEqual(len(sd1.antiDiff()), 1)
        sd2 = pmc1.sd([(0, 1),(1, 2),(2, 4)])
        self.assertEqual(len(sd2.antiDiff()), 2)
    def testFactor(self):
        # 0*-1-2-3-4*, with 1 and 3 paired
        pmc1 = LocalPMC(5, [(1, 3),(2,)], [0, 4])
        # (0->2)*idem, (0->1)*(1->2), idem*(0->2)
        sd1 = pmc1.sd([(0, 2)])
        self.assertEqual(len(sd1.factor()), 3)
        # (1,0->2)*idem, idem*(1,0->2)
        sd2 = pmc1.sd([1, (0, 2)])
        self.assertEqual(len(sd2.factor()), 2)
class PMCSplittingTest(unittest.TestCase):
    """Tests for PMCSplitting: restricting a full PMC to a local piece,
    joining local and outer strand diagrams, and restricting strand diagrams.

    The only code change versus the original is using ``is None`` instead of
    ``== None`` in testJoin (PEP 8 identity comparison); behavior is
    unchanged.
    """
    def testRestrictPMC(self):
        # Each restriction returns (local PMC, mapping from full-PMC points
        # to local-PMC points).
        pmc1 = splitPMC(1)
        pmc2 = splitPMC(2)
        self.assertEqual(PMCSplitting.restrictPMC(pmc1, [(0, 2)]),
                         (LocalPMC(4, [(0, 2), (1,)], [3]), {0:0, 1:1, 2:2}))
        self.assertEqual(PMCSplitting.restrictPMC(pmc1, [(1, 3)]),
                         (LocalPMC(4, [(1, 3), (2,)], [0]), {1:1, 2:2, 3:3}))
        self.assertEqual(PMCSplitting.restrictPMC(pmc2, [(5, 7)]),
                         (LocalPMC(4, [(1, 3), (2,)], [0]), {5:1, 6:2, 7:3}))
        # Restriction is (0*-1-2*) (3*-4-5*), with 2 and 4 paired
        self.assertEqual(PMCSplitting.restrictPMC(pmc2, [(4, 4), (6, 6)]),
                         (LocalPMC(6, [(1, 4)], [0, 2, 3, 5]), {4:1, 6:4}))
    def testComplementIntervals(self):
        self.assertEqual(
            PMCSplitting.complementIntervals(splitPMC(2), [(0, 2), (6, 6)]),
            [(3, 5), (7, 7)])
    def testJoin(self):
        pmc = splitPMC(2)
        # Local restriction is (0-1-2-3*), (4*-5-6*), where 0 and 2 are paired
        # (pair-id = 0), and 1, 5 have pair-id 1 and 2, respectively.
        # Outer restriction is (0*-1-2-3-4*), (5*-6), where 3 and 6 are paired
        # (pair-id = 2), and 1, 2 have pair-id 0 and 1, respectively.
        # Correspondence between points in pmc and local_pmc1/2:
        # pmc - 0 1 2 3 4 5 6 7
        # local_pmc - 0 1 2 3* 4* 5 6*
        # outer_pmc - 0* 1 2 3 4* 5* 6
        splitting = PMCSplitting(pmc, [(0, 2), (6, 6)])
        local_pmc = splitting.local_pmc
        outer_pmc = splitting.outer_pmc
        # Format is as follows:
        # - input to local_pmc.sd
        # - input to outer_pmc.sd
        # - input to pmc.sd. None indicates join should fail.
        test_data = [
            # One strand
            ([(0, 1)], [], [(0, 1)]),
            ([(0, 3)], [(0, 2)], [(0, 4)]),
            ([(0, 3),(4, 5)], [(0, 4)], [(0, 6)]),
            ([(0, 3),(4, 6)], [(0, 4),(5, 6)], [(0, 7)]),
            ([(1, 3)], [(0, 3)], [(1, 5)]),
            ([(1, 3)], [(0, 3)], [(1, 5)]),
            # Mismatch cases
            ([(4, 5)], [], None),
            ([(0, 2)], [(0, 3)], None),
            ([(0, 3),(4, 5)], [], None),
            ([(0, 3)], [(5, 6)], None),
            ([1], [(1, 4)], None),
            ([(0, 3), 5], [(2, 3)], None),
            # Several strands
            ([1,(2, 3)], [(0, 1),(1, 2)], [(2, 3),(3, 4)]),
            ([(4, 5),(5, 6)], [2,(3, 4),(5, 6)], [(5, 6),(6, 7)]),
            # Double horizontal
            ([0, 1, 5], [1, 2], [0, 1, 4]),
            ([0, 1], [1, 2], [0, 1, 4]),
            # Strands starting and ending at paired points
            ([(0, 1),(2, 3)], [(0, 2)], None),
            ([(0, 1),(1, 3)], [(0, 1)], None)
        ]
        for sd1, sd2, sd_total in test_data:
            joined_sd = splitting.joinStrandDiagram(local_pmc.sd(sd1),
                                                    outer_pmc.sd(sd2))
            if sd_total is None:
                self.assertEqual(joined_sd, None)
            else:
                self.assertEqual(joined_sd, pmc.sd(sd_total))
    def testRestrictStrandDiagram(self):
        # First test case: one local PMC at the boundary
        # local_pmc1 is 0-1-2-3*, where 0 and 2 are paired.
        pmc = splitPMC(2)
        splitting1 = PMCSplitting(pmc, [(0, 2)])
        local_pmc1 = splitting1.local_pmc
        for full_sd, local_sd in [
                ([(0, 1)], [(0, 1)]),
                ([(0, 5)], [(0, 3)]),
                ([3, (0, 5)], [1, (0, 3)]),
                ([3, (4, 5)], [1]),
                ([(2, 5)], [(2, 3)]),
                ([0, (1, 3)], [0, (1, 3)]),
        ]:
            self.assertEqual(
                splitting1.restrictStrandDiagramLocal(pmc.sd(full_sd)),
                local_pmc1.sd(local_sd))
        # Second test case: one local PMC's in the middle
        # local_pmc2 is 0*-1-2-3-4*, where no points are paired.
        # so point 1 has pair-id 0, point 2 has pair-id 1, and point3 has
        # pair-id 2.
        splitting2 = PMCSplitting(pmc, [(3, 5)])
        local_pmc2 = splitting2.local_pmc
        for full_sd, local_sd in [
                ([(0, 1)], []),
                ([(0, 6)], [(0, 4)]),
                ([(1, 6)], [(0, 4)]),
                ([1], [1]),
                ([(0, 4)], [(0, 2)]),
                ([(4, 6)], [(2, 4)]),
        ]:
            self.assertEqual(
                splitting2.restrictStrandDiagramLocal(pmc.sd(full_sd)),
                local_pmc2.sd(local_sd))
        # Third test case: local PMC has two separated intervals.
        # local_pmc3 is (0-1-2-3*), (4*-5-6*), where 0 and 2 are paired
        # (pair-id = 0), 1 and 5 are single (pair-id = 1 and 2).
        splitting3 = PMCSplitting(pmc, [(0, 2), (6, 6)])
        local_pmc3 = splitting3.local_pmc
        for full_sd, local_sd in [
                ([(0, 7)], [(0, 3), (4, 6)]),
                ([(0, 6)], [(0, 3), (4, 5)]),
                ([(0, 5)], [(0, 3)]),
                ([(2, 6)], [(2, 3), (4, 5)]),
                ([(3, 6)], [(4, 5)]),
                ([0, 4], [0, 5]),
        ]:
            self.assertEqual(
                splitting3.restrictStrandDiagramLocal(pmc.sd(full_sd)),
                local_pmc3.sd(local_sd))
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 10,748
| 39.258427
| 80
|
py
|
bfh_python
|
bfh_python-master/arcslidedatest.py
|
"""Unit test for arcslideda.py"""
from arcslideda import *
from arcslide import Arcslide
from autocompleteda import autoCompleteDA
from dstructure import zeroTypeD
from ddstructure import identityDD
from latex import beginDoc, endDoc, showArrow
from pmc import PMC
from pmc import antipodalPMC, linearPMC, splitPMC
import unittest
class ArcslideDATest(unittest.TestCase):
    """Tests for the type DA structures associated to arcslides.

    Each test* method checks testDelta (the type DA structure equations) for
    a family of arcslides of one combinatorial kind; the *Local variants
    check the same on the local (restricted) DA structures. The AgreesWithDD
    tests compare against the type DD structures from arcslide.py.
    """
    def testShortUnderslideDown(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 1, 0),
            Arcslide(splitPMC(1), 2, 1),
            Arcslide(splitPMC(2), 1, 0),
            Arcslide(splitPMC(2), 6, 5),
            Arcslide(linearPMC(2), 1, 0),
            Arcslide(linearPMC(2), 6, 5),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 1, 0),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 6, 5),
            Arcslide(splitPMC(2), 2, 1),
            Arcslide(splitPMC(2), 5, 4),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 4, 3),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 3, 2),
        ]
        for slide in slides_to_test:
            print(slide)
            dastr = ArcslideDA(slide).toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
    def testShortUnderslideDownLocal(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 1, 0),
            Arcslide(splitPMC(1), 2, 1),
            Arcslide(splitPMC(2), 2, 1),
        ]
        for slide in slides_to_test:
            local_dastr = ArcslideDA(slide).getLocalDAStructure()
            self.assertTrue(local_dastr.testDelta())
    def testGeneralUnderslideDown(self):
        slides_to_test = [
            Arcslide(antipodalPMC(2), 1, 0),
            Arcslide(antipodalPMC(2), 2, 1),
            Arcslide(antipodalPMC(2), 3, 2),
            Arcslide(antipodalPMC(2), 4, 3),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 1, 0),
            Arcslide(PMC([(0, 5), (1, 3), (2, 6), (4, 7)]), 1, 0),
            Arcslide(PMC([(0, 3), (1, 5), (2, 7), (4, 6)]), 1, 0),
            Arcslide(PMC([(0, 2), (1, 4), (3, 6), (5, 7)]), 2, 1),
            Arcslide(PMC([(0, 2), (1, 4), (3, 6), (5, 7)]), 4, 3),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 2, 1),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 2, 1),
            Arcslide(PMC([(0, 3), (1, 5), (2, 7), (4, 6)]), 2, 1),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 5, 4),
            Arcslide(PMC([(0, 3), (1, 5), (2, 7), (4, 6)]), 3, 2),
            Arcslide(PMC([(0, 5), (1, 3), (2, 6), (4, 7)]), 5, 4),
        ]
        for slide in slides_to_test:
            print(slide)
            dastr = ArcslideDA(slide).toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
    def testGeneralUnderslideDownLocal(self):
        slides_to_test = [
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 1, 0),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 2, 1),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 5, 4),
        ]
        for slide in slides_to_test:
            local_dastr = ArcslideDA(slide).getLocalDAStructure()
            self.assertTrue(local_dastr.testDelta())
    def testGeneralOverslideDown(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 3, 2),
            Arcslide(splitPMC(2), 3, 2),
            Arcslide(splitPMC(2), 4, 3),
            Arcslide(splitPMC(2), 7, 6),
            Arcslide(linearPMC(2), 3, 2),
            Arcslide(linearPMC(2), 5, 4),
            Arcslide(linearPMC(2), 7, 6),
            Arcslide(antipodalPMC(2), 5, 4),
            Arcslide(antipodalPMC(2), 6, 5),
            Arcslide(antipodalPMC(2), 7, 6),
        ]
        for slide in slides_to_test:
            print(slide)
            dastr = ArcslideDA(slide).toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
    def testGeneralOverslideDownLocal(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 3, 2),
            Arcslide(splitPMC(2), 3, 2),
            Arcslide(splitPMC(2), 4, 3),
            Arcslide(splitPMC(2), 7, 6),
        ]
        for slide in slides_to_test:
            local_dastr = ArcslideDA(slide).getLocalDAStructure()
            self.assertTrue(local_dastr.testDelta())
    def testShortUnderslideUp(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 1, 2),
            Arcslide(splitPMC(1), 2, 3),
            Arcslide(splitPMC(2), 1, 2),
            Arcslide(splitPMC(2), 6, 7),
            Arcslide(linearPMC(2), 1, 2),
            Arcslide(linearPMC(2), 6, 7),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 1, 2),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 6, 7),
            Arcslide(splitPMC(2), 2, 3),
            Arcslide(splitPMC(2), 5, 6),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 4, 5),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 3, 4),
        ]
        for slide in slides_to_test:
            print(slide)
            dastr = ArcslideDA(slide).toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
    def testShortUnderslideUpLocal(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 1, 2),
            Arcslide(splitPMC(1), 2, 3),
            Arcslide(splitPMC(2), 2, 3),
        ]
        for slide in slides_to_test:
            local_dastr = ArcslideDA(slide).getLocalDAStructure()
            self.assertTrue(local_dastr.testDelta())
    def testGeneralUnderslideUp(self):
        slides_to_test = [
            Arcslide(antipodalPMC(2), 3, 4),
            Arcslide(antipodalPMC(2), 4, 5),
            Arcslide(antipodalPMC(2), 5, 6),
            Arcslide(antipodalPMC(2), 6, 7),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 2, 3),
            Arcslide(PMC([(0, 5), (1, 3), (2, 6), (4, 7)]), 4, 5),
            Arcslide(PMC([(0, 3), (1, 5), (2, 7), (4, 6)]), 2, 3),
            Arcslide(PMC([(0, 2), (1, 4), (3, 6), (5, 7)]), 3, 4),
            Arcslide(PMC([(0, 2), (1, 4), (3, 6), (5, 7)]), 5, 6),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 5, 6),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 5, 6),
            Arcslide(PMC([(0, 3), (1, 5), (2, 7), (4, 6)]), 4, 5),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 6, 7),
            Arcslide(PMC([(0, 3), (1, 5), (2, 7), (4, 6)]), 6, 7),
            Arcslide(PMC([(0, 5), (1, 3), (2, 6), (4, 7)]), 6, 7),
        ]
        for slide in slides_to_test:
            print(slide)
            dastr = ArcslideDA(slide).toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
    def testGeneralUnderslideUpLocal(self):
        slides_to_test = [
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 2, 3),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 5, 6),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 6, 7),
        ]
        for slide in slides_to_test:
            local_dastr = ArcslideDA(slide).getLocalDAStructure()
            self.assertTrue(local_dastr.testDelta())
    def testGeneralOverslideUp(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 0, 1),
            Arcslide(splitPMC(2), 0, 1),
            Arcslide(splitPMC(2), 3, 4),
            Arcslide(splitPMC(2), 4, 5),
            Arcslide(linearPMC(2), 0, 1),
            Arcslide(linearPMC(2), 2, 3),
            Arcslide(linearPMC(2), 4, 5),
            Arcslide(antipodalPMC(2), 0, 1),
            Arcslide(antipodalPMC(2), 1, 2),
            Arcslide(antipodalPMC(2), 2, 3),
        ]
        for slide in slides_to_test:
            print(slide)
            dastr = ArcslideDA(slide).toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
    def testGeneralOverslideUpLocal(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 0, 1),
            Arcslide(splitPMC(2), 0, 1),
            Arcslide(splitPMC(2), 3, 4),
            Arcslide(splitPMC(2), 4, 5),
        ]
        for slide in slides_to_test:
            local_dastr = ArcslideDA(slide).getLocalDAStructure()
            self.assertTrue(local_dastr.testDelta())
    def testUnderslideAgreesWithDD(self):
        # Tensoring the DA structure with the identity DD should recover the
        # type DD structure of the arcslide.
        slides_to_test = [
            # Short underslides down
            Arcslide(splitPMC(1), 1, 0),
            Arcslide(linearPMC(2), 6, 5),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 1, 0),
            # General underslides down
            Arcslide(antipodalPMC(2), 1, 0),
            Arcslide(PMC([(0, 2), (1, 4), (3, 6), (5, 7)]), 4, 3),
            # Short underslides up
            Arcslide(splitPMC(1), 1, 2),
            Arcslide(linearPMC(2), 1, 2),
            Arcslide(PMC([(0, 2), (1, 6), (3, 5), (4, 7)]), 4, 5),
            # General underslides up
            Arcslide(antipodalPMC(2), 6, 7),
            Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 2, 3),
        ]
        for slide in slides_to_test:
            dastr = ArcslideDA(slide)
            ddstr = dastr.tensorDD(identityDD(slide.end_pmc))
            ori_ddstr = slide.getDDStructure()
            self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
    def testOverslideAgreesWithDD(self):
        # This is not guaranteed (since there is choice involved in type DD for
        # overslides, but appears to work
        slides_to_test = [
            # General overslides down
            Arcslide(splitPMC(1), 3, 2),
            Arcslide(splitPMC(2), 3, 2),
            Arcslide(splitPMC(2), 4, 3),
            Arcslide(splitPMC(2), 7, 6),
            Arcslide(linearPMC(2), 3, 2),
            Arcslide(linearPMC(2), 5, 4),
            Arcslide(linearPMC(2), 7, 6),
            Arcslide(antipodalPMC(2), 5, 4),
            Arcslide(antipodalPMC(2), 6, 5),
            Arcslide(antipodalPMC(2), 7, 6),
            # General overslides up
            Arcslide(splitPMC(1), 0, 1),
            Arcslide(splitPMC(2), 0, 1),
            Arcslide(splitPMC(2), 3, 4),
            Arcslide(splitPMC(2), 4, 5),
            Arcslide(linearPMC(2), 0, 1),
            Arcslide(linearPMC(2), 2, 3),
            Arcslide(linearPMC(2), 4, 5),
            Arcslide(antipodalPMC(2), 0, 1),
            Arcslide(antipodalPMC(2), 1, 2),
            Arcslide(antipodalPMC(2), 2, 3),
        ]
        for slide in slides_to_test:
            dastr = ArcslideDA(slide)
            ddstr = dastr.tensorDD(identityDD(slide.end_pmc))
            ori_ddstr = slide.getDDStructure()
            self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
    def testAutoCompleteArcslide(self):
        # Auto-completion should reconstruct the full local DA structure
        # starting from the seed arrows only.
        for slide, d_side_order in [
                (Arcslide(splitPMC(2), 2, 1), (3, 0)),
                (Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 2, 1),
                 (5, 0, 4, 2)),
                (Arcslide(splitPMC(2), 2, 3), (3, 0)),
                (Arcslide(PMC([(0, 3), (1, 6), (2, 4), (5, 7)]), 5, 6),
                 (5, 0, 3, 1))]:
            print(slide, d_side_order)
            raw_da = ArcslideDA(slide).getLocalDAStructure(seeds_only = True)
            autoCompleteDA(raw_da, d_side_order)
    def testGrading(self):
        slides_to_test = [
            Arcslide(splitPMC(1), 1, 0),
            Arcslide(splitPMC(2), 4, 3),
            Arcslide(linearPMC(2), 1, 0),
        ]
        for slide in slides_to_test:
            dastr = ArcslideDA(slide)
            dastr.toSimpleDAStructure().checkGrading()
class TensorTest(unittest.TestCase):
    """Tests for tensoring a type DA structure with a type D structure."""

    def testDATensorD(self):
        # Mostly a smoke test: verify the genus-5 computation finishes in a
        # reasonable amount of time and yields the expected rank.
        slide = Arcslide(splitPMC(5), 2, 1) # will change zeroTypeD
        result = ArcslideDA(slide).tensorD(zeroTypeD(5))
        result.reindex()
        self.assertEqual(len(result), 2)
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 11,848
| 40
| 79
|
py
|
bfh_python
|
bfh_python-master/braidtest.py
|
"""Unit test for braid.py"""
from math import gcd
from braid import *
import unittest
import time # benchmarking
import cProfile
import pstats
class BraidTest(unittest.TestCase):
    """Tests for converting braid generators into sequences of arcslides."""
    def testGetArcslide(self):
        # Braid group on 6 strands; pos_size lists the expected number of
        # arcslides for generators 1..5.
        # NOTE(review): assertTrue treats its second argument as a failure
        # message, not an expected value, so these checks only verify the
        # lists are non-empty. assertEqual(len(...), pos_size[i-1]) was
        # probably intended -- confirm the expected counts before changing.
        br2 = Braid(6)
        pos_size = [1,2,2,1,4]
        for i in range(1, 6):
            self.assertTrue(len(br2.getArcslides(i)), pos_size[i-1])
            self.assertTrue(len(br2.getArcslides(-i)), pos_size[i-1])
        self.assertTrue(len(br2.getArcslides(list(range(1, 6)))), sum(pos_size))
class BraidCapTest(unittest.TestCase):
    """Tests for plat closures (BraidCap) and their type D structures."""
    def testPlatTypeD2(self):
        self.assertEqual(len(platTypeD2(3, True)), 1)
    def testGenus2Algebra(self):
        # Compute all simplified morphism complexes between the five genus-2
        # plat caps; the total number of generators should be 52.
        dstrs = dict()
        for end3 in [(6,5,4,3,2,1),
                     (6,3,2,5,4,1),
                     (4,3,2,1,6,5),
                     (2,1,6,5,4,3),
                     (2,1,4,3,6,5)]:
            dstrs[end3] = BraidCap(end3).openCap()
        algs = dict()
        total_gen = 0
        for end3, dstr in list(dstrs.items()):
            algs[end3] = dict()
            for end3to, dstrto in list(dstrs.items()):
                algs[end3][end3to] = dstr.morToD(dstrto)
                algs[end3][end3to].simplify()
                total_gen += len(algs[end3][end3to])
        self.assertEqual(total_gen, 52)
    def testGetCobordismSequence(self):
        # Each matching should decompose into the given sequence of
        # elementary cobordisms.
        for matching, result in [
                ((2, 1), []), ((4, 3, 2, 1), [1]), ((2, 1, 4, 3), [0]),
                ((6, 5, 4, 3, 2, 1), [2, 1]),
                ((6, 3, 2, 5, 4, 1), [1, 1]),
                ((2, 1, 6, 5, 4, 3), [0, 1]),
                ((10, 3, 2, 7, 6, 5, 4, 9, 8, 1), [1, 2, 1, 1])]:
            self.assertEqual(
                BraidCap(matching).getCobordismSequence(), result)
    def testCobordisms(self):
        # Expected number of generators of the (simplified) cap structures.
        # 6 strands (genus 2)
        for end, expected_len in [
                ((6,5,4,3,2,1), 2),
                ((6,3,2,5,4,1), 1),
                ((4,3,2,1,6,5), 2),
                ((2,1,6,5,4,3), 1),
                ((2,1,4,3,6,5), 1)]:
            self.assertEqual(len(BraidCap(end).openCap()), expected_len)
        # 8 strands (genus 3)
        for end, expected_len in [
                ((8,7,6,5,4,3,2,1), 4),
                ((8,7,4,3,6,5,2,1), 3),
                ((8,5,4,3,2,7,6,1), 2),
                ((8,3,2,7,6,5,4,1), 2),
                ((8,3,2,5,4,7,6,1), 1),
                ((6,5,4,3,2,1,8,7), 4),
                ((6,3,2,5,4,1,8,7), 3),
                ((4,3,2,1,8,7,6,5), 2),
                ((4,3,2,1,6,5,8,7), 2),
                ((2,1,8,7,6,5,4,3), 2),
                ((2,1,8,5,4,7,6,3), 1),
                ((2,1,6,5,4,3,8,7), 2),
                ((2,1,4,3,8,7,6,5), 1),
                ((2,1,4,3,6,5,8,7), 1)]:
            self.assertEqual(len(BraidCap(end).openCap()), expected_len)
class HFTest(unittest.TestCase):
    """Heegaard Floer homology computations for two specific knots, checked
    with all three available computation methods."""

    def testHFPretzel(self):
        # The (-2, 3, 5)-pretzel knot: rank 1.
        cap = [6,3,2,5,4,1]
        br = BridgePresentation("pretzel_-2_3_5",
                                cap, 5*[1]+3*[3]+2*[-5], cap)
        # Test three methods of finding HF
        self.assertEqual(len(br.getHF(method = "Mor")), 1)
        self.assertEqual(len(br.getHF(method = "Tensor")), 1)
        self.assertEqual(len(br.getHFByLocalDA()), 1)

    def test11n_6(self):
        # The knot 11n_6: rank 21.
        cap = [6,5,4,3,2,1]
        br = BridgePresentation("11n_6", cap,
                                [-1, -4, 3, 2, -1, 2, -3, 1, 1, 2, -3, 4],
                                cap)
        # Test three methods of finding HF
        self.assertEqual(len(br.getHF(method = "Mor")), 21)
        self.assertEqual(len(br.getHF(method = "Tensor")), 21)
        self.assertEqual(len(br.getHFByLocalDA()), 21)
class HFTestFromFile(unittest.TestCase):
    """Check HF ranks for selected knots against precomputed data files."""
    def testHFFromFile(self):
        # Names of the knots to test; the rest of the input file is skipped.
        to_test = ["3_1", "4_1",
                   "12n_0210", # 1*[3 2 2 2 2 2]
                   "12n_0292", # 1*[1 0 2 2 0 0 2 2]
                   "11n_6", "11n_9", "11n_24", # 3-bridge
                   "11a_14", #"12n_0055", "12n_0056", # 4-bridge
                   ]
        # The two files are read in lockstep: line i of the output file holds
        # the expected HF rank for the presentation on line i of the input.
        with open('data/input_12_FL.txt', 'r') as input_file:
            with open('data/output_12.txt', 'r') as check_file:
                while True:
                    line = input_file.readline()
                    if len(line) == 0:
                        break
                    expected_hf = int(check_file.readline().split()[1])
                    cur_br = readBridgePresentation(line)
                    if cur_br.name in to_test:
                        # NOTE(review): num_strands / 2 - 1 is a float under
                        # true division; %d still formats it, but // would be
                        # cleaner.
                        print("Testing: %s (genus %d)" % \
                            (cur_br.name, cur_br.num_strands / 2 - 1))
                        start_time = time.time()
                        cx = cur_br.getHFByLocalDA()
                        if hasattr(cx, "grading"):
                            print(cx.getGradingInfo())
                        self.assertEqual(len(cx), expected_hf)
                        print("Time elapsed (s): ", time.time() - start_time)
class TorusKnotTest(unittest.TestCase):
    """Timing/smoke tests on torus knots T(m, n); prints sizes or grading
    info rather than asserting exact values.
    """
    def testTorus(self):
        """Compute HF by local DA for a selection of torus knots."""
        def singleTest(m, n):
            # Build T(m, n) as a bridge presentation: the braid word is n
            # copies of the half twist on m strands.
            start_time = time.time()
            cap = list(range(2*m, 0, -1))
            half_twist = list(range(m-1, 0, -1))
            br = BridgePresentation("T%d_%d" % (m, n), cap, n*half_twist, cap)
            cx = br.getHFByLocalDA()
            print(br, end=' ')
            if hasattr(cx, "grading"):
                print(cx.getGradingInfo())
            else:
                print(len(cx))
            print("Time elapsed (s): ", time.time() - start_time)
        # Larger values of n (left commented) take substantially longer.
        for n in [1,2,4,5,7,8,10,20]:#,50,100]:
            singleTest(3, n)
        for n in [1,3,5,7]:#,9,11,13,15,17,19]:
            singleTest(4, n)
        for n in [1,2,3]:#,4,6]:
            singleTest(5, n)
        # for n in [1,5]:
        #     singleTest(6, n)

    # def testGenus5FromFile(self):
    #     # Empty means test all
    #     to_test = []
    #     with open('data/input_14_FL.txt', 'r') as input_file:
    #         while True:
    #             line = input_file.readline()
    #             if len(line) == 0:
    #                 break
    #             cur_br = readBridgePresentation(line)
    #             if len(to_test) == 0 or cur_br.name in to_test:
    #                 print("Testing:", cur_br.name)
    #                 start_time = time.time()
    #                 cx = cur_br.getHFByLocalDA()
    #                 print(cx.getGradingInfo())
    #                 print("Time elapsed (s): ", time.time() - start_time)
class SpecSeqTest(unittest.TestCase):
    """Check spectral sequence computations against precomputed answers."""
    def testGetSpecSeq(self):
        """For each knot in the input file, read the expected filtered
        gradings (one header line plus num_pages data lines) from the check
        file; for knots named in to_test, compute getSpecSeq() and compare.
        """
        to_test = ["3_1", "4_1",
                   "11n_9", "11n_12", "11n_19", # 3-bridge
                   "12n_0475", # four pages
                   "12n_0553", # 4-bridge
                   ]
        with open('data/input_12_FL.txt', 'r') as input_file:
            with open('data/output_12_sseq.txt', 'r') as check_file:
                while True:
                    # Read input
                    line = input_file.readline()
                    if len(line) == 0:
                        break
                    cur_br = readBridgePresentation(line)
                    # Read expected output
                    output_header = check_file.readline().split()
                    # Bug fix: this was previously the bare expression
                    # `output_header[0] == cur_br.name`, a no-op. Assert that
                    # the input and check files stay in sync.
                    self.assertEqual(output_header[0], cur_br.name)
                    num_pages = int(output_header[1])
                    output_lines = [check_file.readline()
                                    for i in range(num_pages)]
                    if cur_br.name in to_test:
                        print("Testing: %s (genus %d)" % \
                              (cur_br.name, cur_br.num_strands / 2 - 1))
                        start_time = time.time()
                        filt_grs = cur_br.getSpecSeq()
                        self.assertEqual(len(filt_grs), len(output_lines))
                        for i in range(len(output_lines)):
                            self.assertEqual(filt_grs[i], [
                                int(s) for s in output_lines[i].split()])
                        print("Time elapsed (s): ", time.time() - start_time)
    def testGetSpecSeqProfile(self):
        """Run testGetSpecSeq under cProfile and print the 50 most expensive
        calls by cumulative time.
        """
        cProfile.runctx('self.testGetSpecSeq()', globals(), locals(), 'restats')
        p = pstats.Stats('restats')
        p.sort_stats('cumulative').print_stats(50)
class TorusSpecSeqTest(unittest.TestCase):
    """Timing/smoke tests: spectral sequences of torus knots, printed rather
    than asserted.
    """
    def testTorusSpecSeq(self):
        """Compute and print getSpecSeq() for a selection of torus knots."""
        def singleTest(m, n):
            # T(m, n) as n half twists on m strands, plat-closed.
            print("Testing T(%d,%d): " % (m, n), end=' ')
            start_time = time.time()
            cap = list(range(2*m, 0, -1))
            half_twist = list(range(m-1, 0, -1))
            br = BridgePresentation("T%d_%d" % (m, n), cap, n*half_twist, cap)
            filt_grs = br.getSpecSeq()
            print()
            for filt_gr in filt_grs:
                print(filt_gr)
            print("Time elapsed (s): ", time.time() - start_time)
        # Result for T(4, 5):
        # [1, 2, 1, 2, 2, 1, 1, 1, 1, 0, 1]
        # [1, 2, 1, 1, 2, 0, 0, 1, 0, 0, 1]
        # [1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 1]
        # Result for T(5, 6):
        # [1, 2, 2, 2, 3, 2, 2, 2, 1, 1, 1, 1, 0, 1]
        # [1, 1, 1, 1, 2, 1, 1, 1, 0, 0, 1, 0, 0, 1]
        # [0, 1, 1, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 1]
        for n in [1,2,4,5,7,8,10,20,50,100]:
            singleTest(3, n)
        # Larger cases (left commented) take substantially longer.
        for n in [1,3,5,7,9,11]:#,13,15,17,19]:
            singleTest(4, n)
        # singleTest(5, 4)
        # for n in [4,6]:
        #     singleTest(5, n)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 9,516
| 39.156118
| 80
|
py
|
bfh_python
|
bfh_python-master/cobordism.py
|
"""Type DD structures for cobordisms between linear pointed matched circles."""
from ddstructure import DDStrFromChords
from pmc import linearPMC
from pmc import Idempotent, Strands
from utility import memorize
# Two sides for the larger PMC: a Cobordism is attached on either the LEFT
# or the RIGHT side.
LEFT, RIGHT = 0, 1
class Cobordism(object):
    """Represents an elementary cobordism between the linear PMC of genus
    ``genus`` (the "large" PMC) and the linear PMC of genus ``genus - 1``
    (the "small" PMC), localized at a chosen c-pair of the large PMC.
    """
    def __init__(self, genus, c_pair, side):
        """Specifies the genus of the larger (linear PMC), the c-pair at which
        the cobordism occurred, and the side of the larger PMC (LEFT or RIGHT).
        """
        self.genus = genus  # genus of larger PMC
        self.n = 4 * self.genus  # number of points in the larger PMC
        self.c_pair = c_pair
        self.side = side  # LEFT or RIGHT
        # The first and last pairs of the linear PMC are the degenerate cases;
        # these have a single point (p) between the two c-points instead of
        # two points (d and u).
        if c_pair == 0 or c_pair == 2*self.genus-1:
            self.is_degenerate = True
        else:
            self.is_degenerate = False
        self.large_pmc = linearPMC(self.genus)
        self.small_pmc = linearPMC(self.genus-1)
        # Some special points and pairs
        self.c1, self.c2 = self.large_pmc.pairs[self.c_pair]
        if self.is_degenerate:
            assert self.c2 == self.c1 + 2
            self.p = self.c1 + 1
            self.p_pair = self.large_pmc.pairid[self.p]
        else:
            assert self.c2 == self.c1 + 3
            self.d, self.u = self.c1 + 1, self.c1 + 2
            self.d_pair = self.large_pmc.pairid[self.d]
            self.u_pair = self.large_pmc.pairid[self.u]
        # Construct the to_s dictionary. Keys are points on the large PMC that
        # match points on the small PMC. Value is the point that it matches.
        self.to_s = dict()
        cur_pt = 0
        for i in range(self.n):
            if self.is_degenerate:
                # Skip the c-pair and the p-pair entirely.
                pair_i = self.large_pmc.pairid[i]
                if pair_i == self.c_pair or pair_i == self.p_pair:
                    continue
            else:
                # Skip the four consecutive points c1, d, u, c2.
                if self.c1 <= i <= self.c2:
                    continue
            self.to_s[i] = cur_pt
            cur_pt += 1
        # The pair_to_s dictionary is similar, but for pairs. The c-pair does
        # not match to anything. In the non-degenerate case, the u and d pairs
        # both match the (u',d') pair on the left. In the degenerate case, the
        # p-pair also does not match anything.
        self.pair_to_s = dict()
        for i in range(self.n//2):
            for p in self.large_pmc.pairs[i]:
                if p in self.to_s:
                    self.pair_to_s[i] = self.small_pmc.pairid[self.to_s[p]]
        if not self.is_degenerate:
            # Special pair on the small PMC
            self.du_pair = self.pair_to_s[self.d_pair]
            assert self.du_pair == self.pair_to_s[self.u_pair]
        # side determines which PMC is the starting (resp. ending) one.
        if self.side == LEFT:
            self.start_pmc, self.end_pmc = self.large_pmc, self.small_pmc
        else:
            self.start_pmc, self.end_pmc = self.small_pmc, self.large_pmc
    def __eq__(self, other):
        # A cobordism is determined by (genus, c_pair, side).
        return self.genus == other.genus and self.c_pair == other.c_pair and \
            self.side == other.side
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # Consistent with __eq__ above.
        return hash((self.genus, self.c_pair, self.side, "Cobordism"))
    @memorize
    def getDDStructure(self):
        """Returns the type DD structure corresponding to this cobordism."""
        all_idems = self._getIdems()
        all_chords = self._getChords()
        all_chords = [self._StrandsFromChords(chord1, chord2)
                      for chord1, chord2 in all_chords]
        # mult_one = True: only multiplicity-one algebra elements are used.
        alg1 = self.start_pmc.getAlgebra(mult_one = True)
        alg2 = self.end_pmc.getAlgebra(mult_one = True)
        ddstr = DDStrFromChords(alg1, alg2, all_idems, all_chords)
        assert ddstr.testDelta()
        return ddstr
    def _StrandsFromChords(self, chord1, chord2):
        """Create Strands objects from lists of chords. chord1 is given in
        large-PMC coordinates and translated to the small PMC via to_s;
        chord2 stays on the large PMC. Whichever strands object lies on the
        non-start side is converted with opp() to refer to the opposite PMC.
        """
        chord1 = [(self.to_s[p], self.to_s[q]) for p, q in chord1]
        chord_small = Strands(self.small_pmc, chord1)
        chord2 = [(p, q) for p, q in chord2]
        chord_large = Strands(self.large_pmc, chord2)
        if self.side == LEFT:
            return (chord_large, chord_small.opp())
        else:
            return (chord_small, chord_large.opp())
    @memorize
    def _getIdems(self):
        """Returns the set of possible idempotent-pairs for generators.
        In the non-degenerate case: the c-pair must be on the right, and at most
        one of u and d-pairs are on the right. The left idempotent is the
        complement of the right idempotent, under the mapping given by
        self.pair_to_s.
        In the degenerate case: the c-pair must be on the right, and the p-pair
        must not be on the right. The left idempotent is again the complement
        of the right idempotent.
        """
        all_idems = []
        large_idems = self.large_pmc.getIdempotents()
        for large_idem in large_idems:
            if self.c_pair not in large_idem:
                continue
            if self.is_degenerate:
                if self.p_pair in large_idem:
                    continue
            else:
                if self.u_pair in large_idem and self.d_pair in large_idem:
                    continue
            # Translate to the small PMC, drop the c-pair, then complement.
            small_idem_comp = Idempotent(
                self.small_pmc,
                [self.pair_to_s[i] for i in large_idem if i != self.c_pair])
            small_idem = small_idem_comp.comp()
            if self.side == LEFT:
                all_idems.append((large_idem, small_idem.opp()))
            else:
                all_idems.append((small_idem, large_idem.opp()))
        return all_idems
    def _getChords(self):
        """Returns the chords in the RIGHT case. The chords in the LEFT case are
        formed by switching the components of the pair.
        """
        all_chords = []
        # Chords present on both sides: any chord avoiding the removed points.
        for x in range(self.n):
            for y in range(x+1, self.n):
                if x in self.to_s and y in self.to_s:
                    all_chords.append(([(x, y)], [(x, y)]))
        # Chords appearing only on the large PMC, localized at the c-pair.
        all_chords.append(([], [(self.c1, self.c2)]))
        if not self.is_degenerate:
            all_chords.append(([], [(self.d, self.u)]))
            all_chords.append(([], [(self.c1, self.d), (self.u, self.c2)]))
            # Chords passing over the removed region: on the large side they
            # break at (c1, c2) or (d, u).
            for x in range(0, self.c1):
                for y in range(self.c2+1, self.n):
                    all_chords.append(([(x, y)], [(x, self.c1), (self.c2, y)]))
                    all_chords.append(([(x, y)], [(x, self.d), (self.u, y)]))
        return all_chords
| 6,619
| 36.613636
| 80
|
py
|
bfh_python
|
bfh_python-master/dstructure.py
|
"""Defines type D structures."""
from fractions import Fraction
from algebra import DGAlgebra, FreeModule, Generator, SimpleChainComplex, \
Tensor, TensorGenerator
from algebra import simplifyComplex
from algebra import E0
from grading import GeneralGradingSet, GeneralGradingSetElement
from hdiagram import getZeroFrameDiagram, getInfFrameDiagram, getPlatDiagram
from pmc import Idempotent, Strands, StrandDiagram
from pmc import connectSumPMC, splitPMC, linearPMC
from utility import MorObject, NamedObject
from utility import memorize
from utility import ACTION_LEFT, DEFAULT_GRADING, F2, SMALL_GRADING
class DGenerator(Generator):
    """A generator of a type D structure. Two DGenerator objects are
    distinguished by (python) identity.
    """
    def __init__(self, parent, idem):
        """Create a generator with the given parent module and idempotent."""
        Generator.__init__(self, parent)
        self.idem = idem

    def toSimpleDGenerator(self, name):
        """Return this generator as a SimpleDGenerator named ``name``. All
        attributes are carried over, except that ``name`` is overwritten and
        any cached ``_hash_val`` is discarded.
        """
        result = SimpleDGenerator(self.parent, self.idem, name)
        result.__dict__.update(self.__dict__)
        # The dict copy above may have restored an old name and a stale
        # cached hash; force the new name and drop the cache.
        result.name = name
        try:
            del result._hash_val
        except AttributeError:
            pass
        return result
class SimpleDGenerator(DGenerator, NamedObject):
    """Represents a generator of type D structure, distinguished by name."""
    def __init__(self, parent, idem, name):
        """Specifies name in addition."""
        # Initialize both bases explicitly; NamedObject receives the name.
        DGenerator.__init__(self, parent, idem)
        NamedObject.__init__(self, name)
class MorDtoDGenerator(Generator, MorObject):
    """Represents a generator of the morphism complex from a type D structure
    to another type D structure.
    """
    def __init__(self, parent, source, coeff, target):
        """Specifies the morphism source -> coeff * target."""
        Generator.__init__(self, parent)
        MorObject.__init__(self, source, coeff, target)
        # Combine filtrations of source and target (when present); source
        # filtration values are flipped via 1-x — presumably because the
        # source appears contravariantly. TODO(review): confirm convention.
        filt = []
        if hasattr(source, "filtration"):
            filt += [1-x for x in source.filtration]
        if hasattr(target, "filtration"):
            filt += target.filtration
        if filt != []:
            self.filtration = filt
    def apply(self,x):
        """Return self(x), where x is a DGenerator: coeff * target if x is
        the source of this morphism, and zero otherwise.
        """
        assert self.source.parent == x.parent
        if self.source == x:
            return self.coeff*self.target
        return E0
    def compose(self,g,parent=None):
        """Return the composition self after g (apply g first, then self) of
        two MorDtoDGenerator instances. If parent is not given, it is
        computed as the morphism complex from g's source to self's target.
        """
        assert self.source.parent == g.target.parent
        if parent:
            par = parent
        else:
            par = g.source.parent.morToD(self.target.parent)
        coeff = g.coeff*self.coeff
        # Nonzero only when the two morphisms chain up and the algebra
        # product does not vanish.
        if self.source == g.target and coeff:
            return MorDtoDGenerator(par, g.source, list(coeff.keys())[0], self.target)
        return E0
class DStructure(FreeModule):
    """Base class for type D structures. Note delta() returns an element in
    the tensor module Tensor((A,M)).
    """
    def __init__(self, ring, algebra, side):
        """Specifies the algebra and side of the type D action."""
        FreeModule.__init__(self, ring)
        assert isinstance(algebra, DGAlgebra)
        self.algebra = algebra
        self.side = side
        # Build the tensor product A (x) M and equip it with a differential
        # and a left algebra action.
        self.AtensorM = Tensor((algebra, self))

        def _mul_A_AtensorM(gen_pair, ACoeff):
            """rmultiply() for AtensorM: multiply ACoeff into the algebra
            factor of the pair (AGen, MGen).
            """
            AGen, MGen = gen_pair
            return (ACoeff * AGen) * MGen

        def _diff_AtensorM(gen_pair):
            """diff() for AtensorM: Leibniz rule over the algebra
            differential and the module's delta.
            """
            AGen, MGen = gen_pair
            return (AGen.diff() * MGen) + (AGen * MGen.delta())

        self.AtensorM.rmultiply = _mul_A_AtensorM
        self.AtensorM.diff = _diff_AtensorM

    def delta(self, generator):
        """Returns delta^1 of the generator."""
        raise NotImplementedError("Differential not implemented.")

    def rmultiply(self, MGen, AGen):
        """Multiplying a module generator by an algebra generator forms the
        corresponding tensor generator in A (x) M.
        """
        return 1*TensorGenerator((AGen, MGen), self.AtensorM)
class SimpleDStructure(DStructure):
    """Represents a type D structure with a finite number of generators, and
    explicitly stored generating set and delta operation.
    """
    def __init__(self, ring, algebra, side = ACTION_LEFT):
        """Initializes an empty type D structure."""
        assert side == ACTION_LEFT, "Right action not implemented."
        DStructure.__init__(self, ring, algebra, side)
        self.generators = set()
        # Maps each generator to its delta (an element of A (x) M).
        self.delta_map = dict()
    def __len__(self):
        return len(self.generators)
    def delta(self, generator):
        """Return the stored delta of the given generator."""
        return self.delta_map[generator]
    def getGenerators(self):
        return list(self.generators)
    def addGenerator(self, generator):
        """Add a generator. No effect if the generator already exists."""
        assert generator.parent == self
        assert isinstance(generator, DGenerator)
        self.generators.add(generator)
        if generator not in self.delta_map:
            self.delta_map[generator] = E0
    def addDelta(self, gen_from, gen_to, alg_coeff, ring_coeff):
        """Add ring_coeff * alg_coeff * gen_to to the delta of gen_from. Both
        arguments should be generators.
        """
        assert gen_from.parent == self and gen_to.parent == self
        # alg_coeff = None stands for the idempotent coefficient of gen_to.
        if alg_coeff is None:
            alg_coeff = gen_to.idem.toAlgElt(self.algebra)
        assert alg_coeff.getLeftIdem() == gen_from.idem
        assert alg_coeff.getRightIdem() == gen_to.idem
        self.delta_map[gen_from] += (alg_coeff * gen_to) * ring_coeff
    def reindex(self):
        """Replace the generators by simple generators indexed by integers."""
        gen_list = list(self.generators)
        new_gen_list = []
        # Maps old generators to their renamed copies.
        translate_dict = dict()
        for i in range(len(gen_list)):
            new_gen = gen_list[i].toSimpleDGenerator("g%d"%(i+1))
            new_gen_list.append(new_gen)
            translate_dict[gen_list[i]] = new_gen
        self.generators = set(new_gen_list)
        # Rewrite delta_map in terms of the new generators.
        new_delta = dict()
        for k, v in list(self.delta_map.items()):
            new_v = E0
            for (AGen, MGen), coeff in list(v.items()):
                new_v += (AGen * translate_dict[MGen]) * coeff
            new_delta[translate_dict[k]] = new_v
        self.delta_map = new_delta
        # Carry over gradings for generators that survived.
        if hasattr(self, "grading"):
            new_grading = dict()
            for gen, gr in list(self.grading.items()):
                if gen in translate_dict: # gen is still in dstr
                    new_grading[translate_dict[gen]] = gr
            self.grading = new_grading
    def deltaCoeff(self, gen_from, gen_to):
        """Return the coefficient (as algebra element) of gen_to in delta of
        gen_from.
        """
        if self.delta_map[gen_from] == 0:
            return E0
        else:
            return self.delta_map[gen_from].fixLast(gen_to)
    def testDelta(self):
        """Verify d^2 = 0 for this structure."""
        for gen in self.generators:
            if gen.delta().diff() != 0:
                # Print the offending terms in d^2 for one generator.
                print(gen, "==>")
                for k, v in list(gen.delta().diff().items()):
                    print(v, "*", k)
                return False
        return True
    def __str__(self):
        result = "Type D Structure.\n"
        for k, v in list(self.delta_map.items()):
            result += "d(%s) = %s\n" % (k, v)
        return result
    def morToD(self, other):
        """Compute the chain complex of morphisms from self to other."""
        assert self.algebra == other.algebra
        alg_gens = self.algebra.getGenerators()
        xlist = self.getGenerators()
        ylist = other.getGenerators()
        gens = list()
        cx = SimpleChainComplex(F2)
        genType = MorDtoDGenerator
        def morGradingSet():
            """Find the grading set of the new chain complex."""
            return GeneralGradingSet([self.gr_set.inverse(), other.gr_set])
        def morGrading(gr_set, x, a, y):
            """Find the grading of the generator x -> ay in the morphism
            complex. The grading set need to be provided as gr_set.
            """
            gr = [self.grading[x].inverse(), other.grading[y] * a.getGrading()]
            return GeneralGradingSetElement(gr_set, gr)
        # Prepare rev_delta for the last step in computing differentials:
        # rev_delta[q] lists all ((b, p), coeff) with b*q appearing in dp.
        rev_delta = dict()
        for x in xlist:
            rev_delta[x] = []
        for p in xlist:
            for (b, q), coeff in list(p.delta().items()):
                rev_delta[q].append(((b, p), coeff))
        # Get the list of generators: morphisms x -> a*y with matching
        # idempotents on both sides of a.
        for x in xlist:
            for a in alg_gens:
                for y in ylist:
                    if x.idem == a.getLeftIdem() and \
                            y.idem == a.getRightIdem():
                        gens.append(genType(cx, x, a, y))
        for gen in gens:
            cx.addGenerator(gen)
        # Get differentials
        for gen in gens:
            # Differential of ay in (x -> ay)
            x, a, y = gen.source, gen.coeff, gen.target
            day = a * y.delta() + a.diff() * y
            for (b, q), coeff in list(day.items()):
                cx.addDifferential(gen, genType(cx, x, b, q), coeff)
            # For each p such that b*x is in dp, add p->(ba)y
            for (b, p), coeff1 in rev_delta[x]:
                for ba_gen, coeff2 in list((b*a).items()):
                    cx.addDifferential(
                        gen, genType(cx, p, ba_gen, y), coeff1*coeff2)
        # Find grading set and grading of elements
        if hasattr(self, "gr_set") and hasattr(other, "gr_set"):
            cx.gr_set = morGradingSet()
            cx.grading = dict()
            for gen in gens:
                cx.grading[gen] = morGrading(cx.gr_set,
                                             gen.source, gen.coeff, gen.target)
        return cx
    def simplify(self, cancellation_constraint = None):
        """Simplify a type D structure using cancellation lemma."""
        # Simplification is best done in terms of coefficients
        # Build dictionary of coefficients
        arrows = dict()
        for gen in self.generators:
            arrows[gen] = dict()
        for gen in self.generators:
            for (AGen, MGen), coeff in list(self.delta_map[gen].items()):
                if MGen not in arrows[gen]:
                    arrows[gen][MGen] = E0
                arrows[gen][MGen] += AGen * coeff
        arrows = simplifyComplex(
            arrows, E0,
            cancellation_constraint = cancellation_constraint)
        # Now rebuild the type D structure
        self.generators = set()
        self.delta_map = dict()
        for x in arrows:
            self.generators.add(x)
            self.delta_map[x] = E0
            for y, coeff in list(arrows[x].items()):
                self.delta_map[x] += coeff * y
        # This is a good place to simplify gradings
        if hasattr(self, "gr_set"):
            new_gr_set = self.gr_set.simplifiedSet()
            for gen in self.generators:
                self.grading[gen] = self.gr_set.simplifiedElt(self.grading[gen])
            self.gr_set = new_gr_set
    def registerHDiagram(self, diagram, base_gen, base_gr = None):
        """Associate the given diagram as the Heegaard diagram from which this
        type D structure can be derived. Broadly similar (and somewhat simpler)
        than the type DD case. See the corresponding method for ddstructure for
        details.
        """
        self.hdiagram = diagram
        # Match PMC's and check that they make sense
        hd_pmc = self.hdiagram.pmc_list[0]
        dds_pmc = self.algebra.pmc
        assert hd_pmc.opp() == dds_pmc
        # Now attempt to match generators (by idempotent).
        self.hdiagram_gen_map = dict()
        gens, dgens = self.generators, diagram.getHFGenerators()
        for gen in gens:
            for dgen in dgens:
                dgen_idem = dgen.getDIdem()[0]
                if gen.idem == dgen_idem:
                    self.hdiagram_gen_map[gen] = dgen
                    break
            assert gen in self.hdiagram_gen_map
        # Compute grading and check consistency with algebra actions
        base_hgen = self.hdiagram_gen_map[base_gen]
        self.gr_set, gr = self.hdiagram.computeDGrading(base_hgen, base_gr)
        self.grading = dict()
        for gen in gens:
            self.grading[gen] = gr[self.hdiagram_gen_map[gen]]
        self.checkGrading()
    @memorize
    def dual(self):
        """Returns the dual of this type D structure, which is the type D
        invariant of the orientation reversed bordered 3-manifold. Reverse all
        arrows and take the opp() of all coefficients. The result is a type D
        structure over the opposite algebra (acting from the same side).
        """
        dual_str = SimpleDStructure(self.ring, self.algebra.opp(), self.side)
        # Map from generators in self to generators in dual_str:
        gen_map = dict()
        for x in self.generators:
            # Don't want to deal with the case where x represents more
            # complicated information. Use reindex() to reduce to this case.
            assert isinstance(x, SimpleDGenerator)
            new_x = SimpleDGenerator(dual_str, x.idem.opp(), x.name)
            dual_str.addGenerator(new_x)
            gen_map[x] = new_x
        # Each arrow x -> a*y becomes y -> opp(a)*x in the dual.
        for x in self.generators:
            for (a, y), coeff in list(x.delta().items()):
                dual_str.addDelta(gen_map[y], gen_map[x], a.opp(), coeff)
        if hasattr(self, "gr_set"):
            dual_str.gr_set = self.gr_set.inverse().opp()
            dual_str.grading = dict()
            for x in self.generators:
                dual_str.grading[gen_map[x]] = self.grading[x].inverse().opp()
        return dual_str
    def checkGrading(self):
        """Check grading is consistent with the type D operations."""
        for x in self.generators:
            for (a, y), coeff in list(x.delta().items()):
                gr_x = self.grading[x]
                gr_y = self.grading[y]
                assert gr_x - 1 == gr_y * [a.getGrading()]
    def compareDStructures(self, other):
        """Compare two type D structures, print out any differences."""
        # Some basic tests:
        if len(self) != len(other):
            print("Different number of generators.""")
            return False
        if self.algebra != other.algebra:
            print("Different algebra action.")
            return False
        # Match generators by idempotent, then compare all coefficients.
        gen_map = dict()
        for gen1 in self.generators:
            for gen2 in other.generators:
                if gen1.idem == gen2.idem:
                    gen_map[gen1] = gen2
                    break
        for gen1 in self.generators:
            for gen2 in self.generators:
                coeff1 = self.deltaCoeff(gen1, gen2)
                coeff2 = other.deltaCoeff(gen_map[gen1], gen_map[gen2])
                if coeff1 != coeff2:
                    print("Different coefficient at %s->%s" % (gen1, gen2))
                    print("%s vs %s" % (coeff1, coeff2))
                    return False
        return True
    def id(self):
        """Return the identity morphism of self, as an element of the
        morphism complex morToD(self).
        """
        answer = E0
        morcx = self.morToD(self)
        for x in self.getGenerators():
            idx = MorDtoDGenerator(morcx, x, x.idem.toAlgElt(self.algebra), x)
            answer += 1*idx
        return answer
def connectSumTypeD(dstr1, dstr2):
    """Form the connect sum of two type D structures.

    The underlying PMC is the connect sum of the two PMCs; generators are
    pairs of generators, and each arrow of either factor induces an arrow of
    the sum with the strands shifted into the appropriate half.
    """
    algebra1, algebra2 = dstr1.algebra, dstr2.algebra
    assert algebra1.mult_one == algebra2.mult_one
    pmc1, pmc2 = algebra1.pmc, algebra2.pmc
    pmc = connectSumPMC(pmc1, pmc2)
    algebra = pmc.getAlgebra(mult_one = algebra1.mult_one)
    dstr = SimpleDStructure(F2, algebra)
    # Maps pairs of generators in dstr1 and dstr2 to a generator in dstr, and
    # vice versa.
    pair_map = dict()
    rev_pair_map = dict()
    for gen1 in dstr1.getGenerators():
        for gen2 in dstr2.getGenerators():
            assert all([isinstance(x, SimpleDGenerator) for x in (gen1, gen2)])
            # Pairs from the second factor are shifted by pmc1.num_pair.
            idem = list(gen1.idem) + [p+pmc1.num_pair for p in gen2.idem]
            idem = Idempotent(pmc, idem)
            gen = SimpleDGenerator(dstr, idem, gen1.name + gen2.name)
            dstr.addGenerator(gen)
            pair_map[(gen1, gen2)] = gen
            rev_pair_map[gen] = (gen1, gen2)
    for gen in dstr.getGenerators():
        gen1, gen2 = rev_pair_map[gen]
        # Arrows coming from the first factor: strands unchanged.
        for (a, y), coeff in list(gen1.delta().items()):
            new_strands = Strands(pmc, a.strands)
            new_a = StrandDiagram(algebra, gen.idem, new_strands)
            dstr.addDelta(gen, pair_map[(y, gen2)], new_a, coeff)
        # Arrows coming from the second factor: strand endpoints shifted by
        # the number of points in pmc1.
        for (a, y), coeff in list(gen2.delta().items()):
            new_strands = Strands(
                pmc, [(p+pmc1.n, q+pmc1.n) for p,q in a.strands])
            new_a = StrandDiagram(algebra, gen.idem, new_strands)
            dstr.addDelta(gen, pair_map[(gen1, y)], new_a, coeff)
    return dstr
# Grading data used by getDGrs(): typeDGrs[i] gives, for grading-info case i,
# the (maslov, spinc) contribution of one handlebody factor, keyed by one of
# the four code strings "zeroDual", "zeroReg", "infDual", "infReg".
typeDGrs1 = {"zeroDual" : (0, [0,0]),
             "zeroReg" : (Fraction(1,2), [0,Fraction(1,2)]),
             "infDual" : (Fraction(1,4), [0,0]),
             "infReg" : (-Fraction(1,4), [-Fraction(1,2),0])}
typeDGrs2 = {"zeroDual" : (0, [0,0]),
             "zeroReg" : (0, [0,-Fraction(1,2)]),
             "infDual" : (Fraction(1,4), [0,0]),
             "infReg" : (Fraction(1,4), [Fraction(1,2),0])}
typeDGrs3 = {"zeroDual" : (0, [0,-1]),
             "zeroReg" : (0, [0,Fraction(1,2)]),
             "infDual" : (-Fraction(3,4), [1,0]),
             "infReg" : (-Fraction(3,4), [-Fraction(1,2),0])}
typeDGrs4 = {"zeroDual" : (Fraction(1,2), [0,-1]),
             "zeroReg" : (0, [0,-Fraction(1,2)]),
             "infDual" : (-Fraction(1,4), [1,0]),
             "infReg" : (Fraction(1,4), [Fraction(1,2),0])}
typeDGrs5 = {"zeroDual" : (0, [0,Fraction(-1,2)]),
             "zeroReg" : (Fraction(1,2), [0,-1]),
             "infDual" : (Fraction(1,4), [Fraction(1,2),0]),
             "infReg" : (-Fraction(1,4), [1,0])}
typeDGrs6 = {"zeroDual" : (0, [0,Fraction(-1,2)]),
             "zeroReg" : (0, [0,0]),
             "infDual" : (Fraction(1,4), [Fraction(1,2),0]),
             "infReg" : (Fraction(1,4), [0,0])}
typeDGrs7 = {"zeroDual" : (0, [0,Fraction(1,2)]),
             "zeroReg" : (0, [0,-1]),
             "infDual" : (-Fraction(3,4), [Fraction(-1,2),0]),
             "infReg" : (-Fraction(3,4), [1,0])}
typeDGrs8 = {"zeroDual" : (Fraction(1,2), [0,Fraction(1,2)]),
             "zeroReg" : (0, [0,0]),
             "infDual" : (-Fraction(1,4), [Fraction(-1,2),0]),
             "infReg" : (Fraction(1,4), [0,0])}
typeDGrs = [typeDGrs1, typeDGrs2, typeDGrs3, typeDGrs4,
            typeDGrs5, typeDGrs6, typeDGrs7, typeDGrs8]
def getDGrs(abs_gr_info, code_str):
    """Returns the maslov and spinc components of the absolute grading.

    Sums the per-factor contributions from the typeDGrs tables: abs_gr_info
    is an iterable of table indices, code_str is one of the four code strings
    ("zeroDual", "zeroReg", "infDual", "infReg").
    """
    total_maslov = 0
    total_spinc = []
    for gr_id in abs_gr_info:
        maslov_part, spinc_part = typeDGrs[gr_id][code_str]
        total_maslov += maslov_part
        total_spinc = total_spinc + spinc_part
    return total_maslov, total_spinc
def zeroTypeD(genus, is_dual = False, abs_gr_info = None):
    """Returns the type D structure for the 0-framed handlebody of a given
    genus. If is_dual is True, the dual structure is returned. If abs_gr_info
    is given, it is used (via getDGrs) to set the absolute grading of the
    base generator.
    """
    pmc = splitPMC(genus)
    alg = pmc.getAlgebra()
    dstr = SimpleDStructure(F2, alg)
    # Single generator "x" with idempotent {4i : 0 <= i < genus}.
    idem = pmc.idem([4*i for i in range(genus)])
    gen_x = SimpleDGenerator(dstr, idem, "x")
    dstr.addGenerator(gen_x)
    # One self-arrow for each genus-one piece.
    for i in range(genus):
        dstr.addDelta(gen_x, gen_x,
                      StrandDiagram(alg, idem, [(4*i, 4*i+2)]), 1)
    gen_x_gr = None
    if abs_gr_info is not None:
        assert DEFAULT_GRADING == SMALL_GRADING
        if is_dual:
            maslov, spinc = getDGrs(reversed(abs_gr_info), "zeroDual")
        else:
            maslov, spinc = getDGrs(abs_gr_info, "zeroReg")
        gen_x_gr = pmc.small_gr(maslov, spinc)  # really pmc_opp
    dstr.registerHDiagram(getZeroFrameDiagram(genus), gen_x, gen_x_gr)
    return dstr.dual() if is_dual else dstr
def zeroTypeDAdm(genus):
    """Returns a larger type D structure for the 0-framed handlebody of a given
    genus. The diagram for this is obtained by isotopying the beta circles to
    create more intersections, so it is more likely to create admissible
    diagrams when tensored with another bordered diagram.
    """
    if genus > 1:
        # Build higher genus by connect-summing genus-1 pieces.
        return connectSumTypeD(zeroTypeDAdm(genus-1), zeroTypeDAdm(1))
    # genus == 1 case
    pmc = splitPMC(1)
    algebra = pmc.getAlgebra()
    dstr = SimpleDStructure(F2, algebra)
    idem_x = pmc.idem([0])
    idem_o = pmc.idem([1]) # idem for the other two generators
    genx = SimpleDGenerator(dstr, idem_x, "x")
    geny = SimpleDGenerator(dstr, idem_o, "y")
    genz = SimpleDGenerator(dstr, idem_o, "z")
    # Idiom fix: use a plain loop rather than a list comprehension executed
    # only for its side effects.
    for gen in (genx, geny, genz):
        dstr.addGenerator(gen)
    dstr.addDelta(genz, geny, StrandDiagram(algebra, idem_o, []), 1)
    dstr.addDelta(genz, genx, StrandDiagram(algebra, idem_o, [(1,2)]), 1)
    dstr.addDelta(genx, geny, StrandDiagram(algebra, idem_x, [(0,1)]), 1)
    return dstr
def infTypeD(genus, is_dual = False, abs_gr_info = None):
    """Returns the type D structure for the inf-framed handlebody of a given
    genus. If is_dual is True, the dual structure is returned. If abs_gr_info
    is given, it is used (via getDGrs) to set the absolute grading of the
    base generator.
    """
    pmc = splitPMC(genus)
    alg = pmc.getAlgebra()
    dstr = SimpleDStructure(F2, alg)
    # Single generator "y" with idempotent {4i+1 : 0 <= i < genus}.
    idem = pmc.idem([4*i+1 for i in range(genus)])
    gen_y = SimpleDGenerator(dstr, idem, "y")
    dstr.addGenerator(gen_y)
    # One self-arrow for each genus-one piece.
    for i in range(genus):
        dstr.addDelta(gen_y, gen_y,
                      StrandDiagram(alg, idem, [(4*i+1, 4*i+3)]), 1)
    gen_y_gr = None
    if abs_gr_info is not None:
        assert DEFAULT_GRADING == SMALL_GRADING
        if is_dual:
            maslov, spinc = getDGrs(reversed(abs_gr_info), "infDual")
        else:
            maslov, spinc = getDGrs(abs_gr_info, "infReg")
        gen_y_gr = pmc.small_gr(maslov, spinc)  # really pmc_opp
    dstr.registerHDiagram(getInfFrameDiagram(genus), gen_y, gen_y_gr)
    return dstr.dual() if is_dual else dstr
def platTypeD(genus):
    """Returns the type D structure for the plat handlebody of a given
    genus, with a single generator "x" and one self-arrow for each beta
    circle.
    """
    pmc = linearPMC(genus)
    alg = pmc.getAlgebra()
    dstr = SimpleDStructure(F2, alg)
    idem = pmc.idem([4*i+1 for i in range(genus-1)] + [4*genus-3])
    gen_x = SimpleDGenerator(dstr, idem, "x")
    dstr.addGenerator(gen_x)
    arrow_strands = [(4*i+1, 4*i+4) for i in range(genus-1)]
    arrow_strands.append((4*genus-3, 4*genus-1))
    for strand in arrow_strands:
        dstr.addDelta(gen_x, gen_x,
                      StrandDiagram(alg, idem, [strand]), 1)
    dstr.registerHDiagram(getPlatDiagram(genus), gen_x)
    return dstr
| 23,399
| 38.065109
| 86
|
py
|
bfh_python
|
bfh_python-master/digraphtest.py
|
"""Unit test for digraph.py"""
from digraph import *
from arcslide import Arcslide
from dstructure import infTypeD, platTypeD, zeroTypeD, zeroTypeDAdm
from ddstructure import identityDD
from pmc import PMC
from pmc import splitPMC
from utility import DEFAULT_GRADING, SMALL_GRADING
import unittest
class TypeDGraphTest(unittest.TestCase):
    def testBuildTypeDGraph(self):
        """Smoke test: constructing a TypeDGraph from a plat handlebody
        should not raise.
        """
        graph1 = TypeDGraph(platTypeD(2))
class TypeDDGraphTest(unittest.TestCase):
    def testBuildTypeDDGraph(self):
        """Smoke test: constructing a TypeDDGraph from the identity DD
        structure should not raise.
        """
        graph1 = TypeDDGraph(identityDD(splitPMC(1)), 2)
class TypeAAGraphTest(unittest.TestCase):
    """Tests for type AA graphs and the various tensor-product computations
    built on them.
    """
    def testBuildTypeAAGraph(self):
        """Smoke test: constructing a TypeAAGraph should not raise."""
        graph1 = TypeAAGraph(splitPMC(1))
    def testTensorDoubleD(self):
        """Tensoring two type D structures gives a complex whose simplified
        rank matches the expected value (2^genus for these handlebodies).
        """
        d1 = zeroTypeD(1)
        d2 = zeroTypeDAdm(1)
        d3 = zeroTypeD(2)
        d4 = zeroTypeDAdm(2)
        d5 = zeroTypeD(3)
        d6 = zeroTypeDAdm(3)
        d7 = zeroTypeD(4)
        d8 = zeroTypeDAdm(4)
        tests = [(d1, d2, 2), (d2, d1, 2), (d2, d2, 2),
                 (d3, d4, 4), (d4, d3, 4), (d4, d4, 4),
                 (d5, d6, 8), (d7, d8, 16)]
        for d_left, d_right, expected_len in tests:
            cx = computeATensorD(d_left.dual(), d_right)
            cx.simplify()
            self.assertEqual(len(cx), expected_len)
    def testTensorDDandD(self):
        """Smoke test: computeDATensorD on identity and arcslide DD
        structures should not raise.
        """
        d1 = zeroTypeD(1)
        d2 = zeroTypeDAdm(1)
        d3 = zeroTypeD(2)
        dd_id = identityDD(splitPMC(1))
        dd_id2 = identityDD(splitPMC(2))
        dd_slide1 = Arcslide(splitPMC(1), 0, 1).getDDStructure()
        dstr1 = computeDATensorD(dd_id, d1)
        dstr2 = computeDATensorD(dd_id, d2)
        dstr3 = computeDATensorD(dd_slide1, d1)
        dstr4 = computeDATensorD(dd_id2, d3)
        # Uncomment to see the structures
        # print dstr1, dstr2, dstr3, dstr4
    def testTensorDandDD(self):
        """Smoke test: computeATensorDD should not raise."""
        d1 = zeroTypeD(1)
        dd_id = identityDD(splitPMC(1))
        dstr1 = computeATensorDD(d1, dd_id)
        # Uncomment to see the structures
        # print dstr1
    def testTensorDoubleDD(self):
        """computeDATensorDD yields a valid DD structure (d^2 = 0), and
        composing arcslides via repeated DD tensoring gives the expected
        ranks and uniform gradings.
        """
        dd_id = identityDD(splitPMC(1))
        self.assertTrue(computeDATensorDD(dd_id, dd_id).testDelta())
        dd_id2 = identityDD(splitPMC(2))
        self.assertTrue(computeDATensorDD(dd_id2, dd_id2).testDelta())
        def composeSlides(start_pmc, slides):
            # Chain the arcslides' DD structures together by DA tensoring,
            # simplifying after each step.
            cur_dd = Arcslide(start_pmc, *slides[0]).getDDStructure()
            for slide in slides[1:]:
                next_dd = Arcslide(
                    cur_dd.algebra2.pmc.opp(), *slide).getDDStructure()
                cur_dd = computeDATensorDD(cur_dd, next_dd)
                cur_dd.simplify()
                cur_dd.reindex()
            return cur_dd
        tests = [(splitPMC(1), [(0,1),(3,2)], 2),
                 (PMC([(0,3),(1,6),(2,4),(5,7)]), [(2,1),(6,5),(4,3)], 6)]
        for start_pmc, slides, result in tests:
            composed_dd = composeSlides(start_pmc, slides)
            self.assertEqual(len(composed_dd), result)
            composed_dd.checkGrading()
            if DEFAULT_GRADING == SMALL_GRADING:
                # All generators should share the same grading.
                ref_gr = list(composed_dd.grading.values())[0]
                for gen, gr in list(composed_dd.grading.items()):
                    self.assertEqual(gr, ref_gr)
    def testGrading(self):
        """Smoke test: tensor products of graded structures should compute
        gradings without raising.
        """
        d1 = zeroTypeD(1)
        d2 = infTypeD(1)
        dd_id = identityDD(splitPMC(1))
        cx = computeATensorD(d1.dual(), d2)
        dstr1 = computeDATensorD(dd_id, d1)
        dstr2 = computeATensorDD(d1, dd_id)
    def testAATensorDD1(self):
        """tensorAAandDD on the identity DD structure gives a valid DA
        structure.
        """
        pmc = splitPMC(2)
        aa_graph = getTypeAAGraph(pmc)
        ddstr1 = identityDD(pmc)
        dd_graph1 = TypeDDGraph(ddstr1, 1)
        dastr1 = aa_graph.tensorAAandDD(dd_graph1)
        self.assertTrue(dastr1.testDelta())
    def testAATensorDD2(self):
        """tensorAAandDD on an arcslide DD structure gives a valid DA
        structure.
        """
        pmc2 = PMC([(0,3),(1,6),(2,4),(5,7)])
        ddstr2 = Arcslide(pmc2, 0, 1).getDDStructure()
        dd_graph2 = TypeDDGraph(ddstr2, 1)
        aa_graph2 = getTypeAAGraph(pmc2)
        dastr2 = aa_graph2.tensorAAandDD(dd_graph2)
        self.assertTrue(dastr2.testDelta())
    def testDDTensorAA(self):
        """tensorDDandAA on identity and arcslide DD structures gives valid
        DA structures.
        """
        pmc = splitPMC(1)
        aa_graph = getTypeAAGraph(pmc)
        ddstr1 = identityDD(pmc)
        dd_graph1 = TypeDDGraph(ddstr1, 2)
        dastr1 = aa_graph.tensorDDandAA(dd_graph1)
        self.assertTrue(dastr1.testDelta())
        ddstr2 = Arcslide(pmc, 0, 1).getDDStructure()
        dd_graph2 = TypeDDGraph(ddstr2, 2)
        dastr2 = aa_graph.tensorDDandAA(dd_graph2)
        self.assertTrue(dastr2.testDelta())
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 4,570
| 34.710938
| 74
|
py
|
bfh_python
|
bfh_python-master/minusalgtest.py
|
"""Unit test for minusalg.py"""
from minusalg import *
from ddstructure import SimpleDDGenerator, SimpleDDStructure
from pmc import Idempotent
import unittest
class MinusAlgTest(unittest.TestCase):
    """Tests for the minus version of the strand algebra."""
    def testGenerators(self):
        # A genus-1 split PMC produces exactly 18 generators.
        gens = MinusStrandAlgebra(F2, splitPMC(1)).getGenerators()
        self.assertEqual(len(gens), 18)
    # def testMultiply(self):
    #     gens = MinusStrandAlgebra(F2, splitPMC(1)).getGenerators()
    #     for gen in gens:
    #         # Number of factors should be 1 + (length of strand)
    #         self.assertEqual(len(gen.factor()), 1 + sum(gen.multiplicity))
    def testHochchild(self):
        """Build a small type DD structure over the minus algebra, convert it
        to a type D structure and compute Mor(D, D) (a Hochschild-type
        complex), simplifying with a homology basis.

        NOTE(review): "Hochchild" is a typo for "Hochschild"; the method name
        is kept unchanged to preserve the public test interface.
        """
        pmc = splitPMC(1)
        alg = MinusStrandAlgebra(F2, pmc)
        ddstr = SimpleDDStructure(F2, alg, alg)
        # Initialize the list of generators to add to ddstr1.
        idems = {"x" : ([0], [0]),
                 "y" : ([1], [1])}
        gens = {}
        for name, (idem1, idem2) in list(idems.items()):
            gens[name] = SimpleDDGenerator(
                ddstr, Idempotent(pmc, idem1), Idempotent(pmc, idem2), name)
            ddstr.addGenerator(gens[name])
        # Now add delta
        ddstr.addDelta(gens["x"], gens["y"],
                       minusSD(pmc, [(0, 1)]), minusSD(pmc, [(2, 3)]), 1)
        ddstr.addDelta(gens["y"], gens["x"],
                       minusSD(pmc, [(1, 2)]), minusSD(pmc, [(1, 2)]), 1)
        ddstr.addDelta(gens["x"], gens["y"],
                       minusSD(pmc, [(2, 3)]), minusSD(pmc, [(0, 1)]), 1)
        ddstr.addDelta(gens["y"], gens["x"],
                       minusSD(pmc, [(3, 0)]), minusSD(pmc, [(3, 0)]), 1)
        print(ddstr)
        self.assertTrue(ddstr.testDelta())
        dstr = ddstr.toDStructure()
        print(dstr)
        self.assertTrue(dstr.testDelta())
        hochchild = dstr.morToD(dstr)
        print(hochchild)
        hochchild.simplify(find_homology_basis = True)
        print(len(hochchild))
        # Removed an unused computation (a list of len(gen.prev_meaning)
        # values that was assigned to a local but never read).
        for gen in hochchild.getGenerators():
            print(gen.prev_meaning)
    def testLargeChainComplex(self):
        # Only checks that the computation runs without raising.
        getHalfIdComplex()
# Run all unit tests when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
| 2,240
| 36.35
| 76
|
py
|
bfh_python
|
bfh_python-master/digraph.py
|
"""Handles things related to directed graphs."""
from algebra import Generator, SimpleChainComplex
from algebra import E0
from dstructure import DGenerator, SimpleDStructure
from ddstructure import DDGenerator, SimpleDDStructure
from dastructure import DATensorDGenerator, DATensorDDGenerator, \
SimpleDAGenerator, SimpleDAStructure
from grading import GeneralGradingSet, GeneralGradingSetElement
from identityaa import homotopyMap
from pmc import Strands, StrandDiagram
from utility import memorize
from utility import F2
class DiGraph(object):
    """Abstract base for directed graphs. Concrete subclasses must implement
    the outward-edge query below.
    """
    def getOutEdges(self, node):
        """Return the list of edges leaving the given node."""
        raise NotImplementedError("Get list of outward edges not implemented.")
class DiGraphNode(object):
    """Base class for a node of a directed graph. Records the graph that
    owns the node.
    """
    def __init__(self, parent):
        """parent is the digraph containing this node."""
        self.parent = parent
class DiGraphEdge(object):
    """Base class for an edge of a directed graph. Both endpoints must be
    nodes of the same digraph.
    """
    def __init__(self, source, target):
        """source and target are nodes belonging to the same digraph."""
        assert source.parent == target.parent
        self.source, self.target = source, target
class ConcreteDiGraph(DiGraph):
    """Directed graph stored explicitly: a list of nodes together with, for
    each node, the list of edges leaving it.
    """
    def __init__(self):
        """Start with no nodes and no edges."""
        self.nodes = []
        # Maps each node to the list of its outward edges.
        self.edges = dict()

    def __str__(self):
        lines = ["Di-graph with %d nodes." % len(self.nodes)]
        for node, outs in self.edges.items():
            lines.append("Node %s with outward edges:\n%s" % (
                str(node), "\n".join(str(edge) for edge in outs)))
        return "\n".join(lines)

    def addNode(self, node):
        """Insert a node; it must belong to this graph and not be present."""
        self.nodes.append(node)
        assert node not in self.edges
        self.edges[node] = []
        assert node.parent == self

    def addEdge(self, edge):
        """Record an edge under its source node."""
        self.edges[edge.source].append(edge)

    def getNodes(self):
        """Return the list of all nodes."""
        return self.nodes

    def getOutEdges(self, node):
        """Return the list of edges leaving node."""
        return self.edges[node]
class TypeDGraphNode(DiGraphNode):
    """Node of a type D graph. Wraps the corresponding generator of the type
    D structure; equality and hashing are determined by that generator.
    """
    def __init__(self, parent, dgen):
        """dgen is the type D generator represented by this node."""
        DiGraphNode.__init__(self, parent)
        self.dgen = dgen
        self.idem = self.dgen.idem

    def __str__(self):
        return str(self.dgen)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return self.dgen == other.dgen

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Tag with the class name so nodes do not collide with raw dgens.
        return hash((self.dgen, "TypeDGraphNode"))
class TypeDGraphEdge(DiGraphEdge):
    """Edge of a type D graph, representing one type D operation. Carries the
    algebra coefficient of that operation.
    """
    def __init__(self, source, target, coeff):
        """coeff is the algebra element labeling this operation."""
        DiGraphEdge.__init__(self, source, target)
        self.coeff = coeff

    def __str__(self):
        return "Edge from %s to %s with coeff %s" % \
            (str(self.source), str(self.target), str(self.coeff))

    def __repr__(self):
        return str(self)
class TypeDGraph(ConcreteDiGraph):
    """Corresponding to a type D structure. Nodes corresponds to generators and
    edges corresponds to type D operations.
    """
    def __init__(self, dstr):
        """Creates a type D graph from a type D structure."""
        ConcreteDiGraph.__init__(self)
        # Maintain a dictionary from DGenerator to nodes in graph
        self.graph_node = dict()
        self.algebra = dstr.algebra
        # Add nodes
        for dgen in dstr.getGenerators():
            cur_node = TypeDGraphNode(self, dgen)
            self.addNode(cur_node)
            self.graph_node[dgen] = cur_node
        # Add edges
        for gen_from in dstr.getGenerators():
            # Each delta term a (x) gen_to yields an edge gen_from -> gen_to
            # labeled by the algebra coefficient a; the ring coefficient (in
            # F2) is ignored.
            for (alg_coeff, gen_to), ring_coeff in list(gen_from.delta().items()):
                self.addEdge(TypeDGraphEdge(
                    self.graph_node[gen_from], self.graph_node[gen_to],
                    alg_coeff))
class UniversalDiGraphNode(DiGraphNode, tuple):
    """Node of the universal digraph of an algebra: an ordered tuple of
    algebra generators.
    """
    def __new__(cls, parent, data):
        return tuple.__new__(cls, data)

    def __init__(self, parent, data):
        """data is the sequence of generators; parent is the owning graph.
        The tuple content itself was already filled in by __new__.
        """
        self.parent = parent
class UniversalDiGraph(DiGraph):
    """Universal digraph of an algebra (usually the strand algebra of a PMC).
    Nodes are ordered sequences of algebra generators; from each node there is
    one outward edge per non-idempotent generator, whose target appends that
    generator to the sequence.
    """
    def __init__(self, algebra):
        """algebra's generators label the edges of the graph."""
        self.algebra = algebra

    def getOutEdges(self, gen_from):
        """Return one edge per non-idempotent generator of the algebra."""
        edges = []
        for alg_gen in self.algebra.getGenerators():
            if alg_gen.isIdempotent():
                continue
            target = UniversalDiGraphNode(self, gen_from + (alg_gen,))
            edges.append(TypeDGraphEdge(gen_from, target, alg_gen))
        return edges

    def getInitialNode(self):
        """Return the node for the empty sequence of generators."""
        return UniversalDiGraphNode(self, tuple())
class TypeDDGraphNode(DiGraphNode):
    """A node in a type DD graph. Stores the generator of type DD structure as
    well as a strand diagram whose right idempotent agrees with the left
    idempotent of that generator.
    """
    def __init__(self, parent, ddgen, sd):
        """Specifies generator of type DD structure and the strand diagram."""
        DiGraphNode.__init__(self, parent)
        self.ddgen = ddgen
        self.sd = sd
        self.idem1, self.idem2 = ddgen.idem1, ddgen.idem2

    def __str__(self):
        return "%s,%s" % (str(self.ddgen), str(self.sd))

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # Bug fix: equality previously compared only ddgen while __hash__
        # also mixed in sd, so two nodes with the same generator but
        # different strand diagrams compared equal yet hashed differently --
        # a violation of the hash invariant that can corrupt hashed
        # containers (e.g. the memorized getOutEdges cache keyed on nodes).
        # A node is determined by the pair (ddgen, sd), so compare both.
        return self.ddgen == other.ddgen and self.sd == other.sd

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.ddgen, self.sd, "TypeDDGraphNode"))
# Type DD operations carry the same data (source, target, coefficient) as
# type D operations, so type DD graph edges reuse TypeDGraphEdge directly.
TypeDDGraphEdge = TypeDGraphEdge
class TypeDDGraph(ConcreteDiGraph):
    """Corresponding to a type DD structure. Nodes correspond to generators
    plus a strand diagram, edges correspond to type DD operations. tensor_side
    specifies which algebra action (the one by algebra1 or algebra2) is
    tensored with the type A or AA structure. The other side gives type D
    operations.
    """
    def __init__(self, ddstr, tensor_side):
        """Creates a type DD graph from a type DD structure."""
        ConcreteDiGraph.__init__(self)
        self.tensor_side = tensor_side
        # Maintain a dictionary from (DDGenerator, StrandDiagram) to nodes in
        # graph.
        self.graph_node = dict()
        # Dictionary from DDGenerator to directly corresponding node in graph
        # (the one with idempotent strand diagram).
        self.ddgen_node = dict()
        self.algebra1, self.algebra2 = ddstr.algebra1, ddstr.algebra2
        # The strand diagram stored at a node lives on the *output* side --
        # the side opposite to tensor_side.
        if tensor_side == 2:
            alg_gens = self.algebra1.getGenerators()
        else:
            alg_gens = self.algebra2.getGenerators()
        # Add nodes
        for ddgen in ddstr.getGenerators():
            for agen in alg_gens:
                if tensor_side == 2:
                    idem_to_match = ddgen.idem1
                else:
                    idem_to_match = ddgen.idem2
                # Pair ddgen with every strand diagram whose right idempotent
                # matches its idempotent on the output side.
                if agen.getRightIdem() == idem_to_match:
                    cur_node = TypeDDGraphNode(self, ddgen, agen)
                    self.addNode(cur_node)
                    self.graph_node[(ddgen, agen)] = cur_node
                    if agen.isIdempotent():
                        self.ddgen_node[ddgen] = cur_node
    @memorize
    def getOutEdges(self, gen_from):
        """Return edges for the DD operations on gen_from.ddgen: the stored
        strand diagram absorbs the output-side coefficient, and the edge is
        labeled with the tensor-side coefficient.
        """
        result = []
        x, sd = gen_from.ddgen, gen_from.sd
        for (a, b, y), ring_coeff in list(x.delta().items()):
            if self.tensor_side == 2:
                output_a, tensor_a = a, b
            else:
                output_a, tensor_a = b, a
            # Accumulate the output-side coefficient into the stored strand
            # diagram; the term drops out if the product vanishes.
            new_sd = sd * output_a
            if new_sd != E0:
                new_sd = new_sd.getElt()
                gen_to = self.graph_node[(y, new_sd)]
                result.append(TypeDDGraphEdge(gen_from, gen_to, tensor_a))
        return result
class TypeAAGraphNode(DiGraphNode, tuple):
    """Node of a type AA graph: a pair of strand diagrams over the same PMC,
    stored as the 2-tuple (left diagram, right diagram).
    """
    def __new__(cls, parent, sd_left, sd_right):
        return tuple.__new__(cls, (sd_left, sd_right))

    def __init__(self, parent, sd_left, sd_right):
        """Record the idempotents: the right idempotent of each strand
        diagram. The tuple content itself was filled in by __new__.
        """
        DiGraphNode.__init__(self, parent)
        self.idem1, self.idem2 = (
            sd_left.getRightIdem(), sd_right.getRightIdem())

    def isHomology(self):
        """True when both stored strand diagrams are pure idempotents."""
        return all(sd.isIdempotent() for sd in self)
class TypeAAGraphEdge(DiGraphEdge):
    """Edge of a type AA graph: either an algebra action on the left or right
    factor, or a homotopy. Algebra edges carry a coefficient; homotopy edges
    have coeff = None.
    """
    # Possible values of edge_type
    ALG_LEFT, ALG_RIGHT, HOMOTOPY = list(range(3))

    def __init__(self, source, target, edge_type, coeff = None):
        """edge_type is one of ALG_LEFT / ALG_RIGHT / HOMOTOPY. coeff is
        None exactly when edge_type is HOMOTOPY.
        """
        DiGraphEdge.__init__(self, source, target)
        self.edge_type = edge_type
        self.coeff = coeff

    def __str__(self):
        if self.edge_type == self.ALG_LEFT:
            type_str = "Left algebra"
        elif self.edge_type == self.ALG_RIGHT:
            type_str = "Right algebra"
        else:
            type_str = "Homotopy"
        result = "%s edge from %s to %s" % \
            (type_str, self.source, self.target)
        if self.edge_type != self.HOMOTOPY:
            result += " with coefficient %s" % str(self.coeff)
        return result

    def __repr__(self):
        return str(self)
class ATensorDGenerator(Generator, tuple):
    """Generator of the chain complex D1 * CFAA(Id) * D2: a pair of type D
    generators (the CFAA(Id) generator in between is implicit).
    """
    def __new__(cls, parent, gen_left, gen_right):
        return tuple.__new__(cls, (gen_left, gen_right))

    def __init__(self, parent, gen_left, gen_right):
        """The tuple part was set in __new__; only initialize Generator."""
        Generator.__init__(self, parent)
class ATensorDDGenerator(DGenerator, tuple):
    """Generator of the type D structure D * CFAA(Id) * DD: a pair (type D
    generator, type DD generator).
    """
    def __new__(cls, parent, gen_left, gen_right):
        return tuple.__new__(cls, (gen_left, gen_right))

    def __init__(self, parent, gen_left, gen_right):
        """The idempotent comes from the second (DD) factor; tuple content
        was set in __new__.
        """
        DGenerator.__init__(self, parent, gen_right.idem2)
class TypeAAGraph(DiGraph):
    """Digraph used for simplifying a type AA structure. Nodes correspond to
    generators of the large chain complex. Edges can be either homotopies or
    multiplications on either side of the generator.
    """
    # Shorthands for the edge types defined on TypeAAGraphEdge.
    ALG_LEFT = TypeAAGraphEdge.ALG_LEFT
    ALG_RIGHT = TypeAAGraphEdge.ALG_RIGHT
    HOMOTOPY = TypeAAGraphEdge.HOMOTOPY
    def __init__(self, pmc):
        """Creates a type AA graph simplifying the type AA of identity for the
        given PMC. Uses the chain complex and homotopy calculated in class
        HomotopyAA.
        """
        self.pmc_alg = pmc.getAlgebra()
        # Dictionary mapping idempotents to the homology node whose idem1 is
        # that idempotent.
        self.homology_node = dict()
        for idem in pmc.getIdempotents():
            # A homology node pairs an idempotent with its complement.
            self.homology_node[idem] = TypeAAGraphNode(
                self, idem.toAlgElt(self.pmc_alg),
                idem.comp().toAlgElt(self.pmc_alg))
    @memorize
    def algFactor(self, sd1, sd2):
        """Factor out sd2 from sd1 on the right. Attempt to find sd such that
        sd * sd2 = sd1. Returns sd if it exists. Otherwise return None.
        """
        if sd2.getRightIdem() != sd1.getRightIdem():
            return None
        # Remove strands in sd2 one by one. Keep track of remaining strands.
        st_remain = list(sd1.strands)
        for a, b in sd2.strands:
            found = False
            for i in range(len(st_remain)):
                p, q = st_remain[i]
                # A strand (p, q) can be split as (p, a) followed by the
                # strand (a, b) of sd2 when it ends at b and starts at or
                # before a.
                if q == b and p <= a:
                    found = True
                    st_remain.remove((p, q))
                    if p < a:
                        st_remain.append((p, a))
                    break
            if not found:
                return None
        if not Strands(sd1.pmc, st_remain).rightCompatible(sd2.getLeftIdem()):
            return None
        result = StrandDiagram(sd1.parent, None, st_remain, sd2.getLeftIdem())
        # The greedy matching above is necessary but not sufficient; verify
        # that the candidate really multiplies back to sd1 (as elements).
        if result * sd2 == 1 * sd1:
            return result
        else:
            return None
    @memorize
    def getHomotopyEdges(self, source):
        """Get the list of homotopy edges starting at source."""
        return [TypeAAGraphEdge(source,
                                TypeAAGraphNode(self, *target), self.HOMOTOPY)
                for target in homotopyMap(*source)]
    def getAlgLeftTarget(self, source, coeff):
        """If there is a left algebra action edge starting at source and with
        coefficient coeff, return the target node of that edge. Otherwise,
        return None.
        """
        factor = self.algFactor(source[0], coeff)
        if factor is None:
            return None
        return TypeAAGraphNode(self, factor, source[1])
    def getAlgRightTarget(self, source, coeff):
        """If there is a right algebra action edge starting at source and with
        coefficient coeff, return the target node of that edge. Otherwise,
        return None.
        """
        prod = source[1] * coeff
        if prod == E0:
            return None
        prod = prod.getElt()
        return TypeAAGraphNode(self, source[0], prod)
    def _searchDoubleD(self, d_graph1, d_graph2, start_pos):
        """Search for paths in d_graph1*self*d_graph2 with the given list of
        starting positions (each element of start_pos is a tuple (d1_pos,
        d2_pos, aa_pos)). Returns a list of lists of end positions.
        """
        def search(d1_pos, d2_pos, aa_pos, is_homotopy, depth):
            """Helper function performing a one step search, starting at the
            given locations in the three graphs. If is_homotopy is set, the
            next move must be a homotopy. Otherwise, it must be an algebra
            action (on either side). Returns the list of end states with aa_pos
            at a homology node.
            -- depth: used for debugging only
            """
            result = []
            if is_homotopy:
                if aa_pos.isHomology():
                    result.append((d1_pos, d2_pos, aa_pos))
                else:
                    for edge in self.getHomotopyEdges(aa_pos):
                        result += search(d1_pos, d2_pos, edge.target, False,
                                         depth+1)
            else:
                # Try advancing along a type D operation on either factor,
                # matching its coefficient against the current AA node.
                for d1_edge in d_graph1.getOutEdges(d1_pos):
                    target = self.getAlgLeftTarget(aa_pos, d1_edge.coeff.opp())
                    if target is not None:
                        result += search(d1_edge.target, d2_pos, target, True,
                                         depth+1)
                for d2_edge in d_graph2.getOutEdges(d2_pos):
                    target = self.getAlgRightTarget(aa_pos, d2_edge.coeff)
                    if target is not None:
                        result += search(d1_pos, d2_edge.target, target, True,
                                         depth+1)
            return result
        full_result = []
        for d1_pos, d2_pos, aa_pos in start_pos:
            full_result.append(search(d1_pos, d2_pos, aa_pos, False, 0))
        return full_result
    def tensorDoubleD(self, d_graph1, d_graph2):
        """Computes the chain complex D1 * CFAA(Id) * D2, where D1, D2 are type
        D structures with graphs d_graph1, d_graph2, and CFAA(Id) is
        represented by this graph. Both D1 and D2 are left type D structures.
        The algebra acting on D1 is opposite of self.pmc_alg, and the algebra
        acting on D2 is the same as self.pmc_alg
        """
        assert d_graph1.algebra.opp() == self.pmc_alg
        assert d_graph2.algebra == self.pmc_alg
        cx = SimpleChainComplex(F2)
        # Generators of the chain complex:
        for node1 in d_graph1.getNodes():
            for node2 in d_graph2.getNodes():
                # Pair generators with complementary idempotents.
                if node1.idem == node2.idem.opp().comp():
                    cur_gen = ATensorDGenerator(cx, node1.dgen, node2.dgen)
                    cx.addGenerator(cur_gen)
        # Search the graphs for edges in the chain complex
        for gen_start in cx.getGenerators():
            dgen1, dgen2 = gen_start
            d1_pos = d_graph1.graph_node[dgen1]
            d2_pos = d_graph2.graph_node[dgen2]
            aa_pos = self.homology_node[dgen1.idem.opp()]
            pos = [(d1_pos, d2_pos, aa_pos)]
            end_states = self._searchDoubleD(d_graph1, d_graph2, pos)[0]
            for d1_end, d2_end, aa_end in end_states:
                gen_end = ATensorDGenerator(cx, d1_end.dgen, d2_end.dgen)
                cx.addDifferential(gen_start, gen_end, 1)
        return cx
    def tensorDDandD(self, dd_graph, d_graph):
        """Computes the type D structure DD1 * CFAA(Id) * D2, where DD1 is a
        type DD structure with graph dd_graph, CFAA(Id) is represented by this
        graph, and D2 is a type D structure with graph d_graph.
        """
        assert dd_graph.tensor_side == 2
        assert dd_graph.algebra2.opp() == self.pmc_alg
        assert d_graph.algebra == self.pmc_alg
        dstr = SimpleDStructure(F2, dd_graph.algebra1)
        # Generators of the type D structure:
        for ddgen, node1 in list(dd_graph.ddgen_node.items()):
            for node2 in d_graph.getNodes():
                if node1.idem2 == node2.idem.opp().comp():
                    cur_gen = DATensorDGenerator(dstr, ddgen, node2.dgen)
                    dstr.addGenerator(cur_gen)
        # Search the graphs for type D operations
        for gen_start in dstr.getGenerators():
            ddgen, dgen = gen_start
            d1_pos = dd_graph.ddgen_node[ddgen]
            d2_pos = d_graph.graph_node[dgen]
            aa_pos = self.homology_node[ddgen.idem2.opp()]
            pos = [(d1_pos, d2_pos, aa_pos)]
            end_states = self._searchDoubleD(dd_graph, d_graph, pos)[0]
            for d1_end, d2_end, aa_end in end_states:
                gen_end = DATensorDGenerator(dstr, d1_end.ddgen, d2_end.dgen)
                # The strand diagram accumulated on the DD side is the type D
                # output coefficient.
                dstr.addDelta(gen_start, gen_end, d1_end.sd, 1)
        return dstr
    def tensorDandDD(self, d_graph, dd_graph):
        """Computes the type D structure D1 * CFAA(Id) * DD2, where D1 is a
        type D structure with graph d_graph, CFAA(Id) is represented by this
        graph, and DD2 is a type DD structure with graph dd_graph.
        """
        assert dd_graph.tensor_side == 1
        assert d_graph.algebra.opp() == self.pmc_alg
        assert dd_graph.algebra1 == self.pmc_alg
        dstr = SimpleDStructure(F2, dd_graph.algebra2)
        # Generators of the type D structure:
        for node1 in d_graph.getNodes():
            for ddgen, node2 in list(dd_graph.ddgen_node.items()):
                if node1.idem == node2.idem1.opp().comp():
                    cur_gen = ATensorDDGenerator(dstr, node1.dgen, ddgen)
                    dstr.addGenerator(cur_gen)
        # Search the graphs for type D operations
        for gen_start in dstr.getGenerators():
            dgen, ddgen = gen_start
            d1_pos = d_graph.graph_node[dgen]
            d2_pos = dd_graph.ddgen_node[ddgen]
            aa_pos = self.homology_node[dgen.idem.opp()]
            pos = [(d1_pos, d2_pos, aa_pos)]
            end_states = self._searchDoubleD(d_graph, dd_graph, pos)[0]
            for d1_end, d2_end, aa_end in end_states:
                gen_end = ATensorDDGenerator(dstr, d1_end.dgen, d2_end.ddgen)
                dstr.addDelta(gen_start, gen_end, d2_end.sd, 1)
        return dstr
    def tensorDoubleDD(self, dd_graph1, dd_graph2):
        """Compute the type DD structure DD1 * CFAA(Id) * DD2."""
        assert dd_graph1.tensor_side == 2 and dd_graph2.tensor_side == 1
        assert dd_graph1.algebra2.opp() == self.pmc_alg
        assert dd_graph2.algebra1 == self.pmc_alg
        ddstr = SimpleDDStructure(F2, dd_graph1.algebra1, dd_graph2.algebra2)
        # Generators of the type DD structure:
        for ddgen1, node1 in list(dd_graph1.ddgen_node.items()):
            for ddgen2, node2 in list(dd_graph2.ddgen_node.items()):
                if node1.idem2 == node2.idem1.opp().comp():
                    cur_gen = DATensorDDGenerator(ddstr, ddgen1, ddgen2)
                    ddstr.addGenerator(cur_gen)
        # Search the graphs for type DD operations
        for gen_start in ddstr.getGenerators():
            ddgen1, ddgen2 = gen_start
            d1_pos = dd_graph1.ddgen_node[ddgen1]
            d2_pos = dd_graph2.ddgen_node[ddgen2]
            aa_pos = self.homology_node[ddgen1.idem2.opp()]
            pos = [(d1_pos, d2_pos, aa_pos)]
            end_states = self._searchDoubleD(dd_graph1, dd_graph2, pos)[0]
            for d1_end, d2_end, aa_end in end_states:
                gen_end = DATensorDDGenerator(ddstr,
                                              d1_end.ddgen, d2_end.ddgen)
                ddstr.addDelta(gen_start, gen_end, d1_end.sd, d2_end.sd, 1)
        return ddstr
    def tensorAAandDD(self, dd_graph):
        """Returns the type DA structure formed by tensoring dd_graph with
        self, with self placed at left and dd_graph placed at right.
        """
        assert dd_graph.tensor_side == 1
        assert dd_graph.algebra1 == self.pmc_alg
        alg_opp = self.pmc_alg.opp()
        # The generating set for the type DA structure and the dd_graph is the
        # same.
        result = SimpleDAStructure(F2, dd_graph.algebra2, alg_opp)
        ddgen_to_dagen_map = {}
        for ddgen in dd_graph.ddgen_node:
            cur_gen = SimpleDAGenerator(
                result, ddgen.idem2, ddgen.idem1.opp().comp(), str(ddgen))
            ddgen_to_dagen_map[ddgen] = cur_gen
            result.addGenerator(cur_gen)
        # The universal digraph enumerates all possible sequences of type A
        # algebra inputs.
        univ_digraph = UniversalDiGraph(alg_opp)
        for ddgen, d2_pos in list(dd_graph.ddgen_node.items()):
            d1_pos = univ_digraph.getInitialNode()
            aa_pos = self.homology_node[ddgen.idem1.comp()]
            pos = [(d1_pos, d2_pos, aa_pos)]
            end_states = self._searchDoubleD(univ_digraph, dd_graph, pos)[0]
            for d1_end, d2_end, aa_end in end_states:
                gen_from = ddgen_to_dagen_map[ddgen]
                gen_to = ddgen_to_dagen_map[d2_end.ddgen]
                result.addDelta(gen_from, gen_to, d2_end.sd, tuple(d1_end), 1)
        return result
    def tensorDDandAA(self, dd_graph):
        """Returns the type DA structure formed by tensoring dd_graph with
        self, with dd_graph placed at left and self placed at right.
        """
        # Currently very slow, only really work in genus 1 case
        assert dd_graph.tensor_side == 2
        assert dd_graph.algebra2 == self.pmc_alg.opp()
        alg = self.pmc_alg
        # The generating set for the type DA structure and the dd_graph is the
        # same.
        result = SimpleDAStructure(F2, dd_graph.algebra1, alg)
        ddgen_to_dagen_map = {}
        for ddgen in dd_graph.ddgen_node:
            cur_gen = SimpleDAGenerator(
                result, ddgen.idem1, ddgen.idem2.opp().comp(), str(ddgen))
            ddgen_to_dagen_map[ddgen] = cur_gen
            result.addGenerator(cur_gen)
        univ_digraph = UniversalDiGraph(alg)
        for ddgen, d1_pos in list(dd_graph.ddgen_node.items()):
            d2_pos = univ_digraph.getInitialNode()
            aa_pos = self.homology_node[ddgen.idem2.comp()]
            pos = [(d1_pos, d2_pos, aa_pos)]
            end_states = self._searchDoubleD(dd_graph, univ_digraph, pos)[0]
            for d1_end, d2_end, aa_end in end_states:
                gen_from = ddgen_to_dagen_map[ddgen]
                gen_to = ddgen_to_dagen_map[d1_end.ddgen]
                result.addDelta(gen_from, gen_to, d1_end.sd, tuple(d2_end), 1)
        return result
@memorize
def getTypeAAGraph(pmc):
    """Return the type AA graph for pmc, constructing at most one per PMC
    (results are memorized).
    """
    return TypeAAGraph(pmc)
def computeATensorD(dstr1, dstr2):
    """Compute the tensor product dstr1 * CFAA(Id) * dstr2. dstr1 and dstr2 are
    left type D structures over opposite PMC's. Result is a chain complex.
    """
    aa_graph = getTypeAAGraph(dstr2.algebra.pmc)
    cx = aa_graph.tensorDoubleD(TypeDGraph(dstr1), TypeDGraph(dstr2))
    # Add gradings if necessary
    if hasattr(dstr1, "gr_set") and hasattr(dstr2, "gr_set"):
        # The left factor's grading set is reversed (Ropp) to account for
        # dualizing the left action.
        cx.gr_set = GeneralGradingSet([dstr1.gr_set.Ropp(), dstr2.gr_set])
        cx.grading = dict()
        for gen in cx.getGenerators():
            dgen1, dgen2 = gen
            cx.grading[gen] = GeneralGradingSetElement(
                cx.gr_set, [dstr1.grading[dgen1].Ropp(), dstr2.grading[dgen2]])
    return cx
def computeDATensorD(ddstr1, dstr2):
    """Compute the tensor product ddstr1 * CFAA(Id) * dstr2. ddstr1 is a left
    type DD structure and dstr2 is a left type D structure. The algebra of the
    second action of ddstr1 is opposite to the algebra of the action on dstr2.
    """
    aa_graph = getTypeAAGraph(dstr2.algebra.pmc)
    dstr = aa_graph.tensorDDandD(TypeDDGraph(ddstr1, 2), TypeDGraph(dstr2))
    # Add gradings if necessary
    if hasattr(ddstr1, "gr_set") and hasattr(dstr2, "gr_set"):
        # partialRopp(1) reverses the grading on the second (tensored) action
        # of ddstr1.
        dstr.gr_set = GeneralGradingSet(
            [ddstr1.gr_set.partialRopp(1), dstr2.gr_set])
        dstr.grading = dict()
        for dgen in dstr.getGenerators():
            ddgen1, dgen2 = dgen
            dstr.grading[dgen] = GeneralGradingSetElement(
                dstr.gr_set, [ddstr1.grading[ddgen1].partialRopp(1),
                              dstr2.grading[dgen2]])
    return dstr
def computeATensorDD(dstr1, ddstr2):
    """Compute the tensor product dstr1 * CFAA(Id) * ddstr2. dstr1 is a left
    type D structure and ddstr2 is a left type DD structure. The algebra of the
    action on dstr1 is the opposite to the algebra of the first action on
    ddstr2.
    """
    aa_graph = getTypeAAGraph(ddstr2.algebra1.pmc)
    dstr = aa_graph.tensorDandDD(TypeDGraph(dstr1), TypeDDGraph(ddstr2, 1))
    # Add gradings if necessary
    if hasattr(dstr1, "gr_set") and hasattr(ddstr2, "gr_set"):
        # partialRopp(0) reverses the grading on the first (tensored) action
        # of ddstr2.
        dstr.gr_set = GeneralGradingSet(
            [ddstr2.gr_set.partialRopp(0), dstr1.gr_set])
        dstr.grading = dict()
        for dgen in dstr.getGenerators():
            dgen1, ddgen2 = dgen
            dstr.grading[dgen] = GeneralGradingSetElement(
                dstr.gr_set, [ddstr2.grading[ddgen2].partialRopp(0),
                              dstr1.grading[dgen1]])
    return dstr
def computeDATensorDD(ddstr1, ddstr2):
    """Compute the tensor product ddstr1 * CFAA(Id) * ddstr2. Both ddstr1 and
    ddstr2 are left type DD structures. The algebra of the second action on
    ddstr1 is the opposite to the algebra of the first action on ddstr2.
    """
    aa_graph = getTypeAAGraph(ddstr2.algebra1.pmc)
    ddstr = aa_graph.tensorDoubleDD(TypeDDGraph(ddstr1, 2),
                                    TypeDDGraph(ddstr2, 1))
    # Add gradings if necessary
    if hasattr(ddstr1, "gr_set") and hasattr(ddstr2, "gr_set"):
        # partialRopp(1) reverses the grading on the second (tensored) action
        # of ddstr1.
        ddstr.gr_set = GeneralGradingSet(
            [ddstr1.gr_set.partialRopp(1), ddstr2.gr_set])
        ddstr.grading = dict()
        for ddgen in ddstr.getGenerators():
            ddgen1, ddgen2 = ddgen
            ddstr.grading[ddgen] = GeneralGradingSetElement(
                ddstr.gr_set, [ddstr1.grading[ddgen1].partialRopp(1),
                               ddstr2.grading[ddgen2]])
    return ddstr
| 29,351
| 38.826323
| 82
|
py
|
bfh_python
|
bfh_python-master/extendbyidtest.py
|
"""Unit test for extendbyid.py"""
from extendbyid import *
from dastructure import SimpleDAGenerator
from ddstructure import identityDD
from localpmc import LocalIdempotent
from pmc import Idempotent
from pmc import linearPMC, splitPMC
import unittest
class ExtendedDAStructureTest(unittest.TestCase):
    """Tests extension-by-identity of a local type DA structure over a
    genus-2 split PMC, split along the interval (0, 2).
    """
    def setUp(self):
        self.pmc = splitPMC(2)
        self.splitting = PMCSplitting(self.pmc, [(0, 2)])
        local_pmc = self.splitting.local_pmc
        # (Removed an unused local binding of self.splitting.outer_pmc.)
        # Construct the local generators
        local_da = LocalDAStructure(
            F2, local_pmc.getAlgebra(), local_pmc.getAlgebra())
        idems = {"x1" : ([], []),
                 "x2" : ([0], [0]),
                 "x3" : ([0], [1]),
                 "x4" : ([1], [1]),
                 "x5" : ([0, 1], [0, 1])}
        gens = {}
        for name, (l_idem, r_idem) in list(idems.items()):
            gens[name] = SimpleDAGenerator(
                local_da, LocalIdempotent(local_pmc, l_idem),
                LocalIdempotent(local_pmc, r_idem), name)
            local_da.addGenerator(gens[name])
        local_da.auto_u_map()
        for name_from, name_to, algs_a, alg_d in [
                # Example from _short_underslide_down_middle in arcslideda.py
                # (subtract one from there since the bottom is closed).
                ("x3", "x4", [], [(0, 1)]),
                ("x2", "x2", [[(0, 2)]], [(0, 2)]),
                ("x5", "x5", [[1, (0, 2)]], [1, (0, 2)]),
                ("x2", "x1", [[(2, 3)]], [(2, 3)]),
                ("x5", "x4", [[1, (2, 3)]], [1, (2, 3)]),
                ("x4", "x3", [[(1, 2)], [(0, 1)]], [(1, 2)])]:
            local_da.addDelta(gens[name_from], gens[name_to],
                              local_pmc.sd(alg_d),
                              [local_pmc.sd(alg_a) for alg_a in algs_a], 1)
        self.extended_da = ExtendedDAStructure(
            local_da, self.splitting, self.splitting)
        mod_gens = self.extended_da.getGenerators()
        # Set up short names for the extended generators
        extended_idems = {"y1" : ([0, 1], [0, 1]),
                          "y2" : ([0, 2], [0, 2]),
                          "y3" : ([0, 3], [0, 3]),
                          "y4" : ([1, 2], [1, 2]),
                          "y5" : ([1, 3], [1, 3]),
                          "y6" : ([2, 3], [2, 3]),
                          "y7" : ([0, 2], [1, 2]),
                          "y8" : ([0, 3], [1, 3])}
        self.extended_gens = {}
        for name, (l_idem, r_idem) in list(extended_idems.items()):
            for gen in mod_gens:
                if gen.idem1 == Idempotent(self.pmc, l_idem) and \
                        gen.idem2 == Idempotent(self.pmc, r_idem):
                    self.extended_gens[name] = gen

    def testGetGenerators(self):
        """There are exactly eight extended generators."""
        self.assertEqual(len(self.extended_da.getGenerators()), 8)
        self.assertEqual(len(self.extended_gens), 8)

    def testDelta(self):
        """Spot-check extended delta actions against expected outputs."""
        for x, algs_a, alg_d, y in [
                ("y7", [], [4, (0, 1)], "y4"),
                ("y8", [], [5, (0, 1)], "y5"),
                ("y2", [[4, (0, 2)]], [4, (0, 2)], "y2"),
                ("y2", [[4, (2, 3)]], [4, (2, 3)], "y4"),
                ("y1", [[1, (0, 2)]], [1, (0, 2)], "y1"),
                ("y1", [[1, (2, 4)]], [1, (2, 4)], "y4"),
                ("y4", [[(1, 2), (4, 5)], [(0, 1), (5, 6)]],
                 [(1, 2), (4, 6)], "y7")]:
            self.assertEqual(self.extended_da.delta(
                self.extended_gens[x], [self.pmc.sd(a) for a in algs_a]),
                self.pmc.sd(alg_d) * self.extended_gens[y])

    def testDeltaPrefix(self):
        """Positive and negative cases for the deltaPrefix query."""
        # Restriction of one of the testDelta cases
        self.assertTrue(self.extended_da.deltaPrefix(
            self.extended_gens["y4"], [self.pmc.sd([(1, 2), (4, 5)])]))
        # Haven't stored such a local arrow
        self.assertFalse(self.extended_da.deltaPrefix(
            self.extended_gens["y4"], [self.pmc.sd([(1, 3), (4, 5)])]))
class AntiBraidTest(unittest.TestCase):
    # A test of the local situation of anti-braid resolution, involving two
    # unpaired points. Note this is a simple case before making an isotopy. It
    # cannot be used in actual calculations because the DA action would be
    # infinitely large.
    def setUp(self):
        self.pmc = linearPMC(2)
        self.splitting = PMCSplitting(self.pmc, [(1, 4)])
        # Correspondence between points in pmc and local / outer pmc:
        # pmc - 0 1 2 3 4 5 6 7
        # local_pmc - 0* 1 2 3 4 5*
        # outer_pmc - 0 1* 2* 3 4 5
        local_pmc = self.splitting.local_pmc
        # (Removed an unused local binding of self.splitting.outer_pmc.)
        # Construct the local generators. In local_pmc, idempotent 0 is (1, 4),
        # idempotent 1 is (2,), idempotent 2 is (3,).
        local_da = LocalDAStructure(
            F2, local_pmc.getAlgebra(), local_pmc.getAlgebra(),
            single_idems1 = [1, 2], single_idems2 = [1, 2])
        idems = {"x1" : ([0], [1]),
                 "x2" : ([0], [2]),
                 "x3" : ([0, 2], [1, 2]),
                 "x4" : ([0, 1], [1, 2])}
        gens = {}
        for name, (l_idem, r_idem) in list(idems.items()):
            gens[name] = SimpleDAGenerator(
                local_da, LocalIdempotent(local_pmc, l_idem),
                LocalIdempotent(local_pmc, r_idem), name)
            local_da.addGenerator(gens[name])
        local_da.auto_u_map()
        for name_from, name_to, algs_a, alg_d in [
                # Some examples of local DA actions
                ("x4", "x3", [], [1, (2, 3)]),
                ("x1", "x2", [[(2, 3)]], [1]),
                ]:
            local_da.addDelta(gens[name_from], gens[name_to],
                              local_pmc.sd(alg_d),
                              [local_pmc.sd(alg_a) for alg_a in algs_a], 1)
        self.extended_da = ExtendedDAStructure(
            local_da, self.splitting, self.splitting)
        mod_gens = self.extended_da.getGenerators()
        # Set up short names for the extended generators. Here y_i is the unique
        # extension of x_i.
        extended_idems = {"y1" : ([1, 3], [0, 3]),
                          "y2" : ([1, 3], [2, 3]),
                          "y3" : ([1, 2], [0, 2]),
                          "y4" : ([0, 1], [0, 2])}
        self.extended_gens = {}
        for name, (l_idem, r_idem) in list(extended_idems.items()):
            for gen in mod_gens:
                if gen.idem1 == Idempotent(self.pmc, l_idem) and \
                        gen.idem2 == Idempotent(self.pmc, r_idem):
                    self.extended_gens[name] = gen

    def testGetGenerators(self):
        """There are exactly four extended generators, one per local one."""
        self.assertEqual(len(self.extended_da.getGenerators()), 4)
        self.assertEqual(len(self.extended_gens), 4)

    def testDelta(self):
        """Spot-check extended delta actions against expected outputs."""
        for x, algs_a, alg_d, y in [
                ("y4", [], [1, (2, 3)], "y3"),
                ("y1", [[5, (2, 3)]], [1, 5], "y2")]:
            self.assertEqual(self.extended_da.delta(
                self.extended_gens[x], [self.pmc.sd(a) for a in algs_a]),
                self.pmc.sd(alg_d) * self.extended_gens[y])

    def testDeltaPrefix(self):
        # TODO: add test cases
        pass
class IdentityDDLocalTest(unittest.TestCase):
    """Checks that extending the local identity DA structure by identity
    reproduces the identity type DD structure.
    """
    def testIdentityDDLocal(self):
        splitting = PMCSplitting(linearPMC(2), [(1, 4)])
        local_id = identityDALocal(splitting.local_pmc)
        extended = ExtendedDAStructure(local_id, splitting, splitting)
        result_dd = extended.tensorDD(identityDD(linearPMC(2)))
        self.assertTrue(result_dd.compareDDStructures(identityDD(linearPMC(2))))
# Run all unit tests when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
| 7,759
| 42.351955
| 80
|
py
|
bfh_python
|
bfh_python-master/signs.py
|
"""Sign conventions."""
from fractions import Fraction
from algebra import findRankOverF2
from algebra import DGAlgebra, Element, Generator
from algebra import E0
from grading import standardRefinement, standardRefinementForIdem
from grading import DEFAULT_REFINEMENT
from linalg import F2RowSystem
from pmc import StrandAlgebra, StrandDiagram
from utility import memorize
from utility import F2
class AbsZ2Grading(object):
    """Computes absolute Z/2Z grading for the strand algebra."""
    def __init__(self, algebra):
        """Creates data needed for computation of Z/2Z grading for the given
        strand algebra.

        Determines, once and for all, the set of idempotents whose raw maslov
        grading carries a 1/2 offset relative to a fixed initial idempotent,
        so getAbsGrading can always return an integer mod 2.
        """
        self.algebra = algebra
        gen_list = algebra.getGenerators()
        self.init_idem = gen_list[0].left_idem
        # Compute the set of idempotents that have offset 1/2 with init_idem.
        # This is stored as self.adjusted_idem.
        self.adjusted_idem = set()
        self.tested_idem = set([self.init_idem])
        def testAdd(gen, tested_idem, to_test_idem):
            # Compute absolute grading on gen to see if its two idempotents
            # should have 1/2 offsets.
            if to_test_idem in self.tested_idem:
                return
            self.tested_idem.add(to_test_idem)
            maslov = self._getAbsGradingRaw(gen)
            assert maslov.denominator <= 2
            is_different = (maslov.denominator != 1)
            prev_adjust = (tested_idem in self.adjusted_idem)
            # XOR: the new idempotent is adjusted iff exactly one of
            # (half-integer raw grading, known side already adjusted) holds.
            if (is_different and not prev_adjust) or \
               (not is_different and prev_adjust):
                self.adjusted_idem.add(to_test_idem)
        # Propagate offsets along generators until every idempotent is
        # classified. NOTE(review): assumes the generators connect all
        # idempotents to init_idem; otherwise this loop would not terminate.
        need_repeat = True
        while need_repeat:
            need_repeat = False
            for gen in gen_list:
                if gen.left_idem in self.tested_idem:
                    testAdd(gen, gen.left_idem, gen.right_idem)
                elif gen.right_idem in self.tested_idem:
                    testAdd(gen, gen.right_idem, gen.left_idem)
                else:
                    need_repeat = True

    def _getAbsGradingRaw(self, sd):
        """Returns the maslov grading from which the absolute grading is
        derived (no 1/2 offsets applied).
        """
        assert sd.parent == self.algebra
        small_gr = sd.getSmallGrading(refinement = standardRefinement)
        gr_group = small_gr.parent
        # Multiply by powers of the basis elements to cancel the spin-c
        # components, tracking the induced change in the maslov component.
        for i in range(len(small_gr.spinc)):
            mult = -small_gr.spinc[i]
            small_gr = small_gr * gr_group.basis(i).power(mult)
            small_gr.maslov += Fraction(-1, 2) * mult
        assert all([n == 0 for n in small_gr.spinc])
        return small_gr.maslov

    def getAbsGrading(self, sd):
        """Returns the absolute Z/2Z grading (returned value is either 0 or 1).
        """
        maslov = self._getAbsGradingRaw(sd)
        # Apply the +-1/2 offsets for adjusted idempotents, which makes the
        # result an integer.
        if sd.left_idem in self.adjusted_idem:
            maslov -= Fraction(1, 2)
        if sd.right_idem in self.adjusted_idem:
            maslov += Fraction(1, 2)
        assert maslov.denominator == 1
        return maslov.numerator % 2
class PreStrandDiagram(Generator):
    """A pre-strand diagram on n points.

    Difference with StrandDiagram in pmc.py is that there are no matching on
    points. There are only single horizontals, and there may be strands
    beginning or ending at any pair of points.
    """
    def __init__(self, parent, strands):
        """Specifies the parent algebra (of type PreStrandAlgebra), and a list
        of strands. Each element of strands is a pair specifying the starting
        and ending points.
        """
        Generator.__init__(self, parent)
        self.pmc = parent.pmc
        # Sorted tuple gives a canonical form, relied on by __eq__/__hash__.
        self.strands = tuple(sorted(strands))
        # Points occupied on the left / right, and the induced idempotents
        # (sorted tuples of pair ids in the PMC).
        self.left_pt_idem = tuple(sorted([s for s, t in self.strands]))
        self.right_pt_idem = tuple(sorted([t for s, t in self.strands]))
        self.left_idem = tuple(sorted([self.pmc.pairid[s]
                                       for s in self.left_pt_idem]))
        self.right_idem = tuple(sorted([self.pmc.pairid[s]
                                        for s in self.right_pt_idem]))
    def numCrossing(self):
        """Returns the number of crossings between moving strands."""
        # Note this counts crossing between single horizontals
        return sum(1 for (s1, t1) in self.strands for (s2, t2) in self.strands
                   if s1 < s2 and t1 > t2)
    def getBigGrading(self):
        """Returns the big grading: number of crossings minus half the local
        multiplicity at each starting point, with the interval multiplicity
        vector as the homological part.
        """
        # multiplicity[i] = number of strands covering interval (i, i+1).
        multiplicity = [0] * (self.pmc.n - 1)
        for s, t in self.strands:
            for pos in range(s, t):
                multiplicity[pos] += 1
        maslov = 0
        for s, t in self.strands:
            # Count multiplicity on both sides of the starting point s,
            # skipping intervals falling outside [0, n-1].
            if s != self.pmc.n - 1:
                maslov -= Fraction(multiplicity[s], 2)
            if s != 0:
                maslov -= Fraction(multiplicity[s-1], 2)
        maslov += self.numCrossing()
        return self.pmc.big_gr(maslov, multiplicity)
    @memorize
    def getSmallGrading(self, refinement = DEFAULT_REFINEMENT):
        """Returns the small grading."""
        # standardRefinement ensures integrality of spin-c components of the
        # grading. (A previously-computed, unused refinement value was
        # removed here.)
        assert refinement == standardRefinement
        p_l, p_r = [standardRefinementForIdem(self.pmc, idem)
                    for idem in [self.left_idem, self.right_idem]]
        return (p_l * self.getBigGrading() * p_r.inverse()).toSmallGrading()
    def isIdempotent(self):
        """Tests whether this generator is an idempotent."""
        return all([s == t for s, t in self.strands])
    def __str__(self):
        return "[%s]" % ",".join(["%s->%s" % (p, q) for p, q in self.strands])
    def __repr__(self):
        return str(self)
    def __eq__(self, other):
        if other is None:
            return False
        return self.parent == other.parent and self.strands == other.strands
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash((self.parent, self.strands))
# Pre-strand diagrams only need the generic Element class for their linear
# combinations; no specialized element behavior is required at this time.
PreStrandDiagram.ELT_CLASS = Element
class PreStrandAlgebra(DGAlgebra):
    """Represents the strand algebra of a local PMC.

    Generators are PreStrandDiagram objects: unlike pmc.StrandAlgebra there
    is no matching on points, so horizontals are single and strands may
    begin or end at any point.
    """
    def __init__(self, ring, pmc, idem_size):
        """Specifies the ground ring, the PMC, and the number of strands
        (idem_size) in each generator.
        """
        DGAlgebra.__init__(self, ring)
        assert idem_size <= pmc.genus * 2, "idem_size too large"
        self.pmc = pmc
        self.idem_size = idem_size
        # Absolute Z/2Z grading; used by diffSign/multiplySign to compute
        # signs over rings other than F2.
        self.abs_gr = AbsZ2Grading(self)
    def __str__(self):
        return "Pre strand algebra over %s with idem_size = %d" % \
            (str(self.pmc), self.idem_size)
    def __eq__(self, other):
        return self.pmc == other.pmc and self.idem_size == other.idem_size
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash(("PreStrandAlgebra", self.pmc, self.idem_size))
    def grSign(self, gen):
        """Returns (-1)^gr(gen), where gr is the absolute Z/2Z grading.

        NOTE(review): multiplySign below calls self.grSign, but this class
        previously did not define it (only SignLinAlg did), so that code
        path raised AttributeError. This definition mirrors
        SignLinAlg.grSign.
        """
        return 1 - 2 * self.abs_gr.getAbsGrading(gen)
    @memorize
    def getGenerators(self):
        """Returns all generators: every way of placing idem_size strands
        with distinct starting points and distinct ending points.
        """
        result = []
        def helper(strands):
            # strands is the current list of strands.
            if len(strands) == self.idem_size:
                result.append(PreStrandDiagram(self, strands))
                return
            # Starting points are generated in increasing order to avoid
            # producing the same diagram twice.
            start = 0
            if len(strands) > 0:
                start = strands[-1][0] + 1
            for p in range(start, self.pmc.n):
                for q in range(p, self.pmc.n):
                    # Ending points must also be pairwise distinct.
                    if q not in [end for start, end in strands]:
                        helper(strands + [(p, q)])
        helper([])
        return result
    @memorize
    def getGeneratorsForPtIdem(self, l_pt_idem = None, r_pt_idem = None):
        """Returns the list of idempotents with the specified left_pt_idem and
        right_pt_idem. Giving None as input means no constraints there.
        """
        return [gen for gen in self.getGenerators() if
                (l_pt_idem is None or gen.left_pt_idem == l_pt_idem) and
                (r_pt_idem is None or gen.right_pt_idem == r_pt_idem)]
    @memorize
    def _diffRaw(self, gen):
        """Returns a list of elements of the form ((n1, n2), diff_term), where
        n1, n2 are indices of strands in gen that crosses, and diff_term is a
        generator in gen.diff() obtained by uncrossing these two strands.
        Together they specify all terms in gen.diff(). Elements in the list are
        sorted by (n1, n2).
        """
        target_num_crossing = gen.numCrossing() - 1
        result = []
        for n1 in range(len(gen.strands)):
            for n2 in range(len(gen.strands)):
                s1, t1 = gen.strands[n1]
                s2, t2 = gen.strands[n2]
                if s1 < s2 and t1 > t2:
                    # Uncross the two strands (s1,t1), (s2,t2).
                    new_strands = list(gen.strands)
                    new_strands.remove((s1, t1))
                    new_strands.remove((s2, t2))
                    new_strands.extend([(s1, t2), (s2, t1)])
                    diff_term = PreStrandDiagram(self, new_strands)
                    # Keep only resolutions that remove exactly one crossing
                    # (otherwise the term does not appear in the differential).
                    if diff_term.numCrossing() == target_num_crossing:
                        result.append(((n1, n2), diff_term))
        return result
    @memorize
    def diff(self, gen):
        """Differential of gen: sum of all single uncrossings, with signs
        when the ground ring is not F2.
        """
        result = E0
        for (n1, n2), diff_term in self._diffRaw(gen):
            if self.ring is F2:
                result += diff_term.elt()
            else:
                result += self.diffSign(gen, n1, n2) * diff_term
        return result
    @memorize
    def _multiplyRaw(self, gen1, gen2):
        """If gen1 and gen2 can be multiplied, return the generator that is
        their product. Otherwise, return None.
        """
        if gen1.right_pt_idem != gen2.left_pt_idem:
            return None
        # Concatenate each strand of gen1 with the strand of gen2 starting
        # where it ends.
        new_strands = []
        for s1, t1 in gen1.strands:
            t2_list = [t2 for s2, t2 in gen2.strands if s2 == t1]
            assert len(t2_list) == 1
            new_strands.append((s1, t2_list[0]))
        product = PreStrandDiagram(self, new_strands)
        # The product vanishes unless crossings add up (no double crossing).
        if product.numCrossing() == gen1.numCrossing() + gen2.numCrossing():
            return product
        else:
            return None
    @memorize
    def multiply(self, gen1, gen2):
        """Product of two generators, with signs when the ring is not F2."""
        if not isinstance(gen1, PreStrandDiagram):
            return NotImplemented
        if not isinstance(gen2, PreStrandDiagram):
            return NotImplemented
        product = self._multiplyRaw(gen1, gen2)
        if product is None:
            return E0
        if self.ring is F2:
            return product.elt()
        else:
            return self.multiplySign(gen1, gen2) * product
    @memorize
    def diffSign(self, gen, n1, n2):
        """Returns the sign (+/-1) of the differential of gen when resolving the
        crossing between strands indexed n1 and n2.
        The cancellation in d^2=0 we use is:
        gen -- (n1, n2) --> n_term -- (nk1, nk2) --> nk_term
        gen -- (k1, k2) --> k_term -- (kn1, kn2) --> kn_term
        """
        dgen_raw = self._diffRaw(gen)
        # Locate the term of d(gen) given by resolving (n1, n2).
        n_term = None
        for (m1, m2), diff_term in dgen_raw:
            if (m1, m2) == (n1, n2):
                n_term = diff_term
                break
        assert n_term is not None
        # Key pair (k1, k2) satisfy the property that there are no k' such that
        # (k1, k') is resolvable (by ordering) or (k', k2) is resolvable.
        (k1, k2), k_term = dgen_raw[0]
        for (m1, m2), m_term in dgen_raw:
            if k1 < m1 and k2 == m2:
                (k1, k2), k_term = (m1, m2), m_term
        if (n1, n2) == (k1, k2):
            return 1  # This arrow is defined to be positive
        # Find a cancellation term: the pair of length-2 paths from gen to a
        # common nk_term must carry opposite total sign.
        dn_raw = self._diffRaw(n_term)
        dk_raw = self._diffRaw(k_term)
        for (nk1, nk2), nk_term in dn_raw:
            for (kn1, kn2), kn_term in dk_raw:
                if nk_term == kn_term:
                    return self.diffSign(n_term, nk1, nk2) * \
                        self.diffSign(k_term, kn1, kn2) * -1
        # Should not get to this point
        assert False
    @memorize
    def multiplySign(self, a, b):
        """Returns the sign (+/-1) of the multiplication between a and b.
        """
        prod = self._multiplyRaw(a, b)
        assert prod is not None
        # All multiplication involving idempotents are positive.
        if a.isIdempotent() or b.isIdempotent():
            return 1
        # prod is differentiable. Define using product on generators with a
        # smaller number of crossings.
        dprod_raw = self._diffRaw(prod)
        if len(dprod_raw) > 0:
            da_raw = self._diffRaw(a)
            db_raw = self._diffRaw(b)
            # Pick a term in d(prod), and find the term in either da*b or a*db
            # that cancels it, via the Leibniz rule
            # d(ab) = da * b + (-1)^gr(a) a * db.
            (p1, p2), dprod_term = dprod_raw[0]
            for (a1, a2), da_term in da_raw:
                if self._multiplyRaw(da_term, b) == dprod_term:
                    return self.diffSign(prod, p1, p2) * \
                        self.diffSign(a, a1, a2) * \
                        self.multiplySign(da_term, b)
            for (b1, b2), db_term in db_raw:
                if self._multiplyRaw(a, db_term) == dprod_term:
                    return self.diffSign(prod, p1, p2) * \
                        self.diffSign(b, b1, b2) * \
                        self.multiplySign(a, db_term) * self.grSign(a)
            # Should not get to this point
            assert False
        # Now prod (and therefore a and b) has no crossings. (k_s, k_t) is the
        # initial piece of the last moving strand in prod (this ensures that the
        # chord can always be factored from the left).
        prod_s, prod_t = next((s, t) for s, t in reversed(prod.strands)
                              if s < t)
        k_s, k_t = (prod_s, prod_s + 1)
        other_chords_prod = [(s, t) for s, t in prod.strands if s != prod_s]
        k_a = PreStrandDiagram(
            self, [(s, s) for s, t in other_chords_prod] + [(k_s, k_t)])
        k_b = PreStrandDiagram(self, other_chords_prod + [(k_t, prod_t)])
        assert self._multiplyRaw(k_a, k_b) == prod
        if a == k_a:
            return 1  # This product is defined to be positive
        # Two cases for when prod has no crossings
        ka_s, ka_t = next((s, t) for s, t in a.strands if s == k_s)
        if ka_t > ka_s:  # Case 1: (k_s, k_t) is part of a
            other_chords_a = [(s, t) for s, t in a.strands if s != k_s]
            l_a = PreStrandDiagram(self, other_chords_a + [(k_t, ka_t)])
            assert self._multiplyRaw(k_a, l_a) == a
            assert self._multiplyRaw(k_a, self._multiplyRaw(l_a, b)) == prod
            return self.multiplySign(k_a, l_a) * self.multiplySign(l_a, b)
        else:  # Case 2: (k_s, k_t) is not part of a, ka_s == k_s == ka_t
            other_chords_b = [(s, t) for s, t in b.strands if s != k_s]
            kb_s, kb_t = prod_s, prod_t
            assert (kb_s, kb_t) in b.strands
            l1_b = PreStrandDiagram(
                self, [(s, s) for s, t in other_chords_b] + [(k_s, k_t)])
            l2_b = PreStrandDiagram(self, other_chords_b + [(k_t, prod_t)])
            if l2_b.isIdempotent():
                # Will cause an infinite loop. This reflects the assumption that
                # a * l1_b = k_a * k_b with positive sign.
                return 1
            assert self._multiplyRaw(l1_b, l2_b) == b
            assert self._multiplyRaw(self._multiplyRaw(a, l1_b), l2_b) == prod
            return self.multiplySign(l1_b, l2_b) * \
                self.multiplySign(a, l1_b) * \
                self.multiplySign(self._multiplyRaw(a, l1_b), l2_b)
class SignLinAlg(object):
    """Try to obtain a sign convention by solving a linear algebra problem. """
    def __init__(self, algebra):
        # NOTE(review): createRowSystem calls algebra.getGeneratorsForIdem,
        # which PreStrandAlgebra does not define (it has
        # getGeneratorsForPtIdem). Presumably algebra is expected to be a
        # pmc.StrandAlgebra here -- confirm intended usage.
        self.algebra = algebra
        self.abs_gr = AbsZ2Grading(algebra)
    def grSign(self, gen):
        """Returns (-1)^gr(gen), where gr is the absolute Z/2Z grading."""
        return 1 - 2*self.abs_gr.getAbsGrading(gen)
    def createRowSystem(self):
        """Create row system. Each row represents either a multiplication or an
        arrow in the differential. Each column represents one of the constraints
        that must be satisfied.

        Prints diagnostic counts (generators, operations, constraints, ranks)
        as it goes; no value is returned.
        """
        all_gens = [gen for gen in self.algebra.getGenerators()
                    if not gen.isIdempotent()]
        print("Number of generators:", len(all_gens))
        # Maps multiplication / differential to the column index
        # ("M", g1, g2) -> index for a nonzero product; ("D", g, dg) -> index
        # for an arrow in the differential.
        self.index = dict()
        for gen1 in all_gens:
            for gen2 in self.algebra.getGeneratorsForIdem(
                    left_idem = gen1.right_idem):
                if (not gen2.isIdempotent()) and gen1 * gen2 != 0:
                    self.index[("M", gen1, gen2)] = len(self.index)
        for gen in all_gens:
            for dgen_term in gen.diff():
                self.index[("D", gen, dgen_term)] = len(self.index)
        num_row = len(self.index)
        print("Number of operations:", num_row)
        # Linear combination of rows to look for.
        expected_sums = []
        # Pairs (i, j), zero-based indices indicating a_ij is one.
        entries = []
        # Current number of columns
        num_col = 0
        def addColumn(ops, expected_sum):
            # Reads the current value of num_col from the enclosing scope;
            # callers increment num_col after each call.
            for op in ops:
                entries.append((self.index[op], num_col))
            expected_sums.append(expected_sum)
        # Now create row for each relation
        for gen in all_gens:
            # First part: d^2 = 0
            # Create a map from terms in ddgen to list of terms in dgen
            dd_to_d_map = dict()
            for dgen in gen.diff():
                for ddgen in dgen.diff():
                    if ddgen not in dd_to_d_map:
                        dd_to_d_map[ddgen] = []
                    dd_to_d_map[ddgen].append(dgen)
            # Now use that map to produce the relations in d^2=0
            for ddgen, dgen_list in list(dd_to_d_map.items()):
                # Each term of d^2 must cancel in exactly one pair.
                assert len(dgen_list) == 2
                dgen1, dgen2 = dgen_list
                addColumn([("D", gen, dgen1), ("D", gen, dgen2),
                           ("D", dgen1, ddgen), ("D", dgen2, ddgen)], 1)
                num_col += 1
        for gen1 in all_gens:
            for gen2 in self.algebra.getGeneratorsForIdem(
                    left_idem = gen1.right_idem):
                if gen2.isIdempotent() or gen1 * gen2 == 0:
                    continue
                # Second part: d(ab) = da * b + (-1)^gr(a) a*db
                if (gen1 * gen2).diff() != 0:
                    for dgen12 in (gen1 * gen2).diff():
                        for dgen1 in gen1.diff():
                            if dgen1 * gen2 == dgen12.elt():
                                addColumn([("M", gen1, gen2),
                                           ("D", (gen1*gen2).getElt(), dgen12),
                                           ("D", gen1, dgen1),
                                           ("M", dgen1, gen2)], 0)
                                num_col += 1
                        for dgen2 in gen2.diff():
                            if gen1 * dgen2 == dgen12.elt():
                                addColumn([("M", gen1, gen2),
                                           ("D", (gen1*gen2).getElt(), dgen12),
                                           ("D", gen2, dgen2),
                                           ("M", gen1, dgen2)],
                                          self.abs_gr.getAbsGrading(gen1))
                                num_col += 1
                # Third part: (ab)c = a(bc)
                for gen3 in self.algebra.getGeneratorsForIdem(
                        left_idem = gen2.right_idem):
                    if gen3.isIdempotent() or gen2 * gen3 == 0:
                        continue
                    if (gen1*gen2) * gen3 == 0:
                        continue
                    addColumn(
                        [("M", gen1, gen2), ("M", (gen1*gen2).getElt(), gen3),
                         ("M", gen2, gen3), ("M", gen1, (gen2*gen3).getElt())],
                        0)
                    num_col += 1
        print("Number of constraints:", num_col)
        # Use the following to find a solution
        # matrix = [[0] * num_col for i in range(num_row)]
        # for i, j in entries:
        #     matrix[i][j] = 1
        # row_sys = F2RowSystem(matrix)
        # comb = row_sys.getComb(expected_sums)
        # assert comb is not None, "Cannot be solved"
        # for op, index in self.index.items():
        #     print op, comb[index]
        # Use simplify method
        row_rank = findRankOverF2(num_row, num_col, entries)
        print("Rank:", row_rank)
        # Form row system of gauge equivalences
        gen_index = dict()
        for i in range(len(all_gens)):
            gen_index[all_gens[i]] = i
        # Pairs (i, j), zero-based indices indicating a_ij is one in gauge
        # matrix.
        gauge_entries = []
        for op, index in list(self.index.items()):
            assert op[1] != op[2]
            gauge_entries.append((index, gen_index[op[1]]))
            gauge_entries.append((index, gen_index[op[2]]))
            if op[0] == "M":
                gauge_entries.append((index, gen_index[(op[1]*op[2]).getElt()]))
        gauge_rank = findRankOverF2(num_row, len(all_gens), gauge_entries)
        print("Rank of gauge equivalences:", gauge_rank)
        print("Free choice:", num_row - row_rank - gauge_rank)
| 21,494
| 39.480226
| 80
|
py
|
bfh_python
|
bfh_python-master/hdiagram.py
|
"""Code for handling Heegaard diagrams."""
from fractions import Fraction
from grading import BigGradingGroup, SimpleDbGradingSet, \
SimpleDbGradingSetElement, SimpleGradingSet, SimpleGradingSetElement, \
SmallGradingGroup
from grading import DEFAULT_REFINEMENT
from linalg import RowSystem
from pmc import Idempotent, PMC
from utility import SummableDict
from utility import tolist
from utility import ACTION_LEFT, ACTION_RIGHT, BIG_GRADING, DEFAULT_GRADING, \
NEG, POS
"""Constants for each type of segment."""
ALPHA, BETA, BORDER = list(range(3))
def opp(orientation):
    """Return NEG if the argument is POS, and POS otherwise."""
    return NEG if orientation == POS else POS
class _Point(object):
"""Represents a point."""
def __init__(self, name):
self.name = name
def __str__(self):
return str(self.name)
def __repr__(self):
return "pt(%s)" % str(self.name)
class _Segment(object):
"""Represents a segment."""
def __init__(self, name, start, end):
"""Specify the name, start point, and end point of this segment."""
self.name = name
self.start = start
self.end = end
def __str__(self):
return "%s->%s" % (str(self.start), str(self.end))
def __repr__(self):
return "seg(%s: %s->%s)" % \
(str(self.name), str(self.start), str(self.end))
def oseg(self):
"""Get the oriented segment with POS direction."""
return _OrientedSegment(self, POS)
def orseg(self):
"""Get the oriented segment with NEG direction."""
return _OrientedSegment(self, NEG)
def bdZeroChain(self):
"""Returns the zero-chain (end - start)."""
return _ZeroChain({self.end : 1}) + _ZeroChain({self.start : -1})
class _OrientedSegment(object):
    """A segment together with a choice of direction along it."""
    def __init__(self, seg, orientation):
        """seg is the underlying segment; orientation is POS or NEG."""
        self.seg = seg
        assert orientation == POS or orientation == NEG, \
            "Orientation cannot be %d" % orientation
        self.orientation = orientation
        # start/end follow the chosen direction; longname gets an "_r"
        # suffix for the reversed direction.
        if orientation == POS:
            self.start, self.end = seg.start, seg.end
            self.longname = str(seg.name)
        else:
            self.start, self.end = seg.end, seg.start
            self.longname = str(seg.name) + "_r"

    def __str__(self):
        return "%s->%s" % (str(self.start), str(self.end))

    def __repr__(self):
        return "oseg(%s: %s)" % (str(self.longname), str(self))

    def __eq__(self, other):
        return (self.seg, self.orientation) == (other.seg, other.orientation)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.seg, self.orientation))

    def toOneChain(self):
        """Return the one-chain of this oriented segment."""
        return _OneChain({self.seg : self.orientation})

    def opp(self):
        """Return the same segment with the opposite orientation."""
        return _OrientedSegment(self.seg, opp(self.orientation))
class _Path(list):
"""A path represented as a list of OrientedSegment."""
def __init__(self, data = None, name = "", iscycle = False):
"""Specify the data as a list of OrientedSegment. Optionally, provide
a name for the path, and/or whether this path should be considered as a
cycle.
"""
if data is None:
data = []
list.__init__(self, data)
self.name = name
self.iscycle = iscycle
if not self._checkValidity():
raise ValueError("Invalid path")
def _checkValidity(self):
"""Performs the following checks:
#. Whether the end point of each segment agrees with the start point of
the next one.
#. If ``iscycle`` is set to ``True``, then check whether the end point
of the last segment agrees with the start point of the first.
"""
for i in range(len(self)-1):
if self[i].end != self[i+1].start:
return False
if self.iscycle and self[-1].end != self[0].start:
return False
return True
def __str__(self):
result = str(self[0].start)
result += "".join(["->%s" % oseg.end for oseg in self])
return result
def __repr__(self):
if self.name == "":
return "path(%s)" % str(self)
else:
return "path(%s: %s)" % (str(self.name), str(self))
def toOneChain(self):
"""Return the one-chain of this path."""
return _OneChain().accumulate([oseg.toOneChain() for oseg in self])
def opp(self, name = ""):
"""Return the opposite path."""
return _Path(reversed([oseg.opp() for oseg in self]),
name, self.iscycle)
def subPath(self, start, end):
"""Returns a part of this path between start and end. If start <= end,
then the returned path is self[start:end]. If start > end, then the
returned path is the reverse of self[end:start]. One can imagine
labeling the start of segment i in the path as point i, then this
function returns the part of the path between points start and end,
with the appropriate orientation.
This function will correctly handle -1 (same as length of path).
"""
if start == -1: start = len(self)
if end == -1: end = len(self)
if start <= end:
return _Path(self[start:end])
else:
return _Path(self[end:start]).opp()
class _Cell(object):
    """An oriented cell of the diagram."""
    def __init__(self, name, boundary):
        """name labels the cell; boundary is a Path (or a list of Path) with
        iscycle set to True.
        """
        self.name = name
        if isinstance(boundary, _Path):
            boundary = [boundary]
        self.boundary = boundary
        for b in self.boundary:
            assert b.iscycle, "Boundary of cell must be a cycle"

    def __str__(self):
        return str(self.boundary[0])

    def __repr__(self):
        if self.name == "":
            return "cell(%s)" % str(self.boundary[0])
        return "cell(%s: %s)" % (str(self.name), str(self.boundary[0]))

    def toDomain(self):
        """Return this cell as a domain with coefficient one."""
        return _Domain({self : 1})

    def bdOneChain(self):
        """Return the one chain represented by the cell's boundary."""
        bd_chains = [bd.toOneChain() for bd in self.boundary]
        return _OneChain().accumulate(bd_chains)
class _Domain(SummableDict):
    """A signed sum of oriented cells, as a dictionary from Cell to
    integers.
    """
    def diff(self):
        """Return the boundary of this domain as a one-chain."""
        bd_terms = [coeff * cell.bdOneChain()
                    for cell, coeff in list(self.items())]
        return _OneChain().accumulate(bd_terms)
class _OneChain(SummableDict):
    """A signed sum of oriented segments, as a dictionary from Segment to
    integers.
    """
    def diff(self):
        """Return the boundary of this one-chain as a zero-chain."""
        bd_terms = [coeff * seg.bdZeroChain()
                    for seg, coeff in list(self.items())]
        return _ZeroChain().accumulate(bd_terms)
class _ZeroChain(SummableDict):
    """A signed sum of points, represented as a dictionary from Point to
    integers.
    """
    # No behavior beyond SummableDict is needed for zero-chains.
    pass
class HFGenerator(object):
    """A generator of the Heegaard Floer chain complex associated to a
    Heegaard diagram.
    """
    def __init__(self, parent, points):
        """parent is the Heegaard diagram; points is a list of points in
        that diagram.
        """
        self.parent = parent
        self.points = points

    def getIdem(self):
        """Returns the (type A) idempotent of this generator; see the
        corresponding function in HeegaardDiagram for details.
        """
        return self.parent.getGeneratorIdem(self)

    def getIdemSize(self):
        """Returns the size of the type A idempotent on the first PMC. In the
        one border case this is always the genus. In the two border cases
        generators are grouped by this size.
        """
        idems = self.getIdem()
        if len(idems) == 0:
            # Closed diagram: no boundary idempotents at all.
            return 0
        return len(idems[0])

    def getDIdem(self):
        """Returns the complement of the idempotent of this generator, in the
        opposite PMC (used in type D and type DD structures).
        """
        idem = self.getIdem()
        if isinstance(idem, Idempotent):
            return idem.opp().comp()
        return [i.opp().comp() for i in idem]

    def toZeroChain(self):
        """Returns the zero-chain of this generator (coefficient one at
        each point).
        """
        point_chains = [_ZeroChain({pt : 1}) for pt in self.points]
        return _ZeroChain().accumulate(point_chains)
class HeegaardDiagram(object):
"""Represents a possibly bordered Heegaard Diagram."""
def __init__(self, name, points, segments, cells, alpha, beta, border,
basept):
"""Specify the information needed to build a bordered Heegaard diagram.
Elements of border, and elements of alpha and beta representing cycles
must be Path with ``iscycle`` set to ``True``. Elements of alpha and
beta representing arcs must be Path with ``iscycle`` set to ``False``.
"""
self.name = name
self.points = points
self.segments = segments
self.cells = cells
self.alpha = tolist(alpha)
self.beta = tolist(beta)
self.border = tolist(border)
self.basept = tolist(basept)
self.num_point = len(self.points)
self.num_segment = len(self.segments)
self.num_cell = len(self.cells)
# Separate both alpha and beta lists into cycles and arcs
self.alpha_cycles, self.alpha_arcs = [], []
self.beta_cycles, self.beta_arcs = [], []
for path in self.alpha:
if path.iscycle: self.alpha_cycles.append(path)
else: self.alpha_arcs.append(path)
for path in self.beta:
if path.iscycle: self.beta_cycles.append(path)
else: self.beta_arcs.append(path)
assert len(self.alpha_arcs) % 2 == 0 and len(self.beta_arcs) % 2 == 0
# Compute genus = number of cycles + (number of arcs / 2)
self.genus = len(self.alpha_cycles) + (len(self.alpha_arcs) // 2)
assert self.genus == len(self.beta_cycles) + (len(self.beta_arcs) // 2)
# Compute pmc_genus = length of border / 4
assert all([len(path) % 4 == 0 for path in self.border])
self.pmc_genus = [len(path) // 4 for path in self.border]
# For each point, compute its position in the alpha, beta, or border
# paths. Represent them as tuples in the alpha_pos, beta_pos, and
# border_pos fields of each point. The field is None if not applicable
# for that point. Each tuple is (path index, point index within path).
# Both indices start at zero.
# For each segment, record its type (one of ALPHA, BETA, and BORDER),
# and info: the tuple (path index, index within path, and orientation).
for pt in points:
pt.alpha_info = pt.beta_info = pt.border_info = None
for alpha_id in range(len(self.alpha)):
alpha_path = self.alpha[alpha_id]
for oseg_id in range(len(alpha_path)):
oseg = alpha_path[oseg_id]
oseg.seg.type = ALPHA
oseg.seg.info = (alpha_id, oseg_id, oseg.orientation)
oseg.start.alpha_info = (alpha_id, oseg_id)
if not alpha_path.iscycle:
alpha_path[-1].end.alpha_info = (alpha_id, len(alpha_path))
for beta_id in range(len(self.beta)):
beta_path = self.beta[beta_id]
for oseg_id in range(len(beta_path)):
oseg = beta_path[oseg_id]
oseg.seg.type = BETA
oseg.seg.info = (beta_id, oseg_id, oseg.orientation)
oseg.start.beta_info = (beta_id, oseg_id)
if not beta_path.iscycle:
beta_path[-1].end.beta_info = (beta_id, len(beta_path))
for border_id in range(len(self.border)):
border_path = self.border[border_id]
for oseg_id in range(len(border_path)):
oseg = border_path[oseg_id]
oseg.seg.type = BORDER
oseg.seg.info = (border_id, oseg_id, oseg.orientation)
oseg.start.border_info = (border_id, oseg_id)
assert border_path.iscycle
# Compute the PMC on each border. In addition, associate to each arc
# the tuple (border_id, idem_pair) in attribute idem_info
self.num_pmc = len(self.border)
pmc_matchings = [[] for i in range(self.num_pmc)]
for path in self.alpha_arcs + self.beta_arcs:
start_pt = path[0].start
end_pt = path[-1].end
start_border_id, start_border_pos = start_pt.border_info
end_border_id, end_border_pos = end_pt.border_info
assert start_border_id == end_border_id
idem_pair = (start_border_pos, end_border_pos)
pmc_matchings[start_border_id].append(idem_pair)
path.idem_info = (start_border_id, idem_pair)
assert all([self.pmc_genus[i] == len(pmc_matchings[i]) // 2
for i in range(len(pmc_matchings))])
self.pmc_list = [PMC(matching) for matching in pmc_matchings]
# Compute the list of generators
self._computeHFGenerators()
# Construct row system of domains
self._computeRowSystem()
def _computeHFGenerators(self):
"""Compute the list of generators. Produces _generator_list attribute
in self. This is a list of generators if there are less than two
borders, and a list of lists of generators, grouped by size of
idempotent on the first border.
"""
self.two_border_case = (len(self.border) == 2)
if not self.two_border_case:
self._generator_list = []
else:
self._generator_list = [[] for i in range(self.pmc_genus[0]*2+1)]
def helper(pts_chosen, pt_pos, alpha_occupied, beta_occupied):
"""pts_chosen is the list of points in the current partial
generator. pt_pos is the index of the next point to be considered
for inclusion. alpha_occupied is a list of booleans on whether each
alpha cycle / arc is already used. Likewise for beta_occupied.
"""
if len(pts_chosen) == self.genus:
# Check each alpha cycle and beta cycle are used
for i in range(len(self.alpha)):
if self.alpha[i].iscycle and not alpha_occupied[i]:
return
for i in range(len(self.beta)):
if self.beta[i].iscycle and not beta_occupied[i]:
return
# Can add this generator
hf_gen = HFGenerator(self, pts_chosen)
if not self.two_border_case:
self._generator_list.append(hf_gen)
return
else:
self._generator_list[hf_gen.getIdemSize()].append(hf_gen)
return
# Need to add more points
if pt_pos == len(self.points):
return
helper(pts_chosen, pt_pos+1, alpha_occupied,
beta_occupied)
cur_pt = self.points[pt_pos]
if cur_pt.border_info != None:
# Cannot add points on the border
return
cur_alpha_id = cur_pt.alpha_info[0]
cur_beta_id = cur_pt.beta_info[0]
if alpha_occupied[cur_alpha_id] or beta_occupied[cur_beta_id]:
return
# Attempt to add this point
alpha_occupied[cur_alpha_id] = beta_occupied[cur_beta_id] = True
helper(pts_chosen+[cur_pt], pt_pos+1, alpha_occupied,
beta_occupied)
alpha_occupied[cur_alpha_id] = beta_occupied[cur_beta_id] = False
# Finally call with initial conditions
helper([], 0, [False]*len(self.alpha), [False]*len(self.beta))
def _computeRowSystem(self):
"""Construct the row system of segments associated to the diagram. Each
row is a vector in R^n, where n is the number of interior segments.
Segments on the border are ignored. Each row corresponds to either a
non-basepoint cell or an alpha/beta path. This is used to find both
periodic domains (linear relations on the rows) and domains connecting
two generators (represent the boundary as a linear combination of
rows).
Besides the row system self.row_sys, this function constructs the
following:
*. used_cells: list of non-basepoint cells. The indices of cells in
this array agree with their indices in the row system.
*. num_used_cell: number of non-basepoint cells.
*. interior_segs: non-border segments. The indices of segments in this
array agree with their (column) indices in the row system.
*. seg_to_id: translating segments to their index in interior_segs.
*. num_interior_seg: number of interior segments.
"""
# Construct list of non-basepoint cells.
self.used_cells = list(set(self.cells) - set(self.basept))
self.num_used_cell = len(self.used_cells)
# Construct list of interior segments, as well as a dictionary
# translating segments to indices.
self.interior_segs = [seg for seg in self.segments
if seg.type != BORDER]
self.seg_to_id = dict()
self.num_interior_seg = len(self.interior_segs)
for i in range(self.num_interior_seg):
self.seg_to_id[self.interior_segs[i]] = i
rows = [cell.bdOneChain() for cell in self.used_cells] + \
[path.toOneChain() for path in self.alpha + self.beta]
vec_rows = []
for row in rows:
cur_vec = [0] * self.num_interior_seg
for seg, coeff in list(row.items()):
if seg.type != BORDER:
cur_vec[self.seg_to_id[seg]] += coeff
vec_rows.append(cur_vec)
self.row_sys = RowSystem(vec_rows)
def __str__(self):
return "HeegaardDiagram(%s)" % str(self.name)
def __repr__(self):
output = str(self)
output += "\n\nPoints: %s" % str(self.points)
output += "\n\nSegments: %s" % str(self.segments)
output += "\n\nCells: %s" % str(self.cells)
output += "\n\nBase point: %s" % repr(self.basept[0])
output += "\n"
return output
def getPMCs(self):
"""Return the PMC's (zero, one, or two) associated to each border."""
return self.pmc_list
def restrictOneChain(self, one_chain, seg_type):
"""Restrict the one chain to include segments of only the given segment
type.
"""
return _OneChain().accumulate([_OneChain({seg : coeff})
for seg, coeff in list(one_chain.items())
if seg.type == seg_type])
def restrictZeroChain(self, zero_chain):
"""Restrict the zero chain to include only points in the interior."""
return _ZeroChain().accumulate([_ZeroChain({pt : coeff})
for pt, coeff in list(zero_chain.items())
if pt.border_info is None])
def getHFGenerators(self, idem_size = None):
"""Get generators of the Heegaard Floer chain complex associated to
this Heegaard diagram. Each generator is a tuple of self.genus interior
points. To be a valid generator, each cycle must contain exactly one
point, and each arc must contain at most one point. By default, exactly
half of the arcs on each side contain points. This can be changed by
specifying idem_size, which is the number of arcs on the first border
(usually considered the left border) that contain points.
"""
if not self.two_border_case:
# idem_size is meaningless then
assert idem_size is None
return self._generator_list
else:
if idem_size is None:
# Defaults to half of the arcs occupied
idem_size = self.pmc_genus[0]
return self._generator_list[idem_size]
def getGeneratorByIdem(self, idem, D_idem = False):
"""Returns the first generator with the given idempotent. Returns None
if none exist. Note this takes time linear in the number of generators.
(can be improved)
"""
if D_idem:
idem_size = self.pmc_list[0].num_pair - len(idem[0])
else:
idem_size = len(idem[0])
for gen in self.getHFGenerators(idem_size):
if not D_idem and gen.getIdem() == idem:
return gen
if D_idem and gen.getDIdem() == idem:
return gen
return None
    def getGeneratorIdem(self, generator):
        """Find the idempotent of this generator. This gives the idempotent of
        arcs occupied by this generator (that is, type A idempotents), in the
        same PMC's as that will be returned by getPMCs().

        Returns a list of Idempotent, one per border, in border order.
        """
        num_border = len(self.border)
        # raw_idems[i] collects the pair-ids occupied on border i.
        raw_idems = [[] for i in range(num_border)]
        for pt in generator.points:
            # Every generator point lies on exactly one alpha and one beta
            # curve.
            assert pt.alpha_info is not None and pt.beta_info is not None
            alpha_path = self.alpha[pt.alpha_info[0]]
            beta_path = self.beta[pt.beta_info[0]]
            for path in (alpha_path, beta_path):
                if not path.iscycle: # is an arc that goes to the boundary
                    # idem_info identifies which border the arc reaches and
                    # which pair of border points it connects.
                    border_id, pair = path.idem_info
                    pairid = self.pmc_list[border_id].pairid[pair[0]]
                    raw_idems[border_id].append(pairid)
        idems = []
        for i in range(num_border):
            idems.append(Idempotent(self.pmc_list[i], raw_idems[i]))
        return idems
def getPeriodicDomains(self):
"""Get the list of periodic domains."""
vec_domains = self.row_sys.getZeroComb()
domains = []
for vec_domain in vec_domains:
domains.append(_Domain().accumulate(
[self.used_cells[i].toDomain() * vec_domain[i]
for i in range(self.num_used_cell)]))
return domains
    def getConnectingDomain(self, gen1, gen2):
        """Get a domain connecting two generators. Note the returned value is
        not uniquely specified (only unique up to periodic domains).
        The alpha part of the boundary of this domain goes from gen1 to gen2,
        while the beta part goes from gen2 to gen1.
        Returns None if the required boundary cannot be expressed as a
        combination of cell boundaries in the row system.
        """
        # Accumulate the required boundary one-chain, then solve for a cell
        # combination realizing it.
        bdChain = _OneChain()
        # For gen1 points lying on cycles, remember their position so the
        # matching gen2 point on the same cycle determines the cycle portion
        # to add.
        alpha_id_to_pos = dict()
        beta_id_to_pos = dict()
        # For each point in gen1: if that point is on an arc, immediately add
        # the appropriate partial arc. Otherwise record the position on the
        # cycle.
        for pt in gen1.points:
            alpha_id, oseg_id = pt.alpha_info
            alpha_path = self.alpha[alpha_id]
            if alpha_path.iscycle:
                alpha_id_to_pos[alpha_id] = oseg_id
            else:
                # Alpha boundary goes from gen1 to gen2: tail of the arc.
                bdChain += alpha_path.subPath(oseg_id, -1).toOneChain()
            beta_id, oseg_id = pt.beta_info
            beta_path = self.beta[beta_id]
            if beta_path.iscycle:
                beta_id_to_pos[beta_id] = oseg_id
            else:
                # Beta boundary goes from gen2 to gen1: head of the arc.
                bdChain += beta_path.subPath(0, oseg_id).toOneChain()
        # For each point on gen2: if that point is on an arc, add the partial
        # arc like before. Otherwise, use the dictionaries constructed for gen1
        # to find the part of cycle to add
        for pt in gen2.points:
            alpha_id, oseg_id = pt.alpha_info
            alpha_path = self.alpha[alpha_id]
            if alpha_path.iscycle:
                bdChain += alpha_path.subPath(
                    alpha_id_to_pos[alpha_id], oseg_id).toOneChain()
            else:
                bdChain += alpha_path.subPath(0, oseg_id).toOneChain()
            beta_id, oseg_id = pt.beta_info
            beta_path = self.beta[beta_id]
            if beta_path.iscycle:
                bdChain += beta_path.subPath(
                    oseg_id, beta_id_to_pos[beta_id]).toOneChain()
            else:
                bdChain += beta_path.subPath(oseg_id, -1).toOneChain()
        # Now translate bdChain to vectors in the row system.
        bd_vec = [0] * self.num_interior_seg
        for seg, coeff in list(bdChain.items()):
            assert seg.type != BORDER
            bd_vec[self.seg_to_id[seg]] += coeff
        vec_domain = self.row_sys.getComb(bd_vec)
        if vec_domain is None:
            return None
        else:
            # Assemble the connecting domain from the cell combination.
            return _Domain().accumulate(
                [self.used_cells[i].toDomain() * vec_domain[i]
                 for i in range(self.num_used_cell)])
    def getMaslov(self, domain, x, y):
        r"""Returns the Maslov grading of a domain connecting generators x and
        y. This is defined as:
        \mu(B) = -e(B) - n_x(B) - n_y(B),
        where e(B) is the Euler measure, and n_x, n_y are average
        multiplicities of points in the generators on the domain.
        (Raw docstring: \mu is a LaTeX macro, not an escape sequence.)
        """
        # We will use the fact that each corner of an individual cell counts as
        # a right angle (add 1/4 to n_x and n_y).
        maslov = Fraction(0)
        xpts, ypts = x.points, y.points
        for cell, coeff in list(domain.items()):
            # Per-cell contribution: Euler measure plus corner counts,
            # accumulated with Fraction arithmetic to stay exact.
            cell_maslov = 1 - len(cell.boundary) # correction to e(B)
            for boundary in cell.boundary:
                cell_maslov += (4-len(boundary)) / Fraction(4) # e(B)
                for oseg in boundary:
                    if oseg.end in xpts:
                        cell_maslov += Fraction(1,4) # n_x(B)
                    if oseg.end in ypts:
                        cell_maslov += Fraction(1,4) # n_y(B)
            # Overall minus sign from the defining formula.
            maslov -= cell_maslov * coeff
        return maslov
    def getBigGrading(self, domain, x, y):
        """Returns the big grading (that is, not refined) of a domain
        connecting generators x and y. This has a Maslov part and a Spin-c
        part that records the part of boundary of domain on the border. This
        should be used only when the diagram has at least one border. The
        returned value is a list of BigGradingElement, one for each border (and
        PMC). The first grading contains the Maslov part, the remaining ones
        have Maslov part zero.
        """
        maslov = self.getMaslov(domain, x, y)
        # multiplicity[i][j] is the multiplicity of the domain's boundary on
        # the j-th segment of the i-th border (basepoint edge excluded).
        multiplicity = []
        pmcs = self.getPMCs()
        for pmc in pmcs:
            multiplicity.append([0] * (pmc.n-1))
        # Restrict the boundary of the domain to border segments.
        bd_border = self.restrictOneChain(domain.diff(), BORDER)
        for seg, coeff in list(bd_border.items()):
            assert seg.type == BORDER
            border_id, oseg_id, orientation = seg.info
            # Cannot be the edge with the basepoint
            assert oseg_id != pmcs[border_id].n - 1
            multiplicity[border_id][oseg_id] += orientation * coeff
        result = []
        for i in range(len(pmcs)):
            result.append(pmcs[i].big_gr(0, multiplicity[i]))
        # By convention the whole Maslov component goes into the first factor.
        result[0].maslov = maslov
        return result
def getBigGradingD(self, domain, x, y):
"""Returns the big grading of domain used in type D structures (with
Ropp()).
"""
return [a.Ropp() for a in self.getBigGrading(domain, x, y)]
    def getSmallGradingD(self, domain, x, y):
        """Returns the small (refined) grading of a domain connecting
        generators x and y. Similar to and uses getBigGrading as a first step.
        """
        big_gr = self.getBigGradingD(domain, x, y)
        small_gr = []
        for i in range(len(big_gr)):
            if len(x.getIdem()) == 2:
                # In 2 boundary component case, find size of the current
                # (D-side) idempotent.
                idem_size = len(x.getDIdem()[i])
            else:
                idem_size = None
            cur_big_gr = big_gr[i]
            cur_pmc_opp = self.pmc_list[i].opp()
            # Refinement data maps idempotents to grading-group elements;
            # conjugating the big grading by these yields the small grading.
            refine_data = DEFAULT_REFINEMENT(cur_pmc_opp, idem_size)
            phix, phiy = [refine_data[p.getDIdem()[i]] for p in (x, y)]
            small_gr.append(
                (phiy * cur_big_gr * phix.inverse()).toSmallGrading())
        return small_gr
    def computeDGrading(self, base_gen, base_gr = None):
        """Compute type D grading for each generator, with the grading of
        generator base_gen set to base_gr (defaults to zero grading).
        This function assumes that the diagram has one border, and that all
        generators are linked by domains. The return value is the pair
        consisting of the grading set and the dictionary mapping generators to
        elements of a SimpleGradingSet.
        """
        # Select the grading flavor from the module-level default.
        if DEFAULT_GRADING == BIG_GRADING:
            gr_fun, gr_group_cls = self.getBigGradingD, BigGradingGroup
        else:
            gr_fun, gr_group_cls = self.getSmallGradingD, SmallGradingGroup
        # First construct grading set (mainly construct gradings of periodic
        # domains)
        assert len(self.pmc_list) == 1
        pmc = self.pmc_list[0]
        pmc_opp = pmc.opp()
        if base_gr is None:
            base_gr = gr_group_cls(pmc_opp).zero()
        periodic_domains = self.getPeriodicDomains()
        domains_gr = [gr_fun(domain, base_gen, base_gen)[0]
                      for domain in periodic_domains]
        # Conjugate by base_gr so periodic-domain gradings are expressed
        # relative to the chosen base grading.
        domains_gr = [base_gr.inverse() * domain_gr * base_gr
                      for domain_gr in domains_gr]
        gr_set = SimpleGradingSet(gr_group_cls(pmc_opp), ACTION_LEFT,
                                  domains_gr)
        # Now compute grading of each generator by finding domains connecting
        # it to the base generator.
        result = dict()
        for gen in self.getHFGenerators():
            conn_domain = self.getConnectingDomain(base_gen, gen)
            domain_gr = gr_fun(conn_domain, base_gen, gen)[0]
            domain_gr = domain_gr * base_gr
            result[gen] = SimpleGradingSetElement(gr_set, domain_gr)
        return (gr_set, result)
    def computeDDGrading(self, base_gen, base_gr = None):
        """Compute type DD grading for each generator, with the grading of
        generator base_gen set to base_gr (defaults to zero grading).
        This function assumes that the diagram has two borders, and that all
        generators are linked by domains. The return value is the pair
        consisting of the grading set and the dictionary mapping generators to
        elements of a SimpleDbGradingSet.
        """
        # Select the grading flavor from the module-level default.
        if DEFAULT_GRADING == BIG_GRADING:
            gr_fun, gr_group_cls = self.getBigGradingD, BigGradingGroup
        else:
            gr_fun, gr_group_cls = self.getSmallGradingD, SmallGradingGroup
        # First construct grading set (mainly construct gradings of periodic
        # domains)
        assert len(self.pmc_list) == 2
        pmc1, pmc2 = self.pmc_list
        pmc1_opp, pmc2_opp = pmc1.opp(), pmc2.opp()
        if base_gr is None:
            # One zero grading for each of the two borders.
            base_gr = [gr_group_cls(pmc1_opp).zero(),
                       gr_group_cls(pmc2_opp).zero()]
        periodic_domains = self.getPeriodicDomains()
        domains_gr = [gr_fun(domain, base_gen, base_gen)
                      for domain in periodic_domains]
        # Conjugate each factor by the corresponding base grading.
        domains_gr = [[base_gr[0].inverse() * domain_gr0 * base_gr[0],
                       base_gr[1].inverse() * domain_gr1 * base_gr[1]]
                      for domain_gr0, domain_gr1 in domains_gr]
        gr_set = SimpleDbGradingSet(gr_group_cls(pmc1_opp), ACTION_LEFT,
                                    gr_group_cls(pmc2_opp), ACTION_LEFT,
                                    domains_gr)
        # Now compute grading of each generator by finding domains connecting
        # it to the base generator.
        result = dict()
        for gen in self.getHFGenerators(base_gen.getIdemSize()):
            conn_domain = self.getConnectingDomain(base_gen, gen)
            domain_gr0, domain_gr1 = gr_fun(conn_domain, base_gen, gen)
            domain_gr = [domain_gr0 * base_gr[0], domain_gr1 * base_gr[1]]
            result[gen] = SimpleDbGradingSetElement(gr_set, domain_gr)
        return (gr_set, result)
def computeDAGrading(self, base_gen, base_gr = None):
"""Compute type DA grading for each generator. The type DA grading is
related in a straightforward way to the type DD grading. So see the
function computeDDGrading for details.
"""
if base_gr is not None:
return NotImplemented # code the other case later
dd_gr_set, dd_result = self.computeDDGrading(base_gen, base_gr)
lr_domains = [(d1, d2.Ropp()) for d1, d2 in dd_gr_set.periodic_domains]
gr_set = SimpleDbGradingSet(
dd_gr_set.gr_group1, ACTION_LEFT,
dd_gr_set.gr_group2.opp(), ACTION_RIGHT, lr_domains)
result = dict()
for x, gr_x in list(dd_result.items()):
gr_x1, gr_x2 = gr_x.data
gr_x_lr = SimpleDbGradingSetElement(gr_set, (gr_x1, gr_x2.Ropp()))
result[x] = gr_x_lr
return (gr_set, result)
def diagramFromCycleInfo(name, num_interior_point = 0, length_border = (),
                         alpha_cycles = (), alpha_arcs = (),
                         beta_cycles = (), beta_arcs = (),
                         crossing_orientation = (), cell_info = (),
                         basept = ()):
    """Building a Heegaard diagram by specifying points on each cycle. The
    meanings of each argument is as follows:
    *. ``num_interior_point`` - number of interior points.
    *. ``length_border`` - list consisting of number of points on each border.
    The points will be numbered in the counter-clockwise direction.
    *. ``alpha_cycles`` - list of list of points on each alpha cycle.
    *. ``alpha_arcs`` - list of list of points on each alpha arc, except for
    the first and last element of each list, which is a pair ``(b, n)`` where
    ``b`` is the index of the border and ``n`` is the index of the point on the
    border. Indices start at 0 and increases in the counterclockwise direction.
    *. ``beta_cycles`` - similar to ``alpha_cycles``.
    *. ``beta_arcs`` - similar to ``alpha_arcs``.
    *. ``crossing_orientation`` - orientation at each interior point. +1 means
    if the alpha cycle/arc goes from left to right, then the beta cycle/arc
    goes from bottom to top. Otherwise -1.
    *. ``cell_info`` - give names for a selection of cells. Each element is
    one of the following:
    **. ((3, 5), "a") - points 3 and 5 are consecutive points on the
    boundary of the cell "a", going counter-clockwise.
    **. ([(3, 5), (4, 6)], "a") - cell "a" has two boundaries.
    **. (("b", 4), "a") - part of cell "a" is the first border, from point
    4 to point 5.
    **. (("b2", 4), "a") - part of cell "a" is the second border. "b1" is
    also recognized.
    *. ``basept`` - Specify the basepoint cell. See ``cell_info`` for giving
    the cell. Can also give name of the cell in place of boundary information.
    For bordered diagrams the obvious basepoint cell is automatically set.
    """
    # Defaults are immutable tuples: the previous mutable-list defaults were
    # shared between calls (they were only read here, but this removes the
    # hazard without changing behavior).
    # NOTE(review): cell_info and basept are accepted but never used in this
    # implementation; the basepoint is chosen automatically for bordered
    # diagrams (see end of function). Confirm whether closed-diagram
    # basepoint selection was ever implemented.
    # Construct the set of points
    interior_points = [_Point("p%d" % i) for i in range(num_interior_point)]
    border_points = [[_Point("b%d,%d" % (i, j))
                      for j in range(length_border[i])]
                     for i in range(len(length_border))]
    points = interior_points + sum(border_points, [])
    def getPt(param):
        # Single integer for interior points, and pair for boundary points
        if isinstance(param, int):
            return interior_points[param]
        else:
            return border_points[param[0]][param[1]]
    # Construct the set of segments and paths
    interior_segs = []
    border_segs = []
    alpha = []
    beta = []
    border = []
    def processPath(ptIDlist, prefix, iscycle):
        # Given a list of points, generate segments connecting consecutive
        # points, as well as a path connecting all.
        ptlist = [getPt(param) for param in ptIDlist]
        newsegs = []
        for i in range(len(ptlist)-1):
            newsegs.append(_Segment("%s,%d" % (prefix, i),
                                    ptlist[i], ptlist[i+1]))
        if iscycle:
            # Closing segment back to the first point.
            newsegs.append(_Segment("%s,%d" % (prefix, len(ptlist)-1),
                                    ptlist[-1], ptlist[0]))
        newpath = _Path([seg.oseg() for seg in newsegs], prefix, iscycle)
        return newsegs, newpath
    # Segment-name prefixes encode the curve type: "ac"/"aa" alpha
    # cycle/arc, "bc"/"ba" beta cycle/arc, "bd" border. The neighbor
    # classification below relies on these prefixes.
    for i in range(len(alpha_cycles)):
        newsegs, newpath = processPath(alpha_cycles[i], "ac%d"%i, True)
        interior_segs.extend(newsegs)
        alpha.append(newpath)
    for i in range(len(alpha_arcs)):
        newsegs, newpath = processPath(alpha_arcs[i], "aa%d"%i, False)
        interior_segs.extend(newsegs)
        alpha.append(newpath)
    for i in range(len(beta_cycles)):
        newsegs, newpath = processPath(beta_cycles[i], "bc%d"%i, True)
        interior_segs.extend(newsegs)
        beta.append(newpath)
    for i in range(len(beta_arcs)):
        newsegs, newpath = processPath(beta_arcs[i], "ba%d"%i, False)
        interior_segs.extend(newsegs)
        beta.append(newpath)
    for i in range(len(length_border)):
        curpath = [(i, j) for j in range(length_border[i])]
        newsegs, newpath = processPath(curpath, "bd%d"%i, True)
        border_segs.append(newsegs)
        border.append(newpath)
    segments = interior_segs + sum(border_segs, [])
    # For each point, compute its three or four neighbors.
    alphain, alphaout, betain, betaout, borderin, borderout = \
        ({},{},{},{},{},{})
    for seg in segments:
        if seg.name[1] == 'd': # border
            borderout[seg.start] = seg
            borderin[seg.end] = seg
        elif seg.name[0] == 'a': # alpha
            alphaout[seg.start] = seg
            alphain[seg.end] = seg
        else: # beta
            betaout[seg.start] = seg
            betain[seg.end] = seg
    # Construct the map from oriented segments to the next one in the same cell
    # in the counterclockwise direction.
    nextseg = {}
    for i in range(num_interior_point):
        pt = interior_points[i]
        ori = crossing_orientation[i]
        if ori == 1:
            nextseg[alphain[pt].oseg()] = betaout[pt].oseg()
            nextseg[betaout[pt].orseg()] = alphaout[pt].oseg()
            nextseg[alphaout[pt].orseg()] = betain[pt].orseg()
            nextseg[betain[pt].oseg()] = alphain[pt].orseg()
        else: # ori == -1
            nextseg[alphain[pt].oseg()] = betain[pt].orseg()
            nextseg[betain[pt].oseg()] = alphaout[pt].oseg()
            nextseg[alphaout[pt].orseg()] = betaout[pt].oseg()
            nextseg[betaout[pt].orseg()] = alphain[pt].orseg()
    for cur_border in border_points:
        for pt in cur_border:
            segoutborder = None # Oriented segment pointing out of border
            if pt in alphaout:
                segoutborder = alphaout[pt].oseg()
            elif pt in alphain:
                segoutborder = alphain[pt].orseg()
            elif pt in betaout:
                segoutborder = betaout[pt].oseg()
            else: # betain.has_key(pt)
                segoutborder = betain[pt].orseg()
            nextseg[borderin[pt].oseg()] = segoutborder
            nextseg[segoutborder.opp()] = borderout[pt].oseg()
    # Construct cells
    cells = []
    # Start with a list of oriented segments that can be used as boundary of
    # cells.
    bdcells = set([seg.oseg() for seg in segments] + \
                  [seg.orseg() for seg in interior_segs])
    # Trace each cell boundary by following nextseg until the loop closes.
    while bdcells:
        startbd = bdcells.pop()
        curboundary = [startbd]
        nextbd = nextseg[startbd]
        while nextbd != startbd:
            curboundary.append(nextbd)
            bdcells.remove(nextbd)
            nextbd = nextseg[nextbd]
        cells.append(_Cell("c%d" % (len(cells)+1),
                           _Path(curboundary, iscycle = True)))
    # Construct dictionary from oriented segment to the unique cell for which
    # it is in the counterclockwise boundary.
    segtocell = {}
    for cell in cells:
        for boundary in cell.boundary:
            for oseg in boundary:
                segtocell[oseg] = cell
    # Find the basepoint cell
    allbasept = []
    if len(border) > 0:
        allbasept.append(segtocell[border[0][-1]])
    return HeegaardDiagram(name, points, segments, cells, alpha, beta, border,
                           allbasept)
def getZeroFrameDiagram(genus):
    """Heegaard diagram of the 0-framed handlebody of the given genus. Its
    single (left) boundary carries splitPMC(genus) (really the opposite of
    that, which is the same PMC).
    """
    arcs, cycles = [], []
    for i in range(genus):
        arcs.append([(0, 4*i), i, (0, 4*i+2)])
        arcs.append([(0, 4*i+1), (0, 4*i+3)])
        cycles.append([i])
    return diagramFromCycleInfo("0-framed handlebody of genus %d" % genus,
                                num_interior_point = genus,
                                length_border = [4*genus],
                                alpha_arcs = arcs, beta_cycles = cycles,
                                crossing_orientation = [1]*genus)
def getZeroFrameAdmDiagram(genus):
    """Heegaard diagram of the 0-framed handlebody of a given genus, drawn
    admissibly: each beta circle is isotoped so it crosses another alpha arc
    two extra times.
    """
    # TODO: figure out grading of the associated type D structure from this
    # diagram? (Can also figure out grading using algebra maps there).
    arcs, cycles = [], []
    for i in range(genus):
        base = 3 * i
        arcs.append([(0, 4*i), base, (0, 4*i+2)])
        arcs.append([(0, 4*i+1), base+2, base+1, (0, 4*i+3)])
        cycles.append([base, base+1, base+2])
    return diagramFromCycleInfo("0-framed handlebody of genus %d" % genus,
                                num_interior_point = 3*genus,
                                length_border = [4*genus],
                                alpha_arcs = arcs, beta_cycles = cycles,
                                crossing_orientation = [1,1,-1]*genus)
def getInfFrameDiagram(genus):
    """Heegaard diagram of the inf-framed handlebody of the given genus. The
    boundary is as in the 0-framed handlebody.
    """
    arcs, cycles = [], []
    for i in range(genus):
        arcs.append([(0, 4*i), (0, 4*i+2)])
        arcs.append([(0, 4*i+1), i, (0, 4*i+3)])
        cycles.append([i])
    return diagramFromCycleInfo("Inf-framed handlebody of genus %d" % genus,
                                num_interior_point = genus,
                                length_border = [4*genus],
                                alpha_arcs = arcs, beta_cycles = cycles,
                                crossing_orientation = [1]*genus)
def getPlatDiagram(genus):
    """Heegaard diagram of the plat handlebody of the given genus; the left
    boundary carries linearPMC(genus).
    """
    arcs = [[(0, 0), (0, 2)],
            [(0, 4*genus-3), 2*genus-2, (0, 4*genus-1)]]
    cycles = [[0]]
    for i in range(genus-1):
        arcs.append([(0, 4*i+1), 2*i, 2*i+1, (0, 4*i+4)])
        arcs.append([(0, 4*i+3), (0, 4*i+6)])
        cycles.append([2*i+1, 2*i+2])
    return diagramFromCycleInfo("Plat handlebody of genus %d" % genus,
                                num_interior_point = 2*genus-1,
                                length_border = [4*genus],
                                alpha_arcs = arcs, beta_cycles = cycles,
                                crossing_orientation = [1,-1]*(genus-1)+[1])
def getIdentityDiagram(pmc):
    """Heegaard diagram of the identity cobordism of the given PMC. The left
    (first) boundary matches the opposite of pmc; the right (second) boundary
    matches pmc itself.
    """
    n = pmc.n
    arcs, cycles = [], []
    for i in range(pmc.num_pair):
        p, q = pmc.pairs[i]
        pt_a, pt_b = 2*i, 2*i+1
        arcs.append([(1, p), pt_a, (1, q)])
        arcs.append([(0, n-1-q), pt_b, (0, n-1-p)])
        cycles.append([pt_a, pt_b])
    return diagramFromCycleInfo("Identity cobordism for %s" % repr(pmc),
                                num_interior_point = n, length_border = [n,n],
                                alpha_arcs = arcs, beta_cycles = cycles,
                                crossing_orientation = [1,-1]*pmc.num_pair)
def getArcslideDiagram(slide):
    """Get Heegaard diagram for the given arcslide. The left (first) boundary
    has the opposite of starting pmc. The right (second) boundary has ending
    pmc.

    Interior points 2*i, 2*i+1 serve pair i; one extra special point
    2*num_pair is used for the slid (B over C) configuration.
    """
    pmc1, pmc2 = slide.start_pmc, slide.end_pmc
    b_pair, c_pair = slide.b_pair, slide.c_pair
    n, num_pair = pmc1.n, pmc1.num_pair
    a_arcs = []
    b_cycles = []
    for i in range(num_pair):
        if i not in (b_pair, c_pair):
            # Draw arcs for ordinary pairs
            p, q = pmc1.pairs[i] # index of points at left, from bottom
            rp, rq = slide.to_r[p], slide.to_r[q] # index at right, from bottom
            assert rp < rq
            pt1, pt2 = 2*i, 2*i+1
            a_arcs.append([(1,rp), pt1, (1,rq)])
            op, oq = n-1-p, n-1-q # index at left, from top
            a_arcs.append([(0,oq), pt2, (0,op)])
            b_cycles.append([pt1, pt2])
    # Now draw the B and C pairs.
    bpt1, bpt2 = 2*b_pair, 2*b_pair+1 # two ordinary points on B pair
    cpt1, cpt2 = 2*c_pair, 2*c_pair+1 # two ordinary points on C pair
    spt = 2*num_pair # the one point special for this diagram
    b1, b2, c1, c2 = slide.b1, slide.b2, slide.c1, slide.c2 # left, from bottom
    rb1, rb2, rc1, rc2 = (slide.to_r[b1], slide.to_r[b2],
                          slide.to_r[c1], slide.to_r[c2]) # right, from bottom
    ob1, ob2, oc1, oc2 = n-1-b1, n-1-b2, n-1-c1, n-1-c2
    a_arcs.append([(1,rb1), bpt1, (1,rb2)])
    # The B arc passes through the special point as well.
    a_arcs.append([(0,ob2), bpt2, spt, (0,ob1)])
    a_arcs.append([(1,rc1), cpt1, (1,rc2)])
    a_arcs.append([(0,oc2), cpt2, (0,oc1)])
    b_cycles.append([bpt1, bpt2])
    # Order of points on the C cycle depends on whether the slide goes down
    # (b1 > c1) or up.
    if b1 > c1: # special cycle on c2 -> special pair at bottom
        b_cycles.append([cpt1, cpt2, spt])
    else:
        b_cycles.append([cpt1, spt, cpt2])
    # Note orientation at spt is always -1
    return diagramFromCycleInfo("Diagram for %s" % repr(slide),
                                num_interior_point = n+1,
                                length_border = [n,n],
                                alpha_arcs = a_arcs, beta_cycles = b_cycles,
                                crossing_orientation = [1,-1]*num_pair+[-1])
def getSimpleCobordismDiagram(start_pmc, insert_pos):
    """Get Heegaard diagram for a simple cobordism (see SimpleCobordismDA in
    cobordismda.py).

    The right border has four more points than the left (length_border is
    [n, n+4]); the new points are inserted at position insert_pos, so right
    indices at or above insert_pos are shifted up by 4.
    """
    n, num_pair = start_pmc.n, start_pmc.num_pair
    a_arcs, b_cycles = [], []
    for i in range(num_pair):
        pt1, pt2 = 2*i, 2*i+1
        p, q = start_pmc.pairs[i]
        rp, rq = p, q
        # Shift right-border indices past the four inserted points.
        if p >= insert_pos:
            rp += 4
        if q >= insert_pos:
            rq += 4
        op, oq = n-1-p, n-1-q # index at left, from top
        a_arcs.append([(1, rp), pt1, (1, rq)])
        a_arcs.append([(0, oq), pt2, (0, op)])
        b_cycles.append([pt1, pt2])
    # Now draw the cobordism part, using points 2*num_pair+(0, 1, 2)
    pt0, pt1, pt2 = [2*num_pair+i for i in (0, 1, 2)]
    a_arcs.append([(1, insert_pos), pt2, pt1, (1, insert_pos+2)])
    a_arcs.append([(1, insert_pos+1), pt0, (1, insert_pos+3)])
    b_cycles.append([pt0, pt1, pt2])
    return diagramFromCycleInfo(
        "Diagram for cobordism starting at %s inserting at position %s" % (
            start_pmc, insert_pos),
        num_interior_point = 2 * num_pair + 3,
        length_border = [n, n + 4],
        alpha_arcs = a_arcs, beta_cycles = b_cycles,
        crossing_orientation = [1, -1]*num_pair + [-1, 1, -1])
def getCobordismDiagramLeft(cob):
    """Get Heegaard diagram for a cobordism on the linear PMC, with larger PMC
    on the left. cob is of type Cobordism (cobordism.py)
    """
    genus_l = cob.genus
    genus_r = genus_l - 1
    num_pair_l = genus_l * 2
    start_pmc, end_pmc = cob.large_pmc, cob.small_pmc
    start_n = start_pmc.n
    # One-element list so the nested function can mutate the counter
    # (pre-`nonlocal` closure idiom).
    pt_count = [0]
    a_arcs, b_cycles = [], []
    def process_pair(left_pair):
        # Given the index of a pair on the left, draw the same a_arcs and
        # b_cycles as in the identity diagram for that pair.
        p, q = start_pmc.pairs[left_pair]
        rp, rq = cob.to_s[p], cob.to_s[q]
        op, oq = start_n-1-p, start_n-1-q
        pt1, pt2 = pt_count[0], pt_count[0] + 1
        pt_count[0] += 2
        a_arcs.append([(1, rp), pt1, (1, rq)])
        a_arcs.append([(0, oq), pt2, (0, op)])
        b_cycles.append([pt1, pt2])
    if cob.is_degenerate:
        # Degenerate case: only the c-pair and p-pair are special.
        c_pair, p_pair = cob.c_pair, cob.p_pair
        for i in range(num_pair_l):
            if i not in (c_pair, p_pair):
                process_pair(i)
        # Now consider the c-pair and p-pair
        c1, c2 = start_pmc.pairs[c_pair]
        p1, p2 = start_pmc.pairs[p_pair]
        oc1, oc2, op1, op2 = [start_n-1-p for p in (c1, c2, p1, p2)]
        pt = pt_count[0]
        pt_count[0] += 1
        a_arcs.append([(0, oc2), (0, oc1)])
        a_arcs.append([(0, op2), pt, (0, op1)])
        b_cycles.append([pt])
        crossing_orientation = [1, -1] * (num_pair_l - 2) + [1]
    else: # cob is not degenerate
        # General case: the c-, d- and u-pairs on the left collapse to the
        # du-pair on the right.
        c_pair, d_pair, u_pair = cob.c_pair, cob.d_pair, cob.u_pair
        du_pair = cob.du_pair # for the right side
        for i in range(num_pair_l):
            if i not in (c_pair, d_pair, u_pair):
                process_pair(i)
        # Now consider the remaining pairs
        c1, c2 = start_pmc.pairs[c_pair]
        d1, d2 = start_pmc.pairs[d_pair]
        u1, u2 = start_pmc.pairs[u_pair]
        rdu1, rdu2 = end_pmc.pairs[du_pair]
        oc1, oc2, od1, od2, ou1, ou2 = [start_n-1-p
                                        for p in (c1, c2, d1, d2, u1, u2)]
        pt0, pt1, pt2, pt3 = [pt_count[0] + i for i in (0, 1, 2, 3)]
        pt_count[0] += 4
        a_arcs.append([(0, oc2), (0, oc1)])
        a_arcs.append([(0, ou2), pt0, pt2, (0, ou1)])
        a_arcs.append([(0, od2), pt3, (0, od1)])
        a_arcs.append([(1, rdu1), pt1, (1, rdu2)])
        b_cycles.append([pt0, pt1])
        b_cycles.append([pt2, pt3])
        crossing_orientation = [1, -1] * (num_pair_l - 3) + [-1, 1, 1, -1]
    return diagramFromCycleInfo(
        "Diagram for cobordism on linear PMC starting at genus %s and "
        "reducing pair %s " % (genus_l, c_pair),
        num_interior_point = pt_count[0],
        length_border = [start_n, start_n - 4],
        alpha_arcs = a_arcs, beta_cycles = b_cycles,
        crossing_orientation = crossing_orientation)
| 51,765
| 40.445957
| 81
|
py
|
bfh_python
|
bfh_python-master/regression.py
|
"""Regression testing framework
This module will search for scripts in the same directory named
XYZtest.py. Each such script should be a test suite that tests a
module through PyUnit. (As of Python 2.1, PyUnit is included in
the standard library as 'unittest'.) This script will aggregate all
found test suites into one big test suite and run them all at once.
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.4 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import sys, os, re, unittest
def regressionTest():
    """Aggregate all *test.py modules next to this script into one suite.

    Scans the directory containing the running script for files whose names
    end in "test.py" (case-insensitive), imports each as a module, and
    returns a unittest.TestSuite combining the tests loaded from all of them.
    """
    path = os.path.abspath(os.path.dirname(sys.argv[0]))
    files = os.listdir(path)
    # Raw string: "test\.py$" is an invalid escape sequence (SyntaxWarning
    # on Python 3.12+).
    test = re.compile(r"test\.py$", re.IGNORECASE)
    files = [f for f in files if test.search(f)]
    moduleNames = [os.path.splitext(f)[0] for f in files]
    modules = [__import__(name) for name in moduleNames]
    load = unittest.defaultTestLoader.loadTestsFromModule
    return unittest.TestSuite([load(module) for module in modules])
if __name__ == "__main__":
unittest.main(defaultTest="regressionTest")
| 1,313
| 36.542857
| 68
|
py
|
bfh_python
|
bfh_python-master/cobordismdatest.py
|
"""Unit test for cobordismda.py"""
from cobordismda import *
from ddstructure import identityDD
from pmc import splitPMC
import unittest
class CobordismDATest(unittest.TestCase):
    """Tests for the type DA bimodules associated to cobordisms."""
    def testLeftCobordismDA(self):
        """CFDA of a left cobordism: delta satisfies delta^2 = 0, and
        tensoring with the identity DD recovers the cobordism's DD structure.
        """
        for genus, c_pair in [(2, 0), (2, 1), (2, 2), (2, 3),
                              (3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5)]:
            c = Cobordism(genus, c_pair, LEFT)
            c_da = CobordismDALeft(c)
            dastr = c_da.toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
            ddstr = dastr.tensorDD(identityDD(c.end_pmc))
            ori_ddstr = c.getDDStructure()
            self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
    def testGrading(self):
        """The DA structure of a left cobordism has a consistent grading."""
        for genus, c_pair in [(2, 0), (2, 1), (2, 3)]:
            c_da = CobordismDALeft(Cobordism(genus, c_pair, LEFT))
            c_da.toSimpleDAStructure().checkGrading()
    def testRightCobordismDA(self):
        """CFDA of a right cobordism agrees with the cobordism's DD structure
        after tensoring with identity DD and simplifying.
        """
        for genus, c_pair in [(2, 0), (2, 1), (2, 2), (2, 3),
                              (3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5)]:
            c = Cobordism(genus, c_pair, RIGHT)
            c_da = CobordismDARight(c)
            ddstr = c_da.tensorDD(identityDD(c.end_pmc))
            ddstr.simplify()
            ori_ddstr = c.getDDStructure()
            self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
class SimpleCobordismDATest(unittest.TestCase):
    """Tests for SimpleCobordismDA and its composition with arcslides."""
    def testSimpleCobordismDA(self):
        """delta^2 = 0, and tensoring with identity DD gives the expected
        small complex (two generators with 2 and 6 delta terms).
        """
        for pmc, insert_pos in [
                (splitPMC(1), 3),
                (splitPMC(1), 1),
        ]:
            c_da = SimpleCobordismDA(pmc, insert_pos)
            dastr = c_da.toSimpleDAStructure()
            self.assertTrue(dastr.testDelta())
            ddstr = dastr.tensorDD(identityDD(c_da.end_pmc))
            ddstr.simplify()
            ddstr.reindex()
            self.assertEqual(len(ddstr.getGenerators()), 2)
            self.assertEqual(
                sorted(len(gen.delta()) for gen in ddstr.getGenerators()),
                [2, 6])
    def testGrading(self):
        """The simple cobordism DA structure has a consistent grading."""
        for insert_pos in [1, 2, 3]:
            c_da = SimpleCobordismDA(splitPMC(1), insert_pos)
            c_da.toSimpleDAStructure().checkGrading()
    def slideSeq(self, start_pmc, slides):
        # Given a sequence of arcslides tau_1, ... tau_n specified by the
        # starting PMC of tau_1 and a list of (b1, c1), compute:
        # CFDA(tau_1) * ... CFDA(tau_n) * CFDD(Id).
        assert len(slides) > 0
        slides_da = []
        for b1, c1 in slides:
            # Find the list CFDA(tau_i)
            slides_da.append(ArcslideDA(Arcslide(start_pmc, b1, c1)))
            start_pmc = slides_da[-1].pmc2
        dd = identityDD(slides_da[-1].pmc2)
        # Tensor from the right: last slide first.
        for slide_da in reversed(slides_da):
            dd = slide_da.tensorDD(dd)
            dd.reindex()
            dd.simplify()
        return dd
    def testComposeMiddle(self):
        """Composing with arcslides reproduces Cobordism(2, 1, RIGHT)."""
        c_da = SimpleCobordismDA(splitPMC(1), 1)
        dd = c_da.tensorDD(self.slideSeq(
            c_da.pmc2, [(5, 4), (2, 1), (3, 2), (6, 5)]))
        dd.simplify()
        c = Cobordism(2, 1, RIGHT)
        ori_dd = c.getDDStructure()
        self.assertTrue(dd.compareDDStructures(ori_dd))
    def testComposeNextTop(self):
        """Composing with arcslides reproduces Cobordism(2, 2, RIGHT)."""
        c_da = SimpleCobordismDA(splitPMC(1), 3)
        dd = c_da.tensorDD(self.slideSeq(c_da.pmc2, [(7, 6)]))
        dd.simplify()
        c = Cobordism(2, 2, RIGHT)
        ori_dd = c.getDDStructure()
        self.assertTrue(dd.compareDDStructures(ori_dd))
    def testComposeTop(self):
        """Composing with arcslides reproduces Cobordism(2, 3, RIGHT)."""
        c_da = SimpleCobordismDA(splitPMC(1), 3)
        dd = c_da.tensorDD(self.slideSeq(c_da.pmc2, [(6, 5), (7, 6)]))
        dd.simplify()
        c = Cobordism(2, 3, RIGHT)
        ori_dd = c.getDDStructure()
        self.assertTrue(dd.compareDDStructures(ori_dd))
    def testComposeBottom(self):
        """Composing with arcslides reproduces Cobordism(2, 0, RIGHT)."""
        c_da = SimpleCobordismDA(splitPMC(1), 1)
        dd = c_da.tensorDD(self.slideSeq(c_da.pmc2, [(0, 1)]))
        dd.simplify()
        c = Cobordism(2, 0, RIGHT)
        ori_dd = c.getDDStructure()
        self.assertTrue(dd.compareDDStructures(ori_dd))
if __name__ == "__main__":
unittest.main()
| 4,142
| 36.663636
| 78
|
py
|
bfh_python
|
bfh_python-master/ddstructure.py
|
"""Defines type DD structures."""
from algebra import ChainComplex, DGAlgebra, Element, FreeModule, Generator, \
SimpleChainComplex, Tensor, TensorDGAlgebra, TensorIdempotent, \
TensorGenerator
from algebra import expandTensor, simplifyComplex
from algebra import E0
from dstructure import DGenerator, SimpleDGenerator, SimpleDStructure
from grading import GeneralGradingSet, GeneralGradingSetElement, \
SimpleDbGradingSet, SimpleDbGradingSetElement
from hdiagram import getIdentityDiagram
from pmc import Idempotent, Strands, StrandDiagram
from pmc import unconnectSumPMC, unconnectSumStrandDiagram
from utility import MorObject, NamedObject
from utility import memorize
from utility import ACTION_LEFT, ACTION_RIGHT, ASSERT_LEVEL, F2
class DDGenerator(Generator):
    """Represents a generator of type DD structure. Distinguished by (python)
    identity.
    """
    def __init__(self, parent, idem1, idem2):
        """Every generator has two idempotents (for the two type D actions)."""
        Generator.__init__(self, parent)
        # Idempotents for the first and second type D action, respectively.
        self.idem1, self.idem2 = idem1, idem2
    def toDGenerator(self, new_parent):
        """Convert to a generator of a type D structure over the bialgebra."""
        new_idem = TensorIdempotent((self.idem1, self.idem2))
        new_gen = DGenerator(new_parent, new_idem)
        # Copy all attributes, then restore parent and idem, which the
        # update() would otherwise have overwritten with this generator's
        # values.
        new_gen.__dict__.update(self.__dict__)
        new_gen.parent, new_gen.idem = new_parent, new_idem
        return new_gen
    def toSimpleDDGenerator(self, name):
        """Convert to a SimpleDDGenerator with the given name. All fields are
        preserved, except ``name`` which is overwritten, and _hash_val which is
        removed, if present.
        """
        new_obj = SimpleDDGenerator(self.parent, self.idem1, self.idem2, name)
        new_obj.__dict__.update(self.__dict__)
        new_obj.name = name # to make sure original name is overwritten
        if hasattr(new_obj, '_hash_val'):
            del new_obj._hash_val # reset hash value
        return new_obj
class SimpleDDGenerator(DDGenerator, NamedObject):
    """Represents a generator of type DD structure, distinguished by name."""
    def __init__(self, parent, idem1, idem2, name):
        """Specifies name in addition."""
        DDGenerator.__init__(self, parent, idem1, idem2)
        NamedObject.__init__(self, name)
    def __str__(self):
        return "%s:%s,%s" % (self.name, str(self.idem1), str(self.idem2))
    def __repr__(self):
        return str(self)
    def __hash__(self):
        # Hash includes the name, consistent with name-based identity.
        return hash((self.parent, self.idem1, self.idem2, self.name))
    def toDGenerator(self, new_parent):
        # Overloaded in order to convert to SimpleDGenerator
        new_idem = TensorIdempotent((self.idem1, self.idem2))
        new_gen = SimpleDGenerator(new_parent, new_idem, self.name)
        # Copy all attributes, then restore the fields update() overwrote.
        new_gen.__dict__.update(self.__dict__)
        new_gen.parent, new_gen.idem, new_gen.name = \
            new_parent, new_idem, self.name
        return new_gen
class MorDDtoDGenerator(DGenerator, MorObject):
    """Represents a generator of the type D structure of morphisms from a type
    DD structure to a type D structure.
    """
    def __init__(self, parent, source, coeff, target):
        """Specifies the morphism source -> coeff * target."""
        # The D-structure idempotent of the morphism generator is the opposite
        # of the source's second idempotent.
        DGenerator.__init__(self, parent, source.idem2.opp())
        MorObject.__init__(self, source, coeff, target)
class MorDDtoDDGenerator(Generator, MorObject):
    """Represents a generator of the chain complex of bimodule morphisms from a
    type DD structure to a type DD structure.
    """
    def __init__(self, parent, source, coeff, target):
        """Specifies the morphism source -> coeff * target. Note coeff has type
        TensorDGAlgebra of the two algebras that act on the DD structures.
        """
        Generator.__init__(self, parent)
        MorObject.__init__(self, source, coeff, target)
class MorDDtoDDComplex(ChainComplex):
    """Represents the complex of type DD morphisms between two type DD
    structures.
    """
    def __init__(self, ring, source, target):
        """Specifies the source and target DD structures."""
        ChainComplex.__init__(self, ring)
        assert source.algebra1 == target.algebra1 and \
            source.algebra2 == target.algebra2
        assert source.side1 == target.side1 and source.side2 == target.side2
        self.source = source
        self.target = target

    def __eq__(self, other):
        # Unlike other structures, MorDDtoDDComplex is distinguished by its
        # source and target
        return self.source == other.source and self.target == other.target

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # BUG FIX: __hash__ previously took a spurious ``other`` argument,
        # which made every hash(obj) call raise TypeError.
        return hash(tuple((self.source, self.target)))

    def multiply(self, gen1, gen2):
        """Return the composition of two morphisms, as an element of the
        morphism complex from gen1's source to gen2's target.
        """
        if not isinstance(gen1, MorDDtoDDGenerator):
            return NotImplemented
        if not isinstance(gen2, MorDDtoDDGenerator):
            return NotImplemented
        assert gen1.parent.target == gen2.parent.source
        # Morphisms compose only when gen1's target matches gen2's source.
        if gen1.target != gen2.source:
            return E0
        result = E0
        new_parent = MorDDtoDDComplex(
            F2, gen1.parent.source, gen2.parent.target)
        for gen, coeff in list((gen1.coeff * gen2.coeff).items()):
            result += coeff * MorDDtoDDGenerator(
                new_parent, gen1.source, gen, gen2.target)
        return result

    def diff(self, gen):
        """Return the differential of the morphism gen = (x -> a*y). Three
        types of terms: d(y), d(a), and precomposition with the delta of the
        source structure.
        """
        result = E0
        rev_delta = self.source.getReverseDelta()
        tensor_alg = TensorDGAlgebra(
            (self.source.algebra1, self.source.algebra2))
        # Differential of y in (x -> ay)
        x, a, y = gen.source, gen.coeff, gen.target
        ady = a * y.delta()
        for (b1, b2, q), coeff in list(ady.items()):
            b = TensorGenerator((b1, b2), tensor_alg)
            result += coeff * MorDDtoDDGenerator(self, x, b, q)
        # Differential of a
        for da_gen, coeff in list(a.diff().items()):
            result += coeff * MorDDtoDDGenerator(self, x, da_gen, y)
        # Precompose by the differential.
        # For each p such that (b1,b2)*x is in dp, add p->((b1,b2)*a)y
        for (b1, b2, p), coeff1 in rev_delta[x]:
            b = TensorGenerator((b1, b2), tensor_alg)
            for ba_gen, coeff2 in list((b*a).items()):
                result += coeff1 * coeff2 * MorDDtoDDGenerator(
                    self, p, ba_gen, y)
        return result

    def getMappingCone(self, morphism):
        """Returns the mapping cone of a morphism, as a SimpleDDStructure.
        Source generators get filtration level 0, target generators level 1,
        and the morphism's coefficients become extra delta arrows.
        """
        result = SimpleDDStructure(
            F2, self.source.algebra1, self.source.algebra2,
            self.source.side1, self.source.side2)
        gen_map = dict()
        for gen in self.source.getGenerators():
            gen_map[gen] = SimpleDDGenerator(
                result, gen.idem1, gen.idem2, "S_%s" % gen.name)
            gen_map[gen].filtration = [0]
            if hasattr(gen, "filtration"):
                # BUG FIX: previously ``gen_map[gen] += gen.filtration``,
                # which replaced the generator in gen_map instead of
                # extending its filtration attribute.
                gen_map[gen].filtration += gen.filtration
            result.addGenerator(gen_map[gen])
        for gen in self.target.getGenerators():
            gen_map[gen] = SimpleDDGenerator(
                result, gen.idem1, gen.idem2, "T_%s" % gen.name)
            gen_map[gen].filtration = [1]
            if hasattr(gen, "filtration"):
                # BUG FIX: same as above for target generators.
                gen_map[gen].filtration += gen.filtration
            result.addGenerator(gen_map[gen])
        # Copy the internal differentials of source and target.
        for x1 in self.source.getGenerators():
            for (a1, a2, x2), coeff in list(x1.delta().items()):
                result.addDelta(gen_map[x1], gen_map[x2], a1, a2, coeff)
        for y1 in self.target.getGenerators():
            for (b1, b2, y2), coeff in list(y1.delta().items()):
                result.addDelta(gen_map[y1], gen_map[y2], b1, b2, coeff)
        # Add arrows coming from the morphism itself.
        for gen, ring_coeff in list(morphism.items()):
            a1, a2 = gen.coeff
            result.addDelta(
                gen_map[gen.source], gen_map[gen.target], a1, a2, ring_coeff)
        return result
class DDStructure(FreeModule):
    """Represents a type DD structure. Note delta() returns an element in the
    tensor module Tensor((A,A,M)).
    """
    def __init__(self, ring, algebra1, algebra2, side1, side2):
        """Specifies the algebras and sides of the type DD action.

        ring - base ring (usually F2).
        algebra1, algebra2 - the two DGAlgebras acting on this structure.
        side1, side2 - sides of the actions (ACTION_LEFT or ACTION_RIGHT).
        """
        FreeModule.__init__(self, ring)
        assert isinstance(algebra1, DGAlgebra)
        assert isinstance(algebra2, DGAlgebra)
        self.algebra1 = algebra1
        self.side1 = side1
        self.algebra2 = algebra2
        self.side2 = side2
        # Construct A tensor A tensor M. Add diff and the left action on this
        # tensor product.
        self.AAtensorM = Tensor((algebra1, algebra2, self))
        # The closure parameter names below replace the unreadable
        # ``xxx_todo_changeme`` artifacts left by the 2to3 removal of
        # tuple-parameter unpacking (PEP 3113). Behavior is unchanged.
        def _mul_AA_AAtensorM(gen_triple, coeff_pair):
            """To be used as rmultiply() in AAtensorM. Multiply ACoeff1 with
            AGen1 and ACoeff2 with AGen2.
            """
            (AGen1, AGen2, MGen) = gen_triple
            (ACoeff1, ACoeff2) = coeff_pair
            return expandTensor((ACoeff1*AGen1, ACoeff2*AGen2, MGen),
                                self.AAtensorM)
        def _diff_AAtensorM(gen_triple):
            """To be used as diff() in AAtensorM."""
            (AGen1, AGen2, MGen) = gen_triple
            # Leibniz rule: differentiate each algebra factor, then apply
            # delta to the module factor.
            return expandTensor((AGen1.diff(), AGen2, MGen), self.AAtensorM) \
                + expandTensor((AGen1, AGen2.diff(), MGen), self.AAtensorM) \
                + (AGen1, AGen2) * (MGen.delta())
        self.AAtensorM.rmultiply = _mul_AA_AAtensorM
        self.AAtensorM.diff = _diff_AAtensorM

    def delta(self, generator):
        """Returns delta^1 of the generator."""
        raise NotImplementedError("Differential not implemented.")
class SimpleDDStructure(DDStructure):
    """Represents a type DD structure with a finite number of generators, and
    explicitly stored generating set and delta operation.
    """
    def __init__(self, ring, algebra1, algebra2,
                 side1 = ACTION_LEFT, side2 = ACTION_LEFT):
        """Specifies the algebras and sides of the type D action."""
        assert side1 == ACTION_LEFT and side2 == ACTION_LEFT, \
            "Right action not implemented."
        DDStructure.__init__(self, ring, algebra1, algebra2, side1, side2)
        # Set of DDGenerator; delta_map sends each generator to an element of
        # AAtensorM.
        self.generators = set()
        self.delta_map = dict()

    def __len__(self):
        return len(self.generators)

    def delta(self, generator):
        return self.delta_map[generator]

    def getGenerators(self):
        return list(self.generators)

    def addGenerator(self, generator):
        """Add a generator. No effect if the generator already exists."""
        assert generator.parent == self
        assert isinstance(generator, DDGenerator)
        self.generators.add(generator)
        if generator not in self.delta_map:
            self.delta_map[generator] = E0

    def addDelta(self, gen_from, gen_to, alg_coeff1, alg_coeff2, ring_coeff):
        """Add ring_coeff * (alg_coeff1, alg_coeff2) * gen_to to the delta of
        gen_from. The first four arguments should be generators.
        """
        assert gen_from.parent == self and gen_to.parent == self
        # The algebra coefficients must connect the idempotents of the two
        # generators.
        assert alg_coeff1.getLeftIdem() == gen_from.idem1
        assert alg_coeff1.getRightIdem() == gen_to.idem1
        assert alg_coeff2.getLeftIdem() == gen_from.idem2
        assert alg_coeff2.getRightIdem() == gen_to.idem2
        target_gen = TensorGenerator((alg_coeff1, alg_coeff2, gen_to),
                                     self.AAtensorM)
        self.delta_map[gen_from] += target_gen.elt(ring_coeff)

    def deltaCoeff(self, gen_from, gen_to):
        """Return the coefficient (as bialgebra element) of gen_to in delta of
        gen_from.
        """
        if self.delta_map[gen_from] == 0:
            return E0
        else:
            # No need for algebra structure on the tensor product
            return self.delta_map[gen_from].fixLast(gen_to)

    def reindex(self):
        """Replace the generators by simple generators indexed by integers."""
        gen_list = list(self.generators)
        new_gen_list = []
        # translate_dict maps old generators to their renamed replacements.
        translate_dict = dict()
        for i in range(len(gen_list)):
            new_gen = gen_list[i].toSimpleDDGenerator("g%d"%(i+1))
            new_gen_list.append(new_gen)
            translate_dict[gen_list[i]] = new_gen
        self.generators = set(new_gen_list)
        # Rebuild delta_map with the renamed generators on both sides.
        new_delta = dict()
        for k, v in list(self.delta_map.items()):
            new_v = E0
            for (AGen1, AGen2, MGen), coeff in list(v.items()):
                target_gen = TensorGenerator(
                    (AGen1, AGen2, translate_dict[MGen]), self.AAtensorM)
                new_v += target_gen.elt(coeff)
            new_delta[translate_dict[k]] = new_v
        self.delta_map = new_delta
        # Translate the grading dictionary, if gradings were computed.
        if hasattr(self, "grading"):
            new_grading = dict()
            for gen, gr in list(self.grading.items()):
                if gen in translate_dict:  # gen is still in ddstr
                    new_grading[translate_dict[gen]] = gr
            self.grading = new_grading

    def testDelta(self):
        """Verify d^2 = 0 for this structure."""
        for gen in self.generators:
            if gen.delta().diff() != 0:
                # Print the offending terms in d^2 for one generator.
                print(gen, "==>")
                for k, v in list(gen.delta().diff().items()):
                    print(v, "*", k)
                return False
        return True

    def getReverseDelta(self):
        """Returns the reverse of delta map. Return value is a dictionary with
        generators as keys, and list of ((a1, a2, gen), coeff) as values.
        Every ((b1, b2, p), coeff) in the list for q means (b1*b2)*q occurs in
        delta(p) with ring coefficient coeff.
        """
        rev_delta = dict()
        for x in self.generators:
            rev_delta[x] = []
        for p in self.generators:
            for (b1, b2, q), coeff in list(p.delta().items()):
                rev_delta[q].append(((b1, b2, p), coeff))
        return rev_delta

    def __str__(self):
        result = "Type DD Structure.\n"
        for k, v in list(self.delta_map.items()):
            result += "d(%s) = %s\n" % (k, v)
        return result

    def morToD(self, other):
        """Compute the type D structure of morphisms from self to other. Note
        ``other`` must be a type D structure.
        """
        assert self.algebra1 == other.algebra
        alg_gens = self.algebra1.getGenerators()
        xlist = self.getGenerators()
        ylist = other.getGenerators()
        gens = list()
        dstr = SimpleDStructure(F2, self.algebra2.opp())
        genType = MorDDtoDGenerator
        def morGradingSet():
            """Find the grading set of the new type D structure."""
            lr_domains = [(d1, d2.opp())
                          for d1, d2 in self.gr_set.periodic_domains]
            self.lr_set = SimpleDbGradingSet(
                self.gr_set.gr_group1, ACTION_LEFT,
                self.gr_set.gr_group2.opp(), ACTION_RIGHT, lr_domains)
            return GeneralGradingSet([self.lr_set.inverse(), other.gr_set])
        def morGrading(gr_set, x, a, y):
            """Find the grading of the generator x -> ay in the morphism
            type D structure. The grading set need to be provided as gr_set.
            """
            gr_x1, gr_x2 = self.grading[x].data
            gr_x_lr = SimpleDbGradingSetElement(self.lr_set,
                                                (gr_x1, gr_x2.opp()))
            gr = [gr_x_lr.inverse(), other.grading[y] * a.getGrading()]
            return GeneralGradingSetElement(gr_set, gr)
        # Prepare rev_delta for the last step in computing differentials
        rev_delta = self.getReverseDelta()
        # Get the list of generators: one for each idempotent-compatible
        # triple (x, a, y).
        for x in xlist:
            for a in alg_gens:
                for y in ylist:
                    if x.idem1 == a.getLeftIdem() and \
                       y.idem == a.getRightIdem():
                        gens.append(genType(dstr, x, a, y))
        for gen in gens:
            dstr.addGenerator(gen)
        # Get the type D structure maps
        for gen in gens:
            # Differential of y in (x -> ay)
            x, a, y = gen.source, gen.coeff, gen.target
            ady = a * y.delta()
            # NOTE(review): None as alg_coeff here is presumably interpreted
            # by SimpleDStructure.addDelta as the idempotent -- confirm.
            for (b, q), coeff in list(ady.items()):
                dstr.addDelta(gen, genType(dstr, x, b, q), None, coeff)
            # Differential of a
            for da_gen, coeff in list(a.diff().items()):
                dstr.addDelta(gen, genType(dstr, x, da_gen, y), None, coeff)
            # For each p such that (b1,b2)*x is in dp, add opp(b2)*(p->(b1*a)y)
            for (b1, b2, p), coeff1 in rev_delta[x]:
                for b1a_gen, coeff2 in list((b1*a).items()):
                    dstr.addDelta(gen, genType(dstr, p, b1a_gen, y),
                                  b2.opp(), coeff1*coeff2)
        # Find grading set and grading of elements
        if hasattr(self, "gr_set") and hasattr(other, "gr_set"):
            dstr.gr_set = morGradingSet()
            dstr.grading = dict()
            for gen in gens:
                dstr.grading[gen] = morGrading(
                    dstr.gr_set, gen.source, gen.coeff, gen.target)
        return dstr

    def morToDD(self, other):
        """Compute the chain complex of type DD structure morphisms from self to
        other. Note ``other`` must be a type DD structure over the same two
        PMC's in the same order.
        Currently does not keep track of gradings.
        """
        assert self.algebra1 == other.algebra1
        assert self.algebra2 == other.algebra2
        alg1_gens = self.algebra1.getGenerators()
        alg2_gens = self.algebra2.getGenerators()
        # Type of coefficients of the morphism
        tensor_alg = TensorDGAlgebra((self.algebra1, self.algebra2))
        xlist = self.getGenerators()
        ylist = other.getGenerators()
        gens = list()
        cx = SimpleChainComplex(F2)
        genType = MorDDtoDDGenerator
        # For computing differentials only
        mor_cx = MorDDtoDDComplex(F2, self, other)
        # Prepare rev_delta for the last step in computing differentials
        rev_delta = self.getReverseDelta()
        # Get the list of generators: one for each idempotent-compatible
        # quadruple (x, a1, a2, y).
        for x in xlist:
            for a1 in alg1_gens:
                for a2 in alg2_gens:
                    for y in ylist:
                        if x.idem1 == a1.getLeftIdem() and \
                           y.idem1 == a1.getRightIdem() and \
                           x.idem2 == a2.getLeftIdem() and \
                           y.idem2 == a2.getRightIdem():
                            a = TensorGenerator((a1, a2), tensor_alg)
                            gens.append(genType(cx, x, a, y))
        for gen in gens:
            cx.addGenerator(gen)
        # Get the differentials of type DD structure maps, delegating the
        # actual computation to MorDDtoDDComplex.diff.
        for gen in gens:
            for term, coeff in list(mor_cx.diff(gen).items()):
                cx_term = genType(cx, term.source, term.coeff, term.target)
                cx.addDifferential(gen, cx_term, coeff)
        return cx

    def hochschildCochains(self):
        """Returns the Hochschild cochain complex of self, i.e., the morphisms
        from the DD identity to self.
        """
        dd_id = identityDD(self.algebra1.pmc, self.algebra1.idem_size)
        return dd_id.morToDD(self)

    def simplify(self, cancellation_constraint = None):
        """Simplify a type DD structure using cancellation lemma.
        cancellation_constraint is a function from two generators to boolean,
        stating whether they can be cancelled.
        """
        # Simplification is best done in terms of coefficients
        # Build dictionary of coefficients
        arrows = dict()
        for gen in self.generators:
            arrows[gen] = dict()
        bialgebra = TensorDGAlgebra((self.algebra1, self.algebra2))
        for gen in self.generators:
            for (AGen1, AGen2, MGen), coeff in list(self.delta_map[gen].items()):
                if MGen not in arrows[gen]:
                    arrows[gen][MGen] = E0
                arrows[gen][MGen] += TensorGenerator(
                    (AGen1, AGen2), bialgebra) * coeff
        arrows = simplifyComplex(
            arrows, default_coeff = E0,
            cancellation_constraint = cancellation_constraint)
        # Now rebuild the type DD structure
        self.generators = set()
        self.delta_map = dict()
        for x in arrows:
            self.generators.add(x)
            self.delta_map[x] = E0
            for y, coeff in list(arrows[x].items()):
                for (a1, a2), ring_coeff in list(coeff.items()):
                    target_gen = TensorGenerator((a1, a2, y), self.AAtensorM)
                    self.delta_map[x] += ring_coeff * target_gen

    def toDStructure(self):
        """Convert this type DD structure into a type D structure over the
        tensor product of two algebras.
        """
        bialgebra = TensorDGAlgebra((self.algebra1, self.algebra2))
        dstr = SimpleDStructure(self.ring, bialgebra, ACTION_LEFT)
        gen_map = dict()
        for gen in self.generators:
            new_gen = gen.toDGenerator(dstr)
            gen_map[gen] = new_gen
            dstr.addGenerator(new_gen)
        for gen_from in self.generators:
            for (a1, a2, gen_to), coeff in list(self.delta_map[gen_from].items()):
                dstr.addDelta(gen_map[gen_from], gen_map[gen_to],
                              TensorGenerator((a1, a2), bialgebra), coeff)
        return dstr

    def registerHDiagram(self, diagram, base_gen, base_gr = None):
        """Associate the given diagram as the Heegaard diagram from which this
        type DD structure can be derived. We will attempt to match generators
        of the type DD structure to generators of the Heegaard diagram.
        Currently this is possible only if no two generators have the same
        idempotents (so the match can be made by comparing idempotents).
        As a result, computes grading of each generator from the Heegaard
        diagram and checks it against type DD operations. Attributes added are:
        *. self.hdiagram - the Heegaard diagram.
        *. self.hdiagram_gen_map - dictionary mapping generators in the type DD
        structure to generators in Heegaard diagram.
        *. self.gr_set - the grading set (of type SimpleDbGradingSet).
        *. self.grading - dictionary mapping generators in the type DD
        structure to their gradings.
        """
        self.hdiagram = diagram
        # Get PMC's and check that they make sense
        hd_pmc1, hd_pmc2 = self.hdiagram.pmc_list
        dds_pmc1, dds_pmc2 = self.algebra1.pmc, self.algebra2.pmc
        assert hd_pmc1.opp() == dds_pmc1
        assert hd_pmc2.opp() == dds_pmc2
        # Now attempt to match generators by comparing idempotents.
        self.hdiagram_gen_map = dict()
        idem_size = 2 * dds_pmc1.genus - len(base_gen.idem1)
        gens = self.getGenerators()
        hgens = diagram.getHFGenerators(idem_size = idem_size)
        for gen in gens:
            for hgen in hgens:
                hgen_idem1, hgen_idem2 = hgen.getDIdem()
                if (gen.idem1, gen.idem2) == (hgen_idem1, hgen_idem2):
                    self.hdiagram_gen_map[gen] = hgen
                    break
            # Fails if no diagram generator has matching idempotents.
            assert gen in self.hdiagram_gen_map
        # Compute grading and check consistency with algebra actions
        base_hgen = self.hdiagram_gen_map[base_gen]
        self.gr_set, gr = self.hdiagram.computeDDGrading(base_hgen, base_gr)
        self.grading = dict()
        for gen in gens:
            self.grading[gen] = gr[self.hdiagram_gen_map[gen]]
        if ASSERT_LEVEL > 0:
            self.checkGrading()

    @memorize
    def dual(self):
        """Returns the dual of this type DD structure, which is the type DD
        invariant of the orientation reversed bordered 3-manifold. If self has
        left action over A1 and A2, then the new DD structure has left action
        over A2.opp() and A1.opp() (in that order).
        """
        dual_str = SimpleDDStructure(
            self.ring, self.algebra2.opp(), self.algebra1.opp(),
            self.side2, self.side1)
        # Map from generators in self to generators in dual_str
        gen_map = dict()
        for x in self.generators:
            # As in the type D case, simple generators only
            assert isinstance(x, SimpleDDGenerator)
            new_x = SimpleDDGenerator(dual_str, x.idem2.opp(), x.idem1.opp(),
                                      x.name)
            dual_str.addGenerator(new_x)
            gen_map[x] = new_x
        # Each arrow x -> (a, b) * y dualizes to y -> (b.opp(), a.opp()) * x.
        for x in self.generators:
            for (a, b, y), coeff in list(x.delta().items()):
                dual_str.addDelta(gen_map[y], gen_map[x], b.opp(), a.opp(),
                                  coeff)
        return dual_str

    def checkGrading(self):
        # Verify each delta arrow drops the grading by exactly one, after
        # accounting for the algebra coefficients' gradings.
        for x in self.generators:
            for (a1, a2, y), coeff in list(x.delta().items()):
                gr_x = self.grading[x]
                gr_y = self.grading[y]
                assert gr_x - 1 == gr_y * [a1.getGrading(), a2.getGrading()]

    def compareDDStructures(self, other):
        """Compare two type DD structures, print out any differences."""
        return self.toDStructure().compareDStructures(other.toDStructure())
def DDStrFromChords(alg1, alg2, idem_pairs, chord_pairs):
    """Construct type DD structure from list of idempotent pairs and chord
    pairs.
    - idem_pairs is list of pairs of Idempotent.
    - chord_pairs is list of pairs of Strands.
    """
    ddstr = SimpleDDStructure(F2, alg1, alg2)
    # One generator per idempotent pair, named by its index.
    for num, (idem1, idem2) in enumerate(idem_pairs):
        ddstr.addGenerator(SimpleDDGenerator(ddstr, idem1, idem2, num))
    generators = ddstr.getGenerators()
    # For every ordered pair of generators, add one delta arrow per chord
    # pair that is compatible with both sides' idempotents.
    for gen_from in generators:
        for gen_to in generators:
            for l_chord, r_chord in chord_pairs:
                # Respect the multiplicity-one restriction, when in force.
                if alg1.mult_one and not l_chord.isMultOne():
                    continue
                if alg2.mult_one and not r_chord.isMultOne():
                    continue
                if not l_chord.idemCompatible(gen_from.idem1, gen_to.idem1):
                    continue
                if not r_chord.idemCompatible(gen_from.idem2, gen_to.idem2):
                    continue
                ddstr.addDelta(
                    gen_from, gen_to,
                    StrandDiagram(alg1, gen_from.idem1, l_chord),
                    StrandDiagram(alg2, gen_from.idem2, r_chord), 1)
    return ddstr
def identityDD(pmc, idem_size = None):
    """Returns the identity type DD structure for a given PMC.

    pmc - the pointed matched circle.
    idem_size - size of the idempotents on the first side; defaults to
    pmc.genus.
    """
    if idem_size is None:
        idem_size = pmc.genus
    n = pmc.n
    pmcopp = pmc.opp()
    alg1 = pmc.getAlgebra(idem_size = idem_size)
    alg2 = pmcopp.getAlgebra(idem_size = 2*pmc.genus - idem_size)
    # Generators come from complementary idempotent pairs; each chord is
    # paired with its mirror image in the opposite PMC.
    idems = pmc.getIdempotents(idem_size)
    idem_pairs = [(idem, idem.opp().comp()) for idem in idems]
    chord_pairs = [(Strands(pmc, [(i, j)]), Strands(pmcopp, [(n-1-j, n-1-i)]))
                   for i in range(n) for j in range(i+1, n)]
    # (Removed a dead ``ddstr = SimpleDDStructure(F2, alg1, alg2)`` assignment
    # that was immediately overwritten by the call below.)
    ddstr = DDStrFromChords(alg1, alg2, idem_pairs, chord_pairs)
    # Any generator can serve as base_gen
    for gen in ddstr.getGenerators():
        base_gen = gen
        break
    ddstr.registerHDiagram(getIdentityDiagram(pmc), base_gen)
    return ddstr
def DDStrFromDStr(dstr, genus1):
    """Obtain the type DD structure from a type D structure that is related by
    drilling. See section 7.3 of paper 'Computing HF by Factoring Mapping
    Classes'.
    If dstr has left type D action by algebra A(Z_1 # Z_2), where genus1
    specifies the genus of Z_1, then ddstr will have left type DD action by
    A(Z_1) and A(Z_2).
    """
    assert dstr.side == ACTION_LEFT
    pmc_all = dstr.algebra.pmc
    assert dstr.algebra.idem_size == pmc_all.genus
    pmc1, pmc2 = unconnectSumPMC(pmc_all, genus1)
    mult_one = dstr.algebra.mult_one
    ddstr = SimpleDDStructure(F2, pmc1.getAlgebra(mult_one = mult_one),
                              pmc2.getAlgebra(mult_one = mult_one))
    gen_map = {}
    for x in dstr.getGenerators():
        # Split idempotent of x into two parts: pairs below 2*genus1 go to
        # the first PMC, the rest (re-indexed) to the second.
        xidem = x.idem
        x1_idem = Idempotent(pmc1,
                             [pairid for pairid in xidem if pairid < 2*genus1])
        x2_idem = Idempotent(pmc2,
                             [pairid-2*genus1 for pairid in xidem
                              if pairid >= 2*genus1])
        # Only generators whose idempotent splits with exactly genus1 pairs
        # on the first side survive the drilling.
        if len(x1_idem) != genus1:
            continue
        gen_map[x] = SimpleDDGenerator(ddstr, x1_idem, x2_idem, x.name)
        ddstr.addGenerator(gen_map[x])
    cut_point = 4 * genus1
    for x in dstr.getGenerators():
        for (a, y), coeff in list(x.delta().items()):
            if a.multiplicity[cut_point-1] == 0:
                # The interval (cut_point-1, cut_point) is unoccupied
                a1, a2 = unconnectSumStrandDiagram(a, genus1)
                ddstr.addDelta(gen_map[x], gen_map[y], a1, a2, coeff)
    return ddstr
| 29,142
| 40.632857
| 82
|
py
|
bfh_python
|
bfh_python-master/linalgtest.py
|
"""Unit test for linalg.py."""
from linalg import *
from utility import F2
import unittest
class RowSystemTest(unittest.TestCase):
    """Tests for RowSystem: combinations, reductions and profiles over Z."""
    def testRowSystem(self):
        rows1 = [[2,0],[3,1]]
        sys1 = RowSystem(rows1)
        # Integer combinations of the rows producing the given vectors.
        self.assertEqual(sys1.getComb([1, 1]), [-1, 1])
        self.assertEqual(sys1.getComb([0,-2]), [ 3,-2])
        self.assertEqual(sys1.getComb([1,0]), None)  # not representable over Z
        self.assertEqual(sys1.vecReduce([1,0]), ([2,-1], [0,1]))
        # Rational reduction succeeds where the integer one leaves a residue.
        self.assertEqual(sys1.vecReduce([1,0], use_rational = True),
                         ([Fraction(1,2),0], [0,0]))
        self.assertEqual(sys1.reduceProfile(), [1, 2])
        self.assertEqual(sys1.shortForm([1,0]), [1])
        rows2 = [[2,0,0],[0,2,2],[1,1,1]]
        sys2 = RowSystem(rows2)
        # The zero combination is defined up to overall sign.
        self.assertTrue(sys2.getZeroComb() in ([[1,1,-2]],[[-1,-1,2]]))
        self.assertEqual(sys2.vecReduce([1,0,0])[1], [0,1,1])  # reduced form
        self.assertEqual(sys2.reduceProfile(), [1, 2, 0])
class F2RowSystemTest(unittest.TestCase):
    """Tests for F2RowSystem: row combinations over the field F2."""
    def testF2RowSystem(self):
        rows1 = [[1,1,0,0,0],
                 [0,0,1,1,0],
                 [0,0,0,0,1],
                 [0,0,1,0,0]]
        sys1 = F2RowSystem(rows1)
        # Each target vector is expressed as an F2-combination of the rows.
        self.assertEqual(sys1.getComb([1,1,1,1,1]), [1,1,1,0])
        self.assertEqual(sys1.getComb([1,1,0,0,1]), [1,0,1,0])
        self.assertEqual(sys1.getComb([1,1,1,0,1]), [1,0,1,1])
        self.assertEqual(sys1.getComb([1,1,0,1,1]), [1,1,1,1])
if __name__ == "__main__":
    unittest.main()  # run the tests when executed as a script
| 1,493
| 36.35
| 76
|
py
|
bfh_python
|
bfh_python-master/latextest.py
|
"""Unit test for latex.py"""
from latex import *
from arcslide import Arcslide
from arcslideda import ArcslideDA
from localpmc import LocalPMC, LocalStrandDiagram
from pmc import PMC
from pmc import antipodalPMC, splitPMC
import unittest
class LatexTest(unittest.TestCase):
    """Writes the LaTeX rendering of a local DA structure to a file.

    This is a smoke test with a file side effect (latex_output.txt), not an
    assertion-based test.
    """
    def testPrintLocalDAStructure(self):
        slide, pmc_map1, pmc_map2 = (
            # Uncomment one of the following lines
            # Arcslide(splitPMC(2), 2, 1), None, None, # short, down
            # Arcslide(splitPMC(2), 2, 3), None, None, # short, up
            # Arcslide(antipodalPMC(2), 2, 1), [0,1,2,3,4,6,7], [0,1,3,4,5,6,7],
            Arcslide(antipodalPMC(2), 4, 5), [0,1,3,4,5,6,7], [0,1,2,3,4,6,7],
        )
        local_dastr = ArcslideDA(slide).getLocalDAStructure()
        f = open("latex_output.txt", "w")
        f.write(beginDoc())
        f.write(showDAStructure(local_dastr, pmc_map1, pmc_map2))
        f.write(endDoc())
        f.close()
if __name__ == "__main__":
    unittest.main()  # run the tests when executed as a script
| 1,004
| 33.655172
| 80
|
py
|
bfh_python
|
bfh_python-master/dastructuretest.py
|
"""Unit test for dastructure.py"""
from dastructure import *
from arcslide import Arcslide
from arcslideda import ArcslideDA
from dstructure import infTypeD, zeroTypeD
from pmc import splitPMC
from utility import DEFAULT_GRADING, SMALL_GRADING
import unittest
class DAStructureTest(unittest.TestCase):
    """Tests for the identity type DA structure."""
    def testIdentityDA(self):
        dastr = identityDA(splitPMC(2))
        self.assertTrue(dastr.testDelta())

    def testIdentityDAMatchDiagram(self):
        dastr = identityDA(splitPMC(2))
        dastr.checkGrading()
        if DEFAULT_GRADING == SMALL_GRADING:
            # Special check for the identity diagram: all gradings should be
            # zero
            for gen in dastr.getGenerators():
                self.assertEqual(dastr.grading[gen], dastr.gr_set.zero())
class TensorTest(unittest.TestCase):
    """Tests tensoring a type DA structure with a type D structure."""
    def testDATensorD(self):
        dastr = identityDA(splitPMC(2))
        dstr = zeroTypeD(2)
        dstr_result = dastr.tensorD(dstr)
        cx = dstr_result.morToD(infTypeD(2))
        cx.simplify()
        # Basic check that dstr_result is still zeroTypeD(2)
        self.assertEqual(len(cx), 1)
if __name__ == "__main__":
    unittest.main()  # run the tests when executed as a script
| 1,180
| 30.918919
| 76
|
py
|
bfh_python
|
bfh_python-master/dehntwist.py
|
"""Dehn twists starting at linear PMC and their type DD structures."""
from algebra import TensorDGAlgebra, TensorGenerator
from algebra import E0
from ddstructure import MorDDtoDDComplex, MorDDtoDDGenerator, \
SimpleDDGenerator, SimpleDDStructure
from ddstructure import DDStrFromChords, identityDD
from pmc import Idempotent, Strands, StrandDiagram
from pmc import linearPMC
from utility import memorize
from utility import F2, NEG, POS
import itertools
class DehnTwist(object):
    """Represents a Dehn twist starting at linear PMC."""
    def __init__(self, genus, c_pair, orientation):
        """Specifies genus of the starting pmc, id of the pair of Dehn twist,
        and orientation of the twist (POS or NEG).
        """
        self.genus = genus
        self.orientation = orientation
        self.start_pmc = linearPMC(genus)
        self.end_pmc = self.start_pmc
        self.n = 4 * genus
        # c1 < c2 are the two endpoints of the twisting pair.
        self.c1, self.c2 = self.start_pmc.pairs[c_pair]
        self.c_pair = c_pair
        assert self.c2 == self.c1 + 3
        # Two positions between c1 and c2, for (d)own and (u)p
        self.d = self.c1 + 1
        self.u = self.c1 + 2
        self.d_pair = self.start_pmc.pairid[self.d]
        self.u_pair = self.start_pmc.pairid[self.u]

    @memorize
    def getDDStructure(self):
        """Returns the type DD structure corresponding to this dehn twist."""
        self.all_idems = self._getIdems()
        self.all_chords = []
        # Collect the chord pairs from the nine chord families P1-P9.
        for chord_type in self._PChords:
            chord_type(self)
        # A negative twist is obtained by reversing all chord pairs.
        if self.orientation == NEG:
            self.all_chords = [(r.opp(), l.opp()) for l, r in self.all_chords]
        # mult_one = False case is more complicated
        alg1 = self.start_pmc.getAlgebra(mult_one = True)
        alg2 = alg1
        ddstr = DDStrFromChords(alg1, alg2, self.all_idems, self.all_chords)
        assert ddstr.testDelta()
        return ddstr

    def _getIdems(self):
        """Returns the set of possible idempotent-pairs for generators."""
        all_idems = []
        left_idems = self.start_pmc.getIdempotents()
        # Generators of type X (complementary)
        for idem in left_idems:
            all_idems.append((idem, idem.opp().comp()))
        # Generators of type Y (near-complementary)
        for idem in left_idems:
            for rem_pair in (self.d_pair, self.u_pair):
                if self.c_pair in idem and not rem_pair in idem:
                    # Right idempotent: complement with rem_pair traded for
                    # the twisting pair.
                    idem_data = list(idem.comp())
                    idem_data.remove(rem_pair)
                    idem_data.append(self.c_pair)
                    right_idem = Idempotent(self.end_pmc, idem_data).opp()
                    all_idems.append((idem, right_idem))
        return all_idems

    def _addPair(self, data1, data2):
        """Add this pair of chord data."""
        chord_left = Strands(self.start_pmc, data1)
        # Chords on the right are mirrored into the opposite PMC.
        data2 = [(self.n-1-q, self.n-1-p) for p, q in data2]
        chord_right = Strands(self.end_pmc.opp(), data2)
        self.all_chords.append((chord_left, chord_right))

    # The following nine methods enumerate the chord-pair families P1-P9 of
    # the Dehn twist type DD structure.
    def _P1Chords(self):
        # Chords away from [c1, c2], plus the interior chord (d, u).
        for x in range(self.n):
            for y in range(x+1, self.n):
                if any([(x, y) == (self.d, self.u),
                        x < self.c1 and self.c2 <= y,
                        x <= self.c1 and self.c2 < y,
                        y <= self.c1,
                        x >= self.c2]):
                    self._addPair([(x, y)], [(x, y)])

    def _P2Chords(self):
        for x in range(self.c2, self.n):
            self._addPair([(self.d, x)], [(self.c1, x)])
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                self._addPair([(x, self.c1), (self.d, y)], [(x, y)])
        for x in range(0, self.c1+1):
            self._addPair([(x, self.c2)], [(x, self.u)])
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                self._addPair([(x, y)], [(x, self.u), (self.c2, y)])

    def _P3Chords(self):
        for x in range(0, self.c1+1):
            self._addPair([(x, self.c2)], [(x, self.d)])
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                self._addPair([(x, y)], [(x, self.d), (self.c2, y)])
        for x in range(self.c2, self.n):
            self._addPair([(self.u, x)], [(self.c1, x)])
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                self._addPair([(x, self.c1), (self.u, y)], [(x, y)])

    def _P4Chords(self):
        self._addPair([], [(self.u, self.c2)])
        for x in range(self.c2+1, self.n):
            self._addPair([(self.c2, x)], [(self.u, x)])
        self._addPair([(self.c1, self.d)], [])
        for x in range(0, self.c1):
            self._addPair([(x, self.d)], [(x, self.c1)])

    def _P5Chords(self):
        self._addPair([(self.c1, self.u)], [])
        for x in range(0, self.c1):
            self._addPair([(x, self.u)], [(x, self.c1)])
        self._addPair([], [(self.d, self.c2)])
        for x in range(self.c2+1, self.n):
            self._addPair([(self.c2, x)], [(self.d, x)])

    def _P6Chords(self):
        self._addPair([(self.d, self.u)], [])
        self._addPair([], [(self.d, self.u)])

    def _P7Chords(self):
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                self._addPair([(x, self.c1), (self.c2, y)],
                              [(x, self.c1), (self.c2, y)])

    def _P8Chords(self):
        for x in range(self.c2, self.n):
            self._addPair([(self.u, x)], [(self.c1, self.d), (self.u, x)])
        for x in range(0, self.c1):
            for y in range(self.c2, self.n):
                self._addPair([(x, self.c1), (self.u, y)],
                              [(x, self.d), (self.u, y)])
        for x in range(0, self.c1+1):
            self._addPair([(x, self.d), (self.u, self.c2)], [(x, self.d)])
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                self._addPair([(x, self.d), (self.u, y)],
                              [(x, self.d), (self.c2, y)])

    def _P9Chords(self):
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                self._addPair([(x, self.d), (self.u, y)], [(x, y)])
                self._addPair([(x, y)], [(x, self.d), (self.u, y)])

    # Class-level list of the chord-family methods, iterated (unbound) in
    # getDDStructure.
    _PChords = [_P1Chords, _P2Chords, _P3Chords, _P4Chords, _P5Chords,
                _P6Chords, _P7Chords, _P8Chords, _P9Chords]
class AntiBraid(object):
    """Represents the anti-braid resolution at a pair of points in the linear
    pmc. Produces the corresponding type DD structures (plain and admissible).
    """
    def __init__(self, genus, c_pair):
        """Specifies genus of the starting pmc and the id of the pair of
        anti-braid resolution.
        """
        self.genus = genus
        self.c_pair = c_pair
        self.start_pmc = linearPMC(genus)
        self.end_pmc = self.start_pmc
        # Total number of points in the pmc.
        self.n = 4 * genus
        # Endpoints of the c-pair in the linear pmc.
        self.c1, self.c2 = self.start_pmc.pairs[c_pair]
        if self.c2 == self.c1 + 3:
            self.is_degenerate = False
        else:
            # The degenerate case (two points between the endpoints missing)
            # can occur only at the two ends of the linear pmc.
            assert self.c2 == self.c1 + 2
            assert self.c1 == 0 or self.c2 == self.n - 1
            self.is_degenerate = True
        if self.is_degenerate:
            # One position between c1 and c2, called p
            self.p = self.c1 + 1
            self.p_pair = self.start_pmc.pairid[self.p]
        else:
            # Two positions between c1 and c2, for (d)own and (u)p
            self.d = self.c1 + 1
            self.u = self.c1 + 2
            self.d_pair = self.start_pmc.pairid[self.d]
            self.u_pair = self.start_pmc.pairid[self.u]

    @memorize
    def getDDStructure(self):
        """Returns the type DD structure corresponding to this anti-braid
        resolution.
        """
        all_idems = self._getIdems()
        all_chords = []
        for chord_type in self._getChordsList():
            all_chords.extend(chord_type())
        all_chords = [self._StrandsFromChords(chord1, chord2)
                      for chord1, chord2 in all_chords]
        alg1 = self.start_pmc.getAlgebra(mult_one = True)
        alg2 = alg1
        ddstr = DDStrFromChords(alg1, alg2, all_idems, all_chords)
        assert ddstr.testDelta()
        return ddstr

    @memorize
    def getAdmissibleDDStructure(self):
        """Returns the type DD structure corresponding to the Heegaard diagram
        created by a finger move of the beta circle to the right.
        """
        alg1 = self.start_pmc.getAlgebra(mult_one = True)
        alg2 = alg1
        ddstr = SimpleDDStructure(F2, alg1, alg2)
        # Add generators for the non-admissible case - that is, those generators
        # that do not contain the two intersections created by the finger move.
        original_idems = self._getIdems()
        for i in range(len(original_idems)):
            left_idem, right_idem = original_idems[i]
            ddstr.addGenerator(
                SimpleDDGenerator(ddstr, left_idem, right_idem, "0_%d" % i))
        # Now add the new generators. These just correspond to the complementary
        # idempotents with c_pair on the left, repeated twice.
        left_idems = [idem for idem in self.start_pmc.getIdempotents()
                      if self.c_pair in idem]
        for i in range(len(left_idems)):
            left_idem = left_idems[i]
            right_idem = left_idem.opp().comp()
            ddstr.addGenerator(
                SimpleDDGenerator(ddstr, left_idem, right_idem, "1_%d" % i))
            ddstr.addGenerator(
                SimpleDDGenerator(ddstr, left_idem, right_idem, "2_%d" % i))
        # gen_set[i] collects the generators of class i (first character of
        # the generator name).
        gen_set = []
        for i in range(3):
            gen_set.append([gen for gen in ddstr.getGenerators()
                            if gen.name[:1] == "%d" % i])
        # Enumerate the non-special chords (those that do not depend on the
        # idempotent). See the functions themselves for the format of
        # all_chords.
        if self.is_degenerate:
            all_chords = self._getAdmissibleNonSpecialChordsDegenerate()
        else:
            all_chords = self._getAdmissibleNonSpecialChords()
        for i, j in itertools.product(list(range(3)), list(range(3))):
            all_chords[i][j] = [self._StrandsFromChords(chord1, chord2)
                                for chord1, chord2 in all_chords[i][j]]
        # Now we emulate the logic in ddstructure.DDStrFromChords, except we
        # distinguish between ''classes'' of generators, by the first character
        # of the name of the generator.
        for i, j in itertools.product(list(range(3)), list(range(3))):
            for x, y in itertools.product(gen_set[i], gen_set[j]):
                for l_chord, r_chord in all_chords[i][j]:
                    if l_chord.idemCompatible(x.idem1, y.idem1) and \
                       r_chord.idemCompatible(x.idem2, y.idem2):
                        ddstr.addDelta(x, y,
                                       StrandDiagram(alg1, x.idem1, l_chord),
                                       StrandDiagram(alg2, x.idem2, r_chord), 1)
        # Special handling for these. From class 2 to class 1, add only if the
        # c-pair is occupied on the left side (and not on the right).
        # Non-degenerate cases only.
        sp_chords = []
        if not self.is_degenerate:
            for x in range(0, self.c1):
                for y in range(self.c2+1, self.n):
                    sp_chords.append(([(x, y)], [(x, self.u), (self.u, y)]))
                    sp_chords.append(([(x, y)], [(x, self.d), (self.d, y)]))
                    sp_chords.append(([(x, self.d), (self.u, y)],
                                      [(x, self.d), (self.u, y)]))
        sp_chords = [self._StrandsFromChords(chord1, chord2)
                     for chord1, chord2 in sp_chords]
        for x, y in itertools.product(gen_set[2], gen_set[1]):
            for l_chord, r_chord in sp_chords:
                if self.c_pair in x.idem1 and \
                   l_chord.idemCompatible(x.idem1, y.idem1) and \
                   r_chord.idemCompatible(x.idem2, y.idem2):
                    assert self.c_pair not in x.idem2.opp() and \
                        self.c_pair in y.idem1 and \
                        self.c_pair not in y.idem2.opp()
                    ddstr.addDelta(x, y,
                                   StrandDiagram(alg1, x.idem1, l_chord),
                                   StrandDiagram(alg2, x.idem2, r_chord), 1)
        assert ddstr.testDelta()
        return ddstr

    def _getAdmissibleNonSpecialChordsDegenerate(self):
        """Returns the non-special chords (those that do not depend on the
        idempotent) in the degenerate admissible case.
        """
        # Initialize all_chords to be a 3*3 matrix of lists, with each entry
        # (i, j) containing the chord pairs from generators of class i to
        # generators of class j.
        all_chords = []
        for i in range(3):
            all_chords.append([])
            for j in range(3):
                all_chords[i].append([])
        # Idempotent actions from 1 to 2.
        all_chords[1][2].append(([], []))
        # Basic chords.
        all_chords[0][2].append(([], [(self.p, self.c2)]))
        all_chords[1][0].append(([], [(self.c1, self.p)]))
        # Incorporate the left side.
        all_chords[0][0].append(([(self.c1, self.c2)], []))
        all_chords[2][0].append(([(self.c1, self.c2)], [(self.c1, self.p)]))
        all_chords[0][1].append(([(self.c1, self.c2)], [(self.p, self.c2)]))
        # Identity away from the anti-braid.
        for x in range(0, self.n):
            for y in range(x+1, self.n):
                if y < self.c1 or x > self.c2:
                    for i in range(3):
                        all_chords[i][i].append(([(x, y)], [(x, y)]))
        return all_chords

    def _getAdmissibleNonSpecialChords(self):
        """Returns the non-special chords (those that do not depend on the
        idempotent) in the non-degenerate admissible case.
        """
        # Initialize all_chords to be a 3*3 matrix of lists, with each entry
        # (i, j) containing the chord pairs from generators of class i to
        # generators of class j.
        all_chords = []
        for i in range(3):
            all_chords.append([])
            for j in range(3):
                all_chords[i].append([])
        # Idempotent actions from 1 to 2.
        all_chords[1][2].append(([], []))
        # Basic chords.
        all_chords[0][0].append(([], [(self.d, self.u)]))
        all_chords[0][2].append(([], [(self.u, self.c2)]))
        all_chords[1][0].append(([], [(self.c1, self.d)]))
        all_chords[0][2].append(([], [(self.d, self.c2)]))
        all_chords[1][0].append(([], [(self.c1, self.u)]))
        # Incorporate the intervals (c1-1, c1) and (c2, c2+1).
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                all_chords[0][0].append(
                    ([(x, self.c1), (self.c2, y)],
                     [(x, self.c1), (self.c2, y)]))
                all_chords[2][0].append(
                    ([(x, self.c1), (self.c2, y)], [(x, self.u), (self.c2, y)]))
                all_chords[2][0].append(
                    ([(x, self.c1), (self.c2, y)], [(x, self.d), (self.c2, y)]))
                all_chords[0][1].append(
                    ([(x, self.c1), (self.c2, y)], [(x, self.c1), (self.u, y)]))
                all_chords[0][1].append(
                    ([(x, self.c1), (self.c2, y)], [(x, self.c1), (self.d, y)]))
                for i in range(3):
                    all_chords[i][i].append(
                        ([(x, self.c1), (self.c2, y)], [(x, y)]))
        # Incorporate the left side.
        all_chords[0][0].extend(
            [([(self.d, self.u)], []),
             ([(self.c1, self.d), (self.u, self.c2)], []),
             ([(self.c1, self.c2)], [])])
        for i in (1, 2):
            all_chords[i][i].append(([(self.d, self.u)], [(self.d, self.u)]))
        all_chords[2][0].extend(
            [([(self.c1, self.d), (self.u, self.c2)], [(self.c1, self.d)]),
             ([(self.c1, self.c2)], [(self.c1, self.d)]),
             ([(self.c1, self.d), (self.u, self.c2)], [(self.c1, self.u)]),
             ([(self.c1, self.c2)], [(self.c1, self.u)])])
        all_chords[0][1].extend(
            [([(self.c1, self.d), (self.u, self.c2)], [(self.u, self.c2)]),
             ([(self.c1, self.c2)], [(self.u, self.c2)]),
             ([(self.c1, self.d), (self.u, self.c2)], [(self.d, self.c2)]),
             ([(self.c1, self.c2)], [(self.d, self.c2)])])
        # Again add intervals (c1-1, c1) and (c2, c2+1).
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                all_chords[0][0].extend(
                    [([(x, y)], [(x, y)]),
                     ([(x, y)], [(x, self.d), (self.u, y)]),
                     ([(x, y)], [(x, self.c1), (self.c2, y)]),
                     ([(x, self.c1), (self.c2, y)], [(x, self.d), (self.u, y)]),
                     ([(x, self.d), (self.u, y)], [(x, self.c1), (self.c2, y)]),
                     ([(x, self.d), (self.u, y)], [(x, y)])])
                all_chords[2][0].extend(
                    [([(x, y)], [(x, self.d), (self.c2, y)]),
                     ([(x, y)], [(x, self.u), (self.c2, y)]),
                     ([(x, self.d), (self.u, y)], [(x, self.d), (self.c2, y)]),
                     ([(x, self.d), (self.u, y)], [(x, self.u), (self.c2, y)])])
                all_chords[0][1].extend(
                    [([(x, y)], [(x, self.c1), (self.u, y)]),
                     ([(x, y)], [(x, self.c1), (self.d, y)]),
                     ([(x, self.d), (self.u, y)], [(x, self.c1), (self.u, y)]),
                     ([(x, self.d), (self.u, y)], [(x, self.c1), (self.d, y)])])
        # Identity away from the anti-braid.
        for x in range(0, self.n):
            for y in range(x+1, self.n):
                if y < self.c1 or x > self.c2:
                    for i in range(3):
                        all_chords[i][i].append(([(x, y)], [(x, y)]))
        return all_chords

    def _getIdems(self):
        """Returns the set of possible idempotent-pairs for generators."""
        all_idems = []
        left_idems = self.start_pmc.getIdempotents()
        # Near-complementary generators
        if self.is_degenerate:
            rem_pairs = (self.p_pair,)
        else:
            rem_pairs = (self.d_pair, self.u_pair)
        for idem in left_idems:
            for rem_pair in rem_pairs:
                if self.c_pair in idem and not rem_pair in idem:
                    # Right idempotent: complement of the left one, with one
                    # of rem_pairs traded for the c-pair.
                    idem_data = list(idem.comp())
                    idem_data.remove(rem_pair)
                    idem_data.append(self.c_pair)
                    right_idem = Idempotent(self.end_pmc, idem_data).opp()
                    all_idems.append((idem, right_idem))
        return all_idems

    def _StrandsFromChords(self, chord1, chord2):
        """Create strand objects from lists of chords. Points in chord2 are
        reversed (refer to the opposite pmc).
        """
        chord_left = Strands(self.start_pmc, chord1)
        chord2 = [(self.n-1-q, self.n-1-p) for p, q in chord2]
        chord_right = Strands(self.end_pmc.opp(), chord2)
        return (chord_left, chord_right)

    def _B1Chords(self):
        # Identity chords away from the anti-braid region.
        return [([(x, y)], [(x, y)])
                for x in range(self.n) for y in range(x+1, self.n)
                if (x < self.c1 or x > self.c2) and \
                (y < self.c1 or y > self.c2)]

    def _B2Chords(self):
        return [([(self.d, self.u)], []), ([], [(self.d, self.u)])]

    def _B3Chords(self):
        return [([(self.c1, self.d), (self.u, self.c2)], []),
                ([], [(self.c1, self.d), (self.u, self.c2)])]

    def _B4Chords(self):
        return [([(self.c1, self.c2)], []), ([], [(self.c1, self.c2)])]

    def _B5Chords(self):
        result = []
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                result.append(([(x, self.c1), (self.c2, y)], [(x, y)]))
                result.append(([(x, y)], [(x, self.c1), (self.c2, y)]))
        return result

    def _B6Chords(self):
        result = []
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                result.append(([(x, self.d), (self.u, y)], [(x, y)]))
                result.append(([(x, y)], [(x, self.d), (self.u, y)]))
        return result

    def _B7Chords(self):
        return [([(x, self.c1), (self.c2, y)], [(x, self.c1), (self.c2, y)])
                for x in range(0, self.c1) for y in range(self.c2+1, self.n)]

    def _B8Chords(self):
        result = []
        for x in range(0, self.c1):
            for y in range(self.c2+1, self.n):
                result.append(([(x, self.d), (self.u, y)],
                               [(x, self.c1), (self.c2, y)]))
                result.append(([(x, self.c1), (self.c2, y)],
                               [(x, self.d), (self.u, y)]))
        return result

    def _getChordsList(self):
        # In the degenerate case only the B1 and B4 families occur.
        if self.is_degenerate:
            return [self._B1Chords, self._B4Chords]
        else:
            return [self._B1Chords, self._B2Chords, self._B3Chords,
                    self._B4Chords, self._B5Chords, self._B6Chords,
                    self._B7Chords, self._B8Chords]
class DehnSurgery(object):
    """Represents a Dehn twist starting at linear PMC."""
    def __init__(self, genus, c_pair, orientation):
        """Specifies genus of the starting pmc, id of the pair of Dehn twist,
        and orientation of the twist (POS or NEG).
        """
        self.genus = genus
        self.orientation = orientation
        self.start_pmc = linearPMC(genus)
        self.end_pmc = self.start_pmc
        # Total number of points in the pmc.
        self.n = 4 * genus
        # Endpoints of the c-pair in the linear pmc.
        self.c1, self.c2 = self.start_pmc.pairs[c_pair]
        self.c_pair = c_pair
        if self.c2 == self.c1 + 3:
            self.is_degenerate = False
        else:
            # The degenerate case can occur only at the two ends of the
            # linear pmc.
            assert self.c2 == self.c1 + 2
            assert self.c1 == 0 or self.c2 == self.n - 1
            self.is_degenerate = True
        if not self.is_degenerate:
            # Two positions between c1 and c2, for (d)own and (u)p
            self.d = self.c1 + 1
            self.u = self.c1 + 2

    def _StrandsFromChords(self, chord1, chord2):
        """Create strand objects from lists of chords. Points in chord2 are
        reversed (refer to the opposite pmc).
        """
        chord_left = Strands(self.start_pmc, chord1)
        chord2 = [(self.n-1-q, self.n-1-p) for p, q in chord2]
        chord_right = Strands(self.end_pmc.opp(), chord2)
        return (chord_left, chord_right)

    @memorize
    def getMorphism(self, is_admissible = False):
        """Returns the morphism of type DD structures between the identity DD
        structure and the anti-braid DD structure (direction depends on the
        orientation of the twist). If is_admissible is True, use the
        admissible version of the anti-braid DD structure.
        """
        id_dd = identityDD(self.start_pmc)
        anti_braid = AntiBraid(self.genus, self.c_pair)
        if not is_admissible:
            ab_dd = anti_braid.getDDStructure()
        else:
            ab_dd = anti_braid.getAdmissibleDDStructure()
        # NEG twists map from the identity to the anti-braid; POS twists go
        # the other way.
        if self.orientation == NEG:
            source = id_dd
            target = ab_dd
        else:
            source = ab_dd
            target = id_dd
        morphism_cx = MorDDtoDDComplex(F2, source, target)
        all_chords = []
        for chord_type in self._getChordsList():
            all_chords.extend(chord_type())
        all_chords = [self._StrandsFromChords(chord1, chord2) for
                      chord1, chord2 in all_chords]
        morphism = E0
        alg = self.start_pmc.getAlgebra()
        assert alg.mult_one is True  # not prepared for the other case
        tensor_alg = TensorDGAlgebra((alg, alg))
        # Similar to method in DDStrFromChords
        for x in source.getGenerators():
            for y in target.getGenerators():
                for l_chord, r_chord in all_chords:
                    if l_chord.idemCompatible(x.idem1, y.idem1) and \
                       r_chord.idemCompatible(x.idem2, y.idem2):
                        a1 = StrandDiagram(alg, x.idem1, l_chord)
                        a2 = StrandDiagram(alg, x.idem2, r_chord)
                        coeff = TensorGenerator((a1, a2), tensor_alg)
                        morphism += 1*MorDDtoDDGenerator(
                            morphism_cx, x, coeff, y)
        if not is_admissible:
            return morphism
        # Additional chords to / from the new generators in the admissible
        # case. new_chords[i] holds the chords involving generators of class
        # i+1.
        new_chords = []
        for i in range(2):
            new_chords.append([])
        if self.orientation == NEG:
            new_chords[0].append(
                ([(self.c2-1, self.c2)], [(self.c2-1, self.c2)]))
            if not self.is_degenerate:
                new_chords[0].append(
                    ([(self.c2-2, self.c2)], [(self.c2-2, self.c2)]))
            for y in range(self.c2+1, self.n):
                new_chords[0].append(([(self.c2-1, y)], [(self.c2-1, y)]))
                if not self.is_degenerate:
                    new_chords[0].append(([(self.c2-2, y)], [(self.c2-2, y)]))
                for x in range(0, self.c1):
                    new_chords[0].append(
                        ([(x, self.c1), (self.c2, y)], [(x, y)]))
            for x in range(0, self.c1):
                new_chords[1].append(([(x, self.c1)], [(x, self.c2)]))
        else:  # self.orientation == POS
            new_chords[1].append(
                ([(self.c1, self.c1+1)], [(self.c1, self.c1+1)]))
            if not self.is_degenerate:
                new_chords[1].append(
                    ([(self.c1, self.c1+2)], [(self.c1, self.c1+2)]))
            for x in range(0, self.c1):
                new_chords[1].append(([(x, self.c1+1)], [(x, self.c1+1)]))
                if not self.is_degenerate:
                    new_chords[1].append(([(x, self.c1+2)], [(x, self.c1+2)]))
                for y in range(self.c2+1, self.n):
                    new_chords[1].append(
                        ([(x, self.c1), (self.c2, y)], [(x, y)]))
            for x in range(self.c2+1, self.n):
                new_chords[0].append(([(self.c2, x)], [(self.c1, x)]))
        for i in range(2):
            new_chords[i] = [self._StrandsFromChords(chord1, chord2) for
                             chord1, chord2 in new_chords[i]]
            # The new generators (class 1 or 2) sit on the anti-braid side.
            if self.orientation == NEG:
                source_gen = source.getGenerators()
                target_gen = [gen for gen in target.getGenerators()
                              if gen.name[0] == "%d" % (i+1)]
            else:  # self.orientation == POS
                source_gen = [gen for gen in source.getGenerators()
                              if gen.name[0] == "%d" % (i+1)]
                target_gen = target.getGenerators()
            for x, y in itertools.product(source_gen, target_gen):
                if self.orientation == NEG and self.c_pair not in y.idem1:
                    continue
                for l_chord, r_chord in new_chords[i]:
                    if l_chord.idemCompatible(x.idem1, y.idem1) and \
                       r_chord.idemCompatible(x.idem2, y.idem2):
                        a1 = StrandDiagram(alg, x.idem1, l_chord)
                        a2 = StrandDiagram(alg, x.idem2, r_chord)
                        coeff = TensorGenerator((a1, a2), tensor_alg)
                        morphism += 1*MorDDtoDDGenerator(
                            morphism_cx, x, coeff, y)
        return morphism

    def _N1Chords(self):
        return [([(self.c2-1, self.c2)], []), ([], [(self.c1, self.c1+1)])]

    def _N2Chords(self):
        return [([(x, self.c1)], [(x, self.c1+1)]) for x in range(self.c1)] + \
            [([(self.c2-1, x)], [(self.c2, x)])
             for x in range(self.c2+1, self.n)]

    def _N3Chords(self):
        return [([(self.d, self.c2)], []), ([], [(self.c1, self.u)])]

    def _N4Chords(self):
        return [([(x, self.c1)], [(x, self.u)]) for x in range(self.c1)] + \
            [([(self.d, x)], [(self.c2, x)]) for x in range(self.c2+1, self.n)]

    def _P1Chords(self):
        return [([(self.c1, self.c1+1)], []), ([], [(self.c2-1, self.c2)])]

    def _P2Chords(self):
        return [([(x, self.c1+1)], [(x, self.c1)]) for x in range(self.c1)] + \
            [([(self.c2, x)], [(self.c2-1, x)])
             for x in range(self.c2+1, self.n)]

    def _P3Chords(self):
        return [([(self.c1, self.u)], []), ([], [(self.d, self.c2)])]

    def _P4Chords(self):
        return [([(x, self.u)], [(x, self.c1)]) for x in range(0, self.c1)] + \
            [([(self.c2, x)], [(self.d, x)]) for x in range(self.c2+1, self.n)]

    def _getChordsList(self):
        # N* families for negative twists, P* families for positive ones; in
        # the degenerate case only the first two families of each occur.
        if self.orientation == NEG:
            if self.is_degenerate:
                return [self._N1Chords, self._N2Chords]
            else:
                return [self._N1Chords, self._N2Chords, self._N3Chords,
                        self._N4Chords]
        else:
            if self.is_degenerate:
                return [self._P1Chords, self._P2Chords]
            else:
                return [self._P1Chords, self._P2Chords, self._P3Chords,
                        self._P4Chords]
| 29,086
| 41.094067
| 80
|
py
|
bfh_python
|
bfh_python-master/autocompleteda.py
|
"""Auto-completion of arrows in a type DA structure, by solving certain
equations in linear algebra.
This module is used to produce arrows in the local type DA structure for
arcslides, in arcslidedatest.py.
"""
from algebra import solveOverF2
from algebra import E0
from dastructure import DAStructure, MorDAtoDAGenerator
from extendbyid import LocalDAStructure
from linalg import F2RowSystem
from localpmc import LocalStrandDiagram
from utility import memorizeHash
import itertools
from queue import Queue
class _DAArrow(tuple):
"""Structure representing a type DA arrow."""
def __new__(cls, coeff_d, coeffs_a, source, target):
return tuple.__new__(cls, (coeff_d, coeffs_a, source, target))
class _AutoCompleteDAStructure:
    """A routine to complete arrows in type DA structures by solving certain
    equations in linear algebra.
    """
    def _getDerivedTwoStepArrows(self, arrows_base_left, arrows_base_right,
                                 arrow_new):
        """Find all ways of deriving two-step arrows from arrow_new, including
        by combining it with elements of arrows_base_left and arrows_base_right.
        """
        result = []
        coeff_d, coeffs_a, x, y = arrow_new
        # Arrows starting with an idempotent A-input cannot be composed.
        if len(coeffs_a) > 0 and coeffs_a[0].isIdempotent():
            return result
        # Take anti-differential of one of coeffs_a
        for i in range(len(coeffs_a)):
            for anti_da, coeff in list(coeffs_a[i].antiDiff().items()):
                result.append(_DAArrow(
                    coeff_d, coeffs_a[:i] + (anti_da,) + coeffs_a[i+1:], x, y))
            # Split one A-input into a product of two non-idempotents.
            for (a, b), coeff in list(coeffs_a[i].factor().items()):
                if a.isIdempotent() or b.isIdempotent():
                    continue
                result.append(_DAArrow(
                    coeff_d, coeffs_a[:i] + (a, b) + coeffs_a[i+1:], x, y))
        # Take differential of coeff_d
        for dd, coeff in list(coeff_d.diff().items()):
            result.append(_DAArrow(dd, coeffs_a, x, y))
        # Multiply two together. One direction.
        for coeff_d2, coeffs_a2, x2, y2 in arrows_base_left:
            if len(coeffs_a2) > 0 and coeffs_a2[0].isIdempotent():
                continue
            if y2 == x and coeff_d2 * coeff_d != E0:
                result.append(_DAArrow(
                    (coeff_d2 * coeff_d).getElt(), coeffs_a2 + coeffs_a, x2, y))
        # The other direction.
        for coeff_d2, coeffs_a2, x2, y2 in arrows_base_right:
            if len(coeffs_a2) > 0 and coeffs_a2[0].isIdempotent():
                continue
            if y == x2 and coeff_d * coeff_d2 != E0:
                result.append(_DAArrow(
                    (coeff_d * coeff_d2).getElt(), coeffs_a + coeffs_a2, x, y2))
        return result

    def _getAltFactorizations(self, arrows_base_left_map, arrows_base_right_map,
                              arrow_new):
        """Find all one-step-arrows that can produce the two-step-arrow
        arrow_new. Here arrows_base_left and arrows_base_right are each passed
        as two maps. Each arrow (coeff_d, coeffs_a, x, z) in arrows_base_left is
        indexed in arrows_base_left_map as (coeff_d, coeffs_a, x) -> z, and each
        arrow (coeff_d, coeffs_a, z, y) in arrows_base_right is indexed in
        arrows_base_right_map as (coeff_d, coeffs_a, y) -> z.
        """
        result = []
        coeff_d, coeffs_a, x, y = arrow_new
        # Take differential of one of coeffs_a
        for i in range(len(coeffs_a)):
            for da, coeff in list(coeffs_a[i].diff().items()):
                result.append(_DAArrow(
                    coeff_d, coeffs_a[:i] + (da,) + coeffs_a[i+1:], x, y))
            # Merge two adjacent A-inputs if their product is nonzero.
            if i > 0 and coeffs_a[i-1] * coeffs_a[i] != E0:
                result.append(_DAArrow(
                    coeff_d, coeffs_a[:i-1] + \
                    ((coeffs_a[i-1] * coeffs_a[i]).getElt(),) +
                    coeffs_a[i+1:], x, y))
        # Take anti-differential of coeff_d
        for anti_dd, coeff in list(coeff_d.antiDiff().items()):
            result.append(_DAArrow(anti_dd, coeffs_a, x, y))
        # Split into two sequences
        for (a, b), coeff in list(coeff_d.factor().items()):
            # Number of A-inputs in the first sequence
            for i in range(len(coeffs_a)):
                if (a, coeffs_a[:i], x) in arrows_base_left_map:
                    for z in arrows_base_left_map[(a, coeffs_a[:i], x)]:
                        result.append(_DAArrow(b, coeffs_a[i:], z, y))
                if (b, coeffs_a[i:], y) in arrows_base_right_map:
                    for z in arrows_base_right_map[(b, coeffs_a[i:], y)]:
                        result.append(_DAArrow(a, coeffs_a[:i], x, z))
        return result

    def _getAltIdempotents(self, arrows):
        """Arrows is a list of tuples (coeff_d, coeffs_a).
        single_idems is a list of tuples (idem_d, idem_a), specifying the ID of
        single idempotents on the D-side and A-side.
        Returns a list of arrows that are different from those in the input by
        only the single idempotents.
        """
        # True if idem occurs on either side of alg_gen.
        def uses_idempotent(alg_gen, idem):
            return idem in alg_gen.left_idem or idem in alg_gen.right_idem
        # True if idem is a single horizontal strand of alg_gen.
        def has_singlehor(alg_gen, idem):
            return idem in alg_gen.single_hor
        # Copy of alg_gen with idem added as a horizontal strand.
        def add_singlehor(alg_gen, idem):
            return LocalStrandDiagram(
                alg_gen.parent, [idem] + list(alg_gen.left_idem),
                alg_gen.strands)
        # Copy of alg_gen with the horizontal strand at idem removed.
        def remove_singlehor(alg_gen, idem):
            return LocalStrandDiagram(
                alg_gen.parent, [i for i in alg_gen.left_idem if i != idem],
                alg_gen.strands)
        results = []
        for i in range(len(self.single_idems)):
            idem_d, idem_a = self.single_idems[i]
            for coeff_d, coeffs_a, x, y in results + arrows:
                # Remove the single idempotent everywhere, if present.
                if x in self.da_left.u_maps[i] and \
                   y in self.da_right.u_maps[i] and \
                   has_singlehor(coeff_d, idem_d) and \
                   all([has_singlehor(coeff, idem_a)
                        for coeff in coeffs_a]):
                    new_arrow = _DAArrow(
                        remove_singlehor(coeff_d, idem_d),
                        tuple([remove_singlehor(coeff, idem_a)
                               for coeff in coeffs_a]),
                        self.da_left.u_maps[i][x],
                        self.da_right.u_maps[i][y])
                    if not new_arrow in results + arrows:
                        results.append(new_arrow)
                # Or add it everywhere, if absent.
                if (x in self.da_left.uinv_maps[i] and \
                    y in self.da_right.uinv_maps[i] and \
                    (not uses_idempotent(coeff_d, idem_d)) and \
                    all([not uses_idempotent(coeff, idem_a)
                         for coeff in coeffs_a])):
                    new_arrow = _DAArrow(
                        add_singlehor(coeff_d, idem_d),
                        tuple([add_singlehor(coeff, idem_a)
                               for coeff in coeffs_a]),
                        self.da_left.uinv_maps[i][x],
                        self.da_right.uinv_maps[i][y])
                    if not new_arrow in results + arrows:
                        results.append(new_arrow)
        return results

    def _autoCompleteByLinAlg(self, arrows_base_left, arrows_base_right,
                              arrows_new):
        """Auto-complete arrows by solving a system of linear equations mod 2.
        Use only when it is clear that no two added arrows can be composed
        together.
        Returns the list of suggested arrows.
        """
        # Complete arrows_new with alternative idempotents.
        arrows_new_set = set(arrows_new)
        init_alt_idems = []
        for arrow in arrows_new:
            for arrow_alt in self._getAltIdempotents([arrow]):
                if arrow_alt not in arrows_new_set:
                    arrows_new_set.add(arrow_alt)
                    init_alt_idems.append(arrow_alt)
        arrows_new = list(arrows_new_set)
        # First step: find all possible arrows, and all possible two-step
        # arrows.
        one_step_arrows = set()
        two_step_arrows = set()
        one_step_arrows_queue = Queue()
        two_step_arrows_queue = Queue()
        for arrow in arrows_new:
            one_step_arrows_queue.put(arrow)
            one_step_arrows.add(arrow)
        # Form the arrow base maps for faster computation in
        # _getAltFactorizations.
        arrows_base_left_map = dict()
        arrows_base_right_map = dict()
        for coeff_d, coeffs_a, source, target in arrows_base_left:
            key = (coeff_d, coeffs_a, source)
            if key not in arrows_base_left_map:
                arrows_base_left_map[key] = []
            arrows_base_left_map[key].append(target)
        for coeff_d, coeffs_a, source, target in arrows_base_right:
            key = (coeff_d, coeffs_a, target)
            if key not in arrows_base_right_map:
                arrows_base_right_map[key] = []
            arrows_base_right_map[key].append(source)
        # Breadth-first exploration alternating between deriving two-step
        # arrows from one-step arrows and factoring two-step arrows back
        # into one-step arrows.
        while not one_step_arrows_queue.empty() or \
              not two_step_arrows_queue.empty():
            if not one_step_arrows_queue.empty():
                cur_arrow = one_step_arrows_queue.get()
                for arrow in self._getDerivedTwoStepArrows(
                        arrows_base_left, arrows_base_right, cur_arrow):
                    if arrow not in two_step_arrows:
                        two_step_arrows.add(arrow)
                        two_step_arrows_queue.put(arrow)
                for arrow in self._getAltIdempotents([cur_arrow]):
                    if arrow not in one_step_arrows:
                        one_step_arrows.add(arrow)
                        one_step_arrows_queue.put(arrow)
            else:
                cur_arrow = two_step_arrows_queue.get()
                for arrow in self._getAltFactorizations(
                        arrows_base_left_map, arrows_base_right_map,
                        cur_arrow):
                    coeff_d, coeffs_a, x, y = arrow
                    # HACK - it appears considering one_step_arrows with at
                    # most four algebra inputs is sufficient. In the
                    # autocompletion for the anti-braid case, there are
                    # infinitely many reachable one_step_arrows, so we place
                    # this limit. Can change to 2 or 3 to see if simpler
                    # arrows are possible.
                    if len(coeffs_a) > 4:
                        continue
                    if arrow not in one_step_arrows:
                        one_step_arrows.add(arrow)
                        one_step_arrows_queue.put(arrow)
        # Combine some of the one-step arrows
        combined_one_step_arrows = []  # list of lists of arrows
        while len(one_step_arrows) != 0:
            arrow = one_step_arrows.pop()
            coeff_d, coeffs_a, gen_from, gen_to = arrow
            # Smearability: a single horizontal on the D-side must be matched
            # on every A-side input.
            valid = True
            for idem_d, idem_a in self.single_idems:
                if idem_a in coeff_d.single_hor and \
                   not all(
                       idem_d in coeff_a.single_hor for coeff_a in coeffs_a):
                    valid = False
                    break
            if not valid:
                # These arrows should not be considered.
                continue
            # Group each arrow with its alternative-idempotent versions, so
            # they are switched on and off together.
            alt_idems = self._getAltIdempotents([arrow])
            for alt_idem in alt_idems:
                one_step_arrows.remove(alt_idem)
            combined_one_step_arrows.append([arrow] + alt_idems)
        # Generate the matrix mapping from one-step arrows to two-step arrows
        num_row, num_col = len(combined_one_step_arrows), len(two_step_arrows)
        matrix_entries = set()
        target_vec = set()
        two_step_arrows_dict = dict()  # index the two step arrows.
        two_step_arrows = list(two_step_arrows)
        for i in range(len(two_step_arrows)):
            two_step_arrows_dict[two_step_arrows[i]] = i
        for i in range(len(combined_one_step_arrows)):
            for one_step_arrow in combined_one_step_arrows[i]:
                derived_two_steps = self._getDerivedTwoStepArrows(
                    arrows_base_left, arrows_base_right, one_step_arrow)
                for two_step_arrow in derived_two_steps:
                    j = two_step_arrows_dict[two_step_arrow]
                    # Entries toggle mod 2: duplicates cancel.
                    if one_step_arrow in arrows_new:
                        if j in target_vec:
                            target_vec.remove(j)
                        else:
                            target_vec.add(j)
                    else:
                        if (i, j) in matrix_entries:
                            matrix_entries.remove((i, j))
                        else:
                            matrix_entries.add((i, j))
        comb = solveOverF2(num_row, num_col, list(matrix_entries),
                           list(target_vec))
        assert comb is not None
        result = init_alt_idems
        for term in comb:
            if combined_one_step_arrows[term][0] not in arrows_new:
                result.extend(combined_one_step_arrows[term])
        return result

    def _arrows_to_string(self, arrow_set):
        """Format the given arrows as input-form strings, one per line, with
        duplicates removed. Generators whose names look like "c_k" contribute
        their class prefix ("-1" when absent).
        """
        output_strs = set()  # remove duplicates
        for coeff_d, coeffs_a, gen_from, gen_to in arrow_set:
            has_class_from = isinstance(gen_from.name, str) and \
                '_' in gen_from.name
            has_class_to = isinstance(gen_to.name, str) and '_' in gen_to.name
            if has_class_from or has_class_to:
                n1 = gen_from.name[0:1] if has_class_from else "-1"
                n2 = gen_to.name[0:1] if has_class_to else "-1"
                output_strs.add("(%s, %s, %s)," % (n1, n2, ", ".join(
                    coeff.inputForm()
                    for coeff in list(coeffs_a) + [coeff_d])))
            else:
                output_strs.add("(%s)," % ", ".join(
                    coeff.inputForm()
                    for coeff in list(coeffs_a) + [coeff_d]))
        return "\n".join(output_strs)

    def complete(self, raw_da, d_side_order):
        """Input raw_da is a local type DA structure with all generators and
        some arrows filled in. This function adds arrows to raw_da so that it
        satisfies the type DA structure equation, as well as the smearability
        condition.
        """
        # Initialize the needed values.
        assert isinstance(raw_da, LocalDAStructure)
        self.da_left = self.da_right = raw_da
        # Prepare single idems in raw_da as a list of pairs (idem_d, idem_a).
        self.single_idems = []
        for i in range(raw_da.num_single_idems):
            self.single_idems.append(
                (raw_da.single_idems1[i], raw_da.single_idems2[i]))
        # Complete in stages, each adding one interval on the D side, according
        # to d_side_order.
        for i in range(len(d_side_order)):
            # Lists of _DAArrow objects. In this case, arrows_base_left and
            # arrows_base_right are the same.
            arrows_base, arrows_seed = [], []
            # Figure out the base arrows and new arrows in this stage.
            for (gen_from, coeffs_a), target in list(raw_da.da_action.items()):
                for (coeff_d, gen_to), ring_coeff in list(target.items()):
                    arrow = _DAArrow(coeff_d, coeffs_a, gen_from, gen_to)
                    # Only consider arrows not touching later intervals.
                    if all([coeff_d.multiplicity[p] == 0
                            for p in d_side_order[i+1:]]):
                        mult_at_i = coeff_d.multiplicity[d_side_order[i]]
                        if mult_at_i == 0:
                            arrows_base.append(arrow)
                        else:
                            assert mult_at_i == 1
                            arrows_seed.append(arrow)
            arrows_new = self._autoCompleteByLinAlg(
                arrows_base, arrows_base, arrows_seed)
            for coeff_d, coeffs_a, gen_from, gen_to in arrows_new:
                raw_da.addDelta(gen_from, gen_to, coeff_d, coeffs_a, 1)
            ### Uncomment to see the added arrows.
            # if i == 0:
            #     print "# Initial patterns: %s\n" % \
            #         self._arrows_to_string(arrows_base)
            # print "# Step %d, D side position %d:" % (i+1, d_side_order[i])
            # print "# Seed arrows:\n%s\n# New arrows:\n%s\n" % \
            #     (self._arrows_to_string(arrows_seed),
            #      self._arrows_to_string(arrows_new))
        # Final check
        assert raw_da.testDelta()
        return raw_da

    def completeMorphism(self, dastr1, dastr2, raw_morphism):
        """Input two local DA structures dastr1 and dastr2, and an incomplete
        morphism between the two. Add arrows to the morphism so that it
        satisfies the structure equations, as well as the smearability
        condition.
        """
        # Initialize the needed values.
        assert isinstance(dastr1, LocalDAStructure)
        assert isinstance(dastr2, LocalDAStructure)
        assert dastr1.algebra1 == dastr2.algebra1
        assert dastr1.algebra2 == dastr2.algebra2
        assert dastr1.single_idems1 == dastr2.single_idems1
        assert dastr1.single_idems2 == dastr2.single_idems2
        self.da_left, self.da_right = dastr1, dastr2
        self.raw_morphism = raw_morphism
        # Prepare single idems in dastr1 (or dastr2) as a list of pairs
        # (idem_d, idem_a).
        self.single_idems = []
        for i in range(dastr1.num_single_idems):
            self.single_idems.append(
                (dastr1.single_idems1[i], dastr1.single_idems2[i]))
        # Base arrows come from the two DA structures; seed arrows come from
        # the partially-specified morphism.
        arrows_base_left, arrows_base_right, arrows_seed = [], [], []
        for (gen_from, coeffs_a), target in list(dastr1.da_action.items()):
            for (coeff_d, gen_to), ring_coeff in list(target.items()):
                arrows_base_left.append(
                    _DAArrow(coeff_d, coeffs_a, gen_from, gen_to))
        for (gen_from, coeffs_a), target in list(dastr2.da_action.items()):
            for (coeff_d, gen_to), ring_coeff in list(target.items()):
                arrows_base_right.append(
                    _DAArrow(coeff_d, coeffs_a, gen_from, gen_to))
        for gen in raw_morphism:
            x, (coeff_d, coeffs_a), y = gen.source, gen.coeff, gen.target
            arrows_seed.append(
                _DAArrow(coeff_d, tuple(coeffs_a), x, y))
        arrows_new = self._autoCompleteByLinAlg(
            arrows_base_left, arrows_base_right, arrows_seed)
        mor_parent = raw_morphism.getElt().parent
        for coeff_d, coeffs_a, gen_from, gen_to in arrows_new:
            raw_morphism += 1 * MorDAtoDAGenerator(
                mor_parent, coeff_d, coeffs_a, gen_from, gen_to)
        ### Uncomment to see the added arrows
        # print "New arrows:\n%s\n" % self._arrows_to_string(arrows_new)
        # Final check
        assert raw_morphism.diff() == 0
        return raw_morphism
def autoCompleteDA(raw_da, d_side_order):
    """Complete the arrows of the local type DA structure raw_da, in place,
    so that it satisfies the type DA structure equation and the smearability
    condition.

    raw_da -- a LocalDAStructure with all generators and some arrows
        filled in.
    d_side_order -- sequence of D-side interval positions; completion is
        performed one interval at a time in this order.

    Returns the completed structure (the same object as raw_da), so the
    call can be used either for its side effect or for its value. The
    original version discarded the result of complete(); returning it is
    backward compatible.
    """
    auto = _AutoCompleteDAStructure()
    return auto.complete(raw_da, d_side_order)
def autoCompleteMorphism(dastr1, dastr2, raw_morphism):
    """Complete the given morphism between local type DA structures dastr1
    and dastr2 so that it satisfies the structure equations and the
    smearability condition.

    Returns the completed morphism. NOTE(review): completeMorphism builds
    its result with `raw_morphism += ...`, which may rebind rather than
    mutate the caller's object; the original version dropped the return
    value, so callers could be left with the incomplete morphism.
    Returning the result is backward compatible and makes the completed
    morphism reachable either way -- confirm against callers.
    """
    auto = _AutoCompleteDAStructure()
    return auto.completeMorphism(dastr1, dastr2, raw_morphism)
| 19,362
| 44.56
| 80
|
py
|
bfh_python
|
bfh_python-master/extendbyid.py
|
"""Extension by identity of type DA structures."""
from algebra import TensorGenerator
from algebra import E0
from dastructure import DAGenerator, DAStructure, DATensorDGenerator, \
MorDAtoDAComplex, SimpleDAGenerator, SimpleDAStructure
from dstructure import SimpleDStructure
from grading import GeneralGradingSet, GeneralGradingSetElement
from localpmc import LocalStrandAlgebra, PMCSplitting
from utility import subset
from utility import ACTION_LEFT, ACTION_RIGHT, F2
class ExtendedDAGenerator(SimpleDAGenerator):
    """A generator of the extended DA structure.

    Remembers the generator of the local DA structure it extends, together
    with the idempotent used on the outer part of the pmc.
    """
    def __init__(self, parent, local_gen, outer_idem, name):
        """Join local_gen with outer_idem to form the extended generator."""
        # The local generator and the outer idempotent must belong to the
        # pieces this parent structure was built from.
        assert local_gen.parent == parent.local_da
        assert outer_idem.local_pmc == parent.outer_pmc
        # Join the local idempotent with the outer idempotent on each side.
        joined1 = parent.splitting1.joinIdempotent(local_gen.idem1, outer_idem)
        joined2 = parent.splitting2.joinIdempotent(local_gen.idem2, outer_idem)
        self.local_gen = local_gen
        self.outer_idem = outer_idem
        super(ExtendedDAGenerator, self).__init__(
            parent, joined1, joined2, name)
class LocalDAStructure(SimpleDAStructure):
    """Represents a local type DA structure. So far we always assume that a
    local type DA structure is simple (delta map is explicitly given). The
    extra data is the map between single idempotents on the two sides, and
    u_maps between generators.
    """
    def __init__(self, ring, algebra1, algebra2,
                 side1 = ACTION_LEFT, side2 = ACTION_RIGHT,
                 single_idems1 = None, single_idems2 = None):
        """single_idems1 and single_idems2 are two lists that order the
        unpaired idempotents on the two sides. Idempotents on the two sides
        that appear in the same position correspond to each other. If there
        are 0 or 1 unpaired idempotents, they can be omitted by specifying
        None. Otherwise they must be provided.
        """
        assert isinstance(algebra1, LocalStrandAlgebra)
        assert isinstance(algebra2, LocalStrandAlgebra)
        if single_idems1 is None or single_idems2 is None:
            self.single_idems1 = algebra1.local_pmc.getSingleIdems()
            self.single_idems2 = algebra2.local_pmc.getSingleIdems()
            # With two or more unpaired idempotents the correspondence is
            # ambiguous, so it must be supplied by the caller. (Fixed assert
            # message: the condition fails as soon as there are two unpaired
            # idempotents, not only for more than two.)
            assert len(self.single_idems1) < 2, \
                "There are two or more unpaired idempotents."
        else:
            # Verify the supplied orderings agree (as sets) with the unpaired
            # idempotents actually present in the two local PMCs.
            assert tuple(sorted(algebra1.local_pmc.getSingleIdems())) == \
                tuple(sorted(single_idems1))
            assert tuple(sorted(algebra2.local_pmc.getSingleIdems())) == \
                tuple(sorted(single_idems2))
            self.single_idems1 = single_idems1
            self.single_idems2 = single_idems2

        self.num_single_idems = len(self.single_idems1)
        assert self.num_single_idems == len(self.single_idems2)

        SimpleDAStructure.__init__(self, ring, algebra1, algebra2, side1,
                                   side2)
        # u_maps[i] / uinv_maps[i]: forward and inverse u-map for the i'th
        # pair of corresponding unpaired idempotents.
        self.u_maps = [dict() for i in range(self.num_single_idems)]
        self.uinv_maps = [dict() for i in range(self.num_single_idems)]

    def add_u_map(self, idem_id, source, target):
        """Add to the u-map a mapping from source to target. idem_id refers to
        the position in single_idems1 and single_idems2 given in the
        constructor.

        All entries in u-map remove the corresponding idempotents from idem1
        and idem2.
        """
        self.u_maps[idem_id][source] = target
        # Record the inverse direction as well.
        self.uinv_maps[idem_id][target] = source

    def auto_u_map(self):
        """Autocompletes the u-maps. To call this function, one of the
        following must hold for each generator and each u-map for which it is
        eligible:
        1. The generator already appears as a key in the u_map.
        2. There is unique way to choose the target for this generator.
        """
        for i in range(self.num_single_idems):
            single1, single2 = self.single_idems1[i], self.single_idems2[i]
            for local_gen in self.generators:
                idem1, idem2 = local_gen.idem1, local_gen.idem2
                if single1 in idem1 and single2 in idem2:
                    # local_gen is eligible for u_maps[i]
                    if local_gen in self.u_maps[i]:
                        continue
                    # Otherwise, check there is a unique target and map there
                    target_idem1 = idem1.removeSingleHor([single1])
                    target_idem2 = idem2.removeSingleHor([single2])
                    target_gen = [gen for gen in self.generators
                                  if gen.idem1 == target_idem1
                                  and gen.idem2 == target_idem2]
                    assert len(target_gen) == 1, "Cannot autocomplete u-map"
                    self.add_u_map(i, local_gen, target_gen[0])

    def delta(self, MGen, algGens):
        """Delta action: a single idempotent input acts as the identity (up
        to the output idempotent); otherwise look up the explicitly stored
        action, defaulting to zero.
        """
        if len(algGens) == 1 and algGens[0].isIdempotent() and \
           algGens[0].left_idem == MGen.idem2:
            return MGen.idem1.toAlgElt() * MGen
        elif (MGen, algGens) not in self.da_action:
            return E0
        else:
            return self.da_action[(MGen, algGens)]
class LocalMorDAtoDAComplex(MorDAtoDAComplex):
    """Represents the complex of type DA morphisms between two local type DA
    structures.
    """
    def __init__(self, ring, source, target):
        assert isinstance(source, LocalDAStructure)
        assert isinstance(target, LocalDAStructure)
        MorDAtoDAComplex.__init__(self, ring, source, target)

    def getMappingCone(self, morphism):
        """In addition to what is done in the parent class, need to set up the
        u_map.
        """
        result = LocalDAStructure(
            F2, self.source.algebra1, self.source.algebra2,
            self.source.side1, self.source.side2,
            self.source.single_idems1, self.source.single_idems2)
        gen_map = dict()
        # Source generators sit in filtration level 0 of the cone.
        for gen in self.source.getGenerators():
            gen_map[gen] = SimpleDAGenerator(
                result, gen.idem1, gen.idem2, "S_%s" % gen.name)
            gen_map[gen].filtration = [0]
            if hasattr(gen, "filtration"):
                # Fixed: extend the new generator's filtration list. The
                # original added the list to the generator object itself
                # (gen_map[gen] += ...), which is not a list concatenation.
                gen_map[gen].filtration += gen.filtration
            result.addGenerator(gen_map[gen])
        # Target generators sit in filtration level 1 of the cone.
        for gen in self.target.getGenerators():
            gen_map[gen] = SimpleDAGenerator(
                result, gen.idem1, gen.idem2, "T_%s" % gen.name)
            gen_map[gen].filtration = [1]
            if hasattr(gen, "filtration"):
                # Fixed: same correction as above.
                gen_map[gen].filtration += gen.filtration
            result.addGenerator(gen_map[gen])
        # Copy the internal actions of source and target.
        for (x1, coeffs_a), target in list(self.source.da_action.items()):
            for (coeff_d, x2), ring_coeff in list(target.items()):
                result.addDelta(
                    gen_map[x1], gen_map[x2], coeff_d, coeffs_a, ring_coeff)
        for (y1, coeffs_a), target in list(self.target.da_action.items()):
            for (coeff_d, y2), ring_coeff in list(target.items()):
                result.addDelta(
                    gen_map[y1], gen_map[y2], coeff_d, coeffs_a, ring_coeff)
        # Add the connecting arrows coming from the morphism itself.
        for gen, ring_coeff in list(morphism.items()):
            # coeffs_a is a tuple of A-side inputs
            coeff_d, coeffs_a = gen.coeff
            result.addDelta(gen_map[gen.source], gen_map[gen.target],
                            coeff_d, tuple(coeffs_a), ring_coeff)
        # Set up u_map
        num_single_idems = len(self.source.single_idems1)
        for idem_id in range(num_single_idems):
            for x, u_x in list(self.source.u_maps[idem_id].items()):
                result.add_u_map(idem_id, gen_map[x], gen_map[u_x])
            for y, u_y in list(self.target.u_maps[idem_id].items()):
                result.add_u_map(idem_id, gen_map[y], gen_map[u_y])
        return result
class ExtendedDAStructure(DAStructure):
    """Type DA structure obtained by extension by identity from a local type
    DA structure.
    """
    def __init__(self, local_da, splitting1, splitting2):
        """Specifies the local type DA structure (local_da, of type
        DAStructure), and splittings of the two full PMCs on the two sides (of
        type PMCSplitting). The parameters should be consistent in the
        following way:
        self.local_pmc1 = splitting1.local_pmc = local_da.algebra1.local_pmc
        self.local_pmc2 = splitting2.local_pmc = local_da.algebra2.local_pmc
        self.outer_pmc = splitting1.outer_pmc = splitting2.outer_pmc
        """
        self.local_da = local_da
        self.splitting1 = splitting1
        self.splitting2 = splitting2
        self.pmc1, self.pmc2 = splitting1.pmc, splitting2.pmc
        self.outer_pmc = splitting1.outer_pmc
        assert self.outer_pmc == splitting2.outer_pmc
        self.local_pmc1 = local_da.algebra1.local_pmc
        self.local_pmc2 = local_da.algebra2.local_pmc
        assert self.local_pmc1 == splitting1.local_pmc
        assert self.local_pmc2 == splitting2.local_pmc
        self.mapping1 = splitting1.local_mapping
        self.mapping2 = splitting2.local_mapping
        self.outer_mapping1 = splitting1.outer_mapping
        self.outer_mapping2 = splitting2.outer_mapping
        self.idem_size1 = self.pmc1.genus
        self.idem_size2 = self.pmc2.genus
        # Possible values of single assignments, for use in tensorD, delta and
        # deltaPrefix (through the function getSingleAssignments).
        self.NONE, self.LOCAL, self.OUTER, self.DOUBLE = 0, 1, 2, 3
        # Record the local and outer single idempotents.
        # Everything is indexed by 0 ... self.num_single-1
        self.single_idems1 = self.local_da.single_idems1  # idems in local_pmc1
        self.single_idems2 = self.local_da.single_idems2  # idems in local_pmc2
        self.num_singles = len(self.single_idems1)
        assert self.num_singles == len(self.single_idems2)
        self.smeared_idems1 = []  # idems in pmc1
        self.smeared_idems2 = []  # idems in pmc2
        self.single_idems_outer = []  # idems in outer_pmc
        self.single_pts_outer = []  # pts in outer_pmc
        for i in range(self.num_singles):
            # Fill in data, and verify that the correspondence of idempotents
            # on the two sides is consistent on the outer PMC.
            single_idems1 = self.single_idems1[i]
            single_idems2 = self.single_idems2[i]
            single_pt1 = self.local_pmc1.pairs[single_idems1][0]
            single_pt2 = self.local_pmc2.pairs[single_idems2][0]
            for p in range(self.pmc1.n):
                if p in self.mapping1 and self.mapping1[p] == single_pt1:
                    self.smeared_idems1.append(self.pmc1.pairid[p])
                    q = self.pmc1.otherp[p]
                    assert q in self.outer_mapping1
                    q_outer = self.outer_mapping1[q]
                    self.single_pts_outer.append(q_outer)
                    self.single_idems_outer.append(
                        self.outer_pmc.pairid[q_outer])
            for p in range(self.pmc2.n):
                if p in self.mapping2 and self.mapping2[p] == single_pt2:
                    self.smeared_idems2.append(self.pmc2.pairid[p])
                    q = self.pmc2.otherp[p]
                    assert q in self.outer_mapping2
                    assert self.single_pts_outer[-1] == self.outer_mapping2[q]
        # Initiate the DA structure
        DAStructure.__init__(self, F2, algebra1 = self.pmc1.getAlgebra(),
                             algebra2 = self.pmc2.getAlgebra(),
                             side1 = ACTION_LEFT, side2 = ACTION_RIGHT)
        # Obtain the set of extended generators, and create a map
        # self.gen_index from (local_gen, outer_idem) to the extended
        # generators.
        self.generators = []
        local_gens = self.local_da.getGenerators()
        # Only outer idempotents avoiding the split idempotents are used;
        # joinOutput relies on this when moving split idempotents inside.
        outer_idems = [idem for idem in self.outer_pmc.getIdempotents()
                       if all(single_idem_outer not in idem for
                              single_idem_outer in self.single_idems_outer)]
        self.gen_index = dict()
        for local_gen in local_gens:
            cur_count = 0  # number of generators so far with local_gen
            for outer_idem in outer_idems:
                if len(local_gen.idem1) + len(outer_idem) != self.idem_size1:
                    continue
                assert len(local_gen.idem2) + len(outer_idem) == \
                    self.idem_size2
                cur_gen = ExtendedDAGenerator(
                    self, local_gen, outer_idem,
                    "%s%%%d" % (local_gen.name, cur_count))
                cur_count += 1
                if hasattr(local_gen, "filtration"):
                    cur_gen.filtration = local_gen.filtration
                self.generators.append(cur_gen)
                self.gen_index[(local_gen, outer_idem)] = cur_gen

    def __len__(self):
        """Number of extended generators."""
        return len(self.generators)

    def getGenerators(self):
        """Return the list of extended generators."""
        return self.generators

    def adjustLocalMGen(self, local_MGen, alg_local0):
        """Assigning the smeared idempotents for the starting generator,
        according to the rule that local_MGen.idem2 (A-side idempotent) must
        match the left idempotent of the first algebra input (if there is
        any). Returns the adjusted generator, or None if the required u-map
        entry is missing.
        """
        for i in range(self.num_singles):
            single2 = self.single_idems2[i]
            if single2 in local_MGen.idem2 and \
               single2 not in alg_local0.left_idem:
                if local_MGen not in self.local_da.u_maps[i]:
                    return None  # need test case
                local_MGen = self.local_da.u_maps[i][local_MGen]
        return local_MGen

    def testPrefix(self, local_MGen, algs_local):
        """Query deltaPrefix for the given set of local algebra inputs.
        Perform the adjustment on local_MGen if necessary.
        """
        if len(algs_local) > 0:
            local_MGen = self.adjustLocalMGen(local_MGen, algs_local[0])
            if local_MGen is None:
                return False
        return self.local_da.deltaPrefix(local_MGen, tuple(algs_local))

    def extendRestrictions(self, last_assign, algs_local, prod_d, new_alg):
        """Update the idempotent assignments when a new algebra input,
        new_alg, is considered. Apply possible changes to previous idempotent
        assignments to both algs_local and prod_d. Adds the local restriction
        of new_alg to algs_local, but does NOT multiply outer restriction of
        new_alg to prod_d (this is done in a separate function getNewProdD for
        efficiency considerations).

        Returns (new_assign, new_local, prod_d), or (None, None, None) if the
        assignments conflict.
        """
        # Update single assignments
        new_assign = []
        for i in range(self.num_singles):
            idem_id = self.smeared_idems2[i]
            if idem_id in new_alg.double_hor:
                # Double horizontal in the new algebra element. Just continue
                # the previous assignment.
                assert last_assign[i] != self.NONE
                new_assign.append(last_assign[i])
            else:
                # First determine new assignment.
                if idem_id in new_alg.right_idem:
                    end_pt = [t for s, t in new_alg.strands
                              if self.pmc2.pairid[t] == idem_id]
                    assert len(end_pt) == 1
                    end_pt = end_pt[0]
                    if end_pt in self.mapping2:
                        new_assign.append(self.LOCAL)
                    else:
                        new_assign.append(self.OUTER)
                else:
                    new_assign.append(self.NONE)
                # Now correct previous assignment if necessary.
                if idem_id in new_alg.left_idem:
                    assert last_assign[i] != self.NONE
                    start_pt = [s for s, t in new_alg.strands
                                if self.pmc2.pairid[s] == idem_id]
                    assert len(start_pt) == 1
                    start_pt = start_pt[0]
                    if start_pt in self.mapping2:
                        if last_assign[i] == self.OUTER:
                            return (None, None, None)  # conflict
                    else:
                        if last_assign[i] == self.LOCAL:
                            return (None, None, None)  # conflict
                        elif last_assign[i] == self.DOUBLE:
                            # Previous assignment changes from DOUBLE to
                            # local. Need to update algs_local and prod_d.
                            to_remove = (self.single_idems2[i],)
                            algs_local = [alg.removeSingleHor(to_remove)
                                          for alg in algs_local]
                            to_add = (self.single_idems_outer[i],)
                            prod_d = prod_d.addSingleHor(to_add)
        # Restrict current algebra element to local and form new_local.
        new_local = [alg for alg in algs_local]
        cur_alg_local = self.splitting2.restrictStrandDiagramLocal(new_alg)
        idems_to_remove = [self.single_idems2[single_id]
                           for single_id in range(self.num_singles)
                           if new_assign[single_id] == self.OUTER]
        cur_alg_local = cur_alg_local.removeSingleHor(tuple(idems_to_remove))
        if len(new_local) != 0:
            assert new_local[-1].right_idem == cur_alg_local.left_idem
        new_local.append(cur_alg_local)
        return (new_assign, new_local, prod_d)

    def getNewProdD(self, new_assign, new_alg, last_prod_d):
        """Multiplies the outer restriction of new_alg onto last_prod_d.
        Returns the new product, or None if the product is zero."""
        outer_sd = self.splitting2.restrictStrandDiagramOuter(new_alg)
        outer_sd = outer_sd.removeSingleHor(tuple(
            [self.single_idems_outer[single_id]
             for single_id in range(self.num_singles)
             if new_assign[single_id] in (self.LOCAL, self.DOUBLE)]))
        assert last_prod_d.right_idem == outer_sd.left_idem
        new_prod_d = last_prod_d * outer_sd
        if new_prod_d == 0:
            return None
        else:
            return new_prod_d.getElt()

    def getAssignments(self, MGen, algs):
        """Returns the triple (assignment, algs_local, prod_d), or (None,
        None, None) if the inputs are inconsistent."""
        assignment = [self.DOUBLE] * self.num_singles
        algs_local = []
        prod_d = self.splitting2.restrictIdempotentOuter(MGen.idem2).toAlgElt()
        prod_d = prod_d.removeSingleHor()
        for alg in algs:
            assignment, algs_local, prod_d = self.extendRestrictions(
                assignment, algs_local, prod_d, alg)
            if assignment is None:
                return (None, None, None)
            prod_d = self.getNewProdD(assignment, alg, prod_d)
            if prod_d is None:
                return (None, None, None)
        return (assignment, algs_local, prod_d)

    def joinOutput(self, local_d, local_y, outer_d):
        """Joins local_d and outer_d. Adjust idempotents if necessary.
        Returns the pair (alg_d, y), or (None, None) if the join fails."""
        alg_d = self.splitting1.joinStrandDiagram(local_d, outer_d)
        if alg_d is None:
            return (None, None)
        outer_idem = outer_d.right_idem
        for i in range(self.num_singles):
            single1 = self.single_idems1[i]
            single_outer = self.single_idems_outer[i]
            if single_outer in outer_idem:
                # If the split idempotent ended up on the outside, switch it
                # to the inside.
                if single1 not in local_y.idem1:
                    local_y = self.local_da.uinv_maps[i][local_y]
                outer_idem = outer_idem.removeSingleHor([single_outer])
        y = self.gen_index[(local_y, outer_idem)]
        return (alg_d, y)

    def tensorD(self, dstr):
        """Compute the box tensor product DA * D of this bimodule with the
        given type D structure. Returns the resulting type D structure. Uses
        delta() and deltaPrefix() functions of this type DA structure.
        """
        dstr_result = SimpleDStructure(F2, self.algebra1)
        # Compute list of generators in the box tensor product
        for gen_left in self.getGenerators():
            for gen_right in dstr.getGenerators():
                if gen_left.idem2 == gen_right.idem:
                    dstr_result.addGenerator(DATensorDGenerator(
                        dstr_result, gen_left, gen_right))

        def search(start_gen, cur_dgen, algs, last_assign, algs_local,
                   last_prod_d):
            """Searching for an arrow in the box tensor product.
            - start_gen: starting generator in the box tensor product. The
              resulting arrow will start from here.
            - cur_dgen: current location in the type D structure.
            - algs: current list of A-side inputs to the type DA structure
              (or alternatively, list of algebra outputs produced by the
              existing path through the type D structure).
            - algs_local: current list of local restrictions of algs.
            - last_assign: a list of length self.num_singles. For each split
              idempotent, specify the single assignments at the last algebra
              input.
            - last_prod_d: product of the outer restrictions, except for the
              last algebra input.
            """
            start_dagen, start_dgen = start_gen
            local_MGen = start_dagen.local_gen
            # Preliminary tests
            if len(algs) > 0:
                assert algs[0].left_idem == start_dagen.idem2
            for i in range(len(algs)-1):
                assert algs[i].right_idem == algs[i+1].left_idem
            if any(alg.isIdempotent() for alg in algs):
                return
            # First, adjust local module generator, and check for delta.
            if len(algs_local) > 0:
                local_MGen = self.adjustLocalMGen(local_MGen, algs_local[0])
                if local_MGen is None:
                    return
            local_delta = self.local_da.delta(local_MGen, tuple(algs_local))
            has_delta = (local_delta != E0)
            # Second, check for delta prefix.
            has_delta_prefix = False
            if len(algs) == 0:
                has_delta_prefix = True
            else:
                # Try every subset of the DOUBLE-assigned singles as local.
                dbls = [self.single_idems2[i] for i in range(self.num_singles)
                        if last_assign[i] == self.DOUBLE]
                for to_remove in subset(dbls):
                    if len(to_remove) != 0:
                        cur_algs_local = tuple(
                            [alg.removeSingleHor(to_remove)
                             for alg in algs_local])
                    else:
                        cur_algs_local = algs_local
                    if self.testPrefix(local_MGen, cur_algs_local):
                        has_delta_prefix = True
                        break
            if (not has_delta) and (not has_delta_prefix):
                return
            # Now, compute new prod_d.
            if len(algs) > 0:
                prod_d = self.getNewProdD(last_assign, algs[-1], last_prod_d)
            else:
                prod_d = last_prod_d
            if prod_d is None:
                return
            # If has_delta is True, add to delta
            for (local_d, local_y), ring_coeff in list(local_delta.items()):
                alg_d, y = self.joinOutput(local_d, local_y, prod_d)
                if alg_d is not None:
                    dstr_result.addDelta(start_gen, DATensorDGenerator(
                        dstr_result, y, cur_dgen), alg_d, 1)
            if not has_delta_prefix:
                return
            # Extend the path through the type D structure by one arrow.
            for (new_alg, dgen_to), ring_coeff in list(
                    dstr.delta(cur_dgen).items()):
                new_assign, new_local, last_prod_d = self.extendRestrictions(
                    last_assign, algs_local, prod_d, new_alg)
                if new_assign is not None:
                    search(start_gen, dgen_to, algs + [new_alg],
                           new_assign, new_local, last_prod_d)

        # Perform search for each generator in dstr_result.
        for x in dstr_result.getGenerators():
            dagen, dgen = x
            prod_d = \
                self.splitting2.restrictIdempotentOuter(dagen.idem2).toAlgElt()
            prod_d = prod_d.removeSingleHor()  # always goes to LOCAL
            search(x, dgen, [], [self.DOUBLE] * self.num_singles, [], prod_d)
            # Add arrows coming from idempotent output on the D-side
            for (coeff_out, dgen_to), ring_coeff in list(
                    dstr.delta(dgen).items()):
                if coeff_out.isIdempotent():
                    dstr_result.addDelta(
                        x, DATensorDGenerator(dstr_result, dagen, dgen_to),
                        dagen.idem1.toAlgElt(self.algebra1), 1)

        # Find grading set if available on both components
        def tensorGradingSet():
            """Find the grading set of the new type D structure."""
            return GeneralGradingSet([self.gr_set, dstr.gr_set])

        def tensorGrading(gr_set, dagen, dgen):
            """Find the grading of the generator (x, y) in the tensor type D
            structure. The grading set need to be provided as gr_set.
            """
            return GeneralGradingSetElement(
                gr_set, [self.grading[dagen], dstr.grading[dgen]])

        if hasattr(self, "gr_set") and hasattr(dstr, "gr_set"):
            dstr_result.gr_set = tensorGradingSet()
            dstr_result.grading = dict()
            for x in dstr_result.getGenerators():
                dagen, dgen = x
                dstr_result.grading[x] = tensorGrading(
                    dstr_result.gr_set, dagen, dgen)
        return dstr_result

    def delta(self, MGen, algGens):
        """Delta of the extended structure: compute the local delta on the
        local restrictions, then join with the product of the outer
        restrictions."""
        # Preliminary tests
        if len(algGens) > 0 and algGens[0].left_idem != MGen.idem2:
            return E0
        if any([algGens[i].right_idem != algGens[i+1].left_idem
                for i in range(len(algGens)-1)]):
            return E0
        if any([alg.isIdempotent() for alg in algGens]):
            return E0
        assignment, algs_local, prod_d = self.getAssignments(MGen, algGens)
        if assignment is None:
            return E0
        local_MGen = MGen.local_gen
        if len(algs_local) > 0:
            local_MGen = self.adjustLocalMGen(local_MGen, algs_local[0])
            if local_MGen is None:
                return E0
        local_delta = self.local_da.delta(local_MGen, tuple(algs_local))
        if local_delta == 0:
            return E0
        result = E0
        for (local_d, local_y), ring_coeff in list(local_delta.items()):
            # NOTE(review): joinOutput can return (None, None) when the join
            # fails; tensorD guards against this but delta does not -- confirm
            # that the join always succeeds on this code path.
            alg_d, y = self.joinOutput(local_d, local_y, prod_d)
            result += 1 * TensorGenerator((alg_d, y), self.AtensorM)
        return result

    def deltaPrefix(self, MGen, algGens):
        """Returns a boolean: whether some longer sequence of A-side inputs
        beginning with algGens may produce a nonzero delta."""
        # Preliminary tests
        if len(algGens) == 0:
            return True
        if algGens[0].left_idem != MGen.idem2:
            return False
        if any([algGens[i].right_idem != algGens[i+1].left_idem
                for i in range(len(algGens)-1)]):
            return False
        assignment, algs_local, prod_d = self.getAssignments(MGen, algGens)
        if assignment is None:
            # Fixed: previously returned E0 here, while every other path in
            # this function returns a bool. E0 is falsy, so truth-testing
            # callers behave the same, but the return type was inconsistent.
            return False
        local_MGen = MGen.local_gen
        # Try every subset of the DOUBLE-assigned singles as local.
        dbls = [self.single_idems2[i] for i in range(self.num_singles)
                if assignment[i] == self.DOUBLE]
        for to_remove in subset(dbls):
            if len(to_remove) != 0:
                cur_algs_local = tuple([alg.removeSingleHor(to_remove)
                                        for alg in algs_local])
            else:
                cur_algs_local = algs_local
            if self.testPrefix(local_MGen, cur_algs_local):
                return True
        return False
def identityDALocal(local_pmc):
    """Returns the identity type DA structure for a given local PMC.
    Actually the same as the non-local case, except we don't have Heegaard
    diagrams.
    """
    alg = local_pmc.getAlgebra()
    single_idems = local_pmc.getSingleIdems()
    dastr = LocalDAStructure(F2, alg, alg, single_idems1 = single_idems,
                             single_idems2 = single_idems)
    idems = local_pmc.getIdempotents()
    idem_to_gen_map = {}
    # One generator per idempotent; the integer index serves as its name.
    # (Idiom fix: enumerate instead of range(len(...)).)
    for i, idem in enumerate(idems):
        cur_gen = SimpleDAGenerator(dastr, idem, idem, i)
        idem_to_gen_map[idem] = cur_gen
        dastr.addGenerator(cur_gen)
    # delta(x, (a,)) = a * y for each non-idempotent algebra generator a.
    for gen in alg.getGenerators():
        if not gen.isIdempotent():
            gen_from = idem_to_gen_map[gen.getLeftIdem()]
            gen_to = idem_to_gen_map[gen.getRightIdem()]
            dastr.addDelta(gen_from, gen_to, gen, (gen,), 1)
    dastr.auto_u_map()
    return dastr
| 28,828
| 44.257457
| 85
|
py
|
bfh_python
|
bfh_python-master/latex.py
|
"""A collection of latex printing code."""
from utility import sumColumns
from functools import cmp_to_key
def beginDoc():
    """Return the preamble lines opening a standalone latex document."""
    lines = ["\\documentclass{article}",
             "\\usepackage{tikz}",
             "\\begin{document}"]
    return "\n".join(lines) + "\n"
def endDoc():
    """Return the closing line of a latex document."""
    closing = "\\end{document}\n"
    return closing
def beginTikz(scale):
    """Return the opening of a tikzpicture environment, with both axes set
    to the given scale in points."""
    header = "\\begin{tikzpicture} [x=%dpt, y=%dpt," % (scale, scale)
    return header + "baseline=(current bounding box.center)]\n"
def endTikz():
    """Return the closing line of a tikzpicture environment."""
    footer = "\\end{tikzpicture}\n"
    return footer
def showLocalStrandDiagram(sd, pmc_map = None):
    """sd is the local strand diagram to be displayed. pmc_map is a mapping
    from points on the local PMC to vertical location in the displayed
    picture. Defaults to i -> i for all 0 <= i < n.
    """
    result = ""
    local_pmc = sd.local_pmc
    n = local_pmc.n
    if pmc_map is not None:
        assert len(pmc_map) == n
    else:
        pmc_map = [i for i in range(n)]
    # Header
    result += beginTikz(10)
    # Display a dummy line, to align the strand diagrams and idempotents
    min_pos, max_pos = min(pmc_map), max(pmc_map)
    result += "\\draw [color=white] (3, %d) to (3, %d);\n" \
        % (min_pos - 1, max_pos + 1)
    # Display dots for the local PMC.
    for i in range(n):
        if i not in local_pmc.endpoints:
            result += "\\filldraw (0, %d) circle (0.1);\n" % pmc_map[i]
            result += "\\filldraw (3, %d) circle (0.1);\n" % pmc_map[i]
    # Display single and double horizontal lines.
    # Fixed invalid escape sequences below: "\d" -> "\\d" (same string value
    # at runtime, but avoids SyntaxWarning and matches the rest of the file).
    for idem in sd.all_hor:
        for p in local_pmc.pairs[idem]:
            result += "\\draw [dashed] (0, %d) to (3, %d);\n" \
                % (pmc_map[p], pmc_map[p])
    # Display strands.
    for start, end in sd.strands:
        result += "\\draw [->] (0, %d) to [out=0, in=180] (3, %d);\n" \
            % (pmc_map[start], pmc_map[end])
    # Footer
    result += endTikz()
    return result
def showDAGenerator(gen, pmc_map1, pmc_map2):
    """Render a generator of the type DA structure as a tikz picture.

    The D-side idempotent is drawn between columns 0 and 3, the A-side
    idempotent between columns 3 and 6, separated by a vertical line.
    pmc_map1 and pmc_map2 give the vertical positions of the points of the
    two local PMCs.
    """
    local_pmc1 = gen.idem1.local_pmc
    local_pmc2 = gen.idem2.local_pmc
    parts = [beginTikz(10)]
    # Dots for the points of the two local PMCs (endpoints are not drawn).
    for pt in range(local_pmc1.n):
        if pt not in local_pmc1.endpoints:
            parts.append("\\filldraw (0, %d) circle (0.1);\n" % pmc_map1[pt])
    for pt in range(local_pmc2.n):
        if pt not in local_pmc2.endpoints:
            parts.append("\\filldraw (6, %d) circle (0.1);\n" % pmc_map2[pt])
    # Vertical line separating the two sides.
    lo = min(min(pmc_map1), min(pmc_map2))
    hi = max(max(pmc_map1), max(pmc_map2))
    parts.append("\\draw (3, %d) to (3, %d);\n" % (lo - 1, hi + 1))
    # Dashed horizontals for the occupied idempotents on each side.
    for idem in gen.idem1:
        for pt in local_pmc1.pairs[idem]:
            parts.append("\\draw [dashed] (0, %d) to (3, %d);\n"
                         % (pmc_map1[pt], pmc_map1[pt]))
    for idem in gen.idem2:
        for pt in local_pmc2.pairs[idem]:
            parts.append("\\draw [dashed] (3, %d) to (6, %d);\n"
                         % (pmc_map2[pt], pmc_map2[pt]))
    parts.append(endTikz())
    return "".join(parts)
def showDAStructure(dastr, pmc_map1 = None, pmc_map2 = None):
    """Show a type DA structure: one latex equation per arrow of the delta
    action, sorted by complexity of the coefficients. pmc_map1 / pmc_map2
    map points of the two local PMCs to vertical positions (default i -> i).
    """
    result = ""
    n1 = dastr.algebra1.local_pmc.n
    n2 = dastr.algebra2.local_pmc.n
    if pmc_map1 is not None:
        assert len(pmc_map1) == n1
    else:
        pmc_map1 = [i for i in range(n1)]
    if pmc_map2 is not None:
        assert len(pmc_map2) == n2
    else:
        pmc_map2 = [i for i in range(n2)]
    # Flatten the nested da_action dictionaries into one list of arrows.
    to_print = []
    for (gen_from, coeffs_a), target in list(dastr.da_action.items()):
        for (coeff_d, gen_to), ring_coeff in list(target.items()):
            to_print.append((gen_from, coeffs_a, coeff_d, gen_to))
    # Sort the to_print by the following comparison function
    def compare_arrow(arrow1, arrow2):
        # Order arrows by: D-side multiplicity, then number of D-side
        # strands, then total A-side multiplicity.
        gen_from1, coeffs_a1, coeff_d1, gen_to1 = arrow1
        gen_from2, coeffs_a2, coeff_d2, gen_to2 = arrow2
        def compare_mult(mult1, mult2):
            # Compare first by total multiplicity, then lexicographically on
            # the reversed multiplicity vectors.
            if sum(mult1) != sum(mult2):
                return sum(mult1) - sum(mult2)
            mult1, mult2 = list(reversed(mult1)), list(reversed(mult2))
            if mult1 < mult2:
                return -1
            if mult1 > mult2:
                return 1
            return 0
        mult_d1, mult_d2 = coeff_d1.multiplicity, coeff_d2.multiplicity
        if compare_mult(mult_d1, mult_d2) != 0:
            return compare_mult(mult_d1, mult_d2)
        if len(coeff_d1.strands) != len(coeff_d2.strands):
            return len(coeff_d1.strands) - len(coeff_d2.strands)
        # NOTE(review): coeffs_a1 and coeffs_a2 both live on the A-side
        # (algebra2), yet mult_a1 is summed with length n1-1 and mult_a2 with
        # n2-1. For n1 != n2 this looks inconsistent -- confirm intent.
        mult_a1 = sumColumns([coeff.multiplicity for coeff in coeffs_a1], n1-1)
        mult_a2 = sumColumns([coeff.multiplicity for coeff in coeffs_a2], n2-1)
        return compare_mult(mult_a1, mult_a2)
    for gen_from, coeffs_a, coeff_d, gen_to in sorted(to_print,
                                          key = cmp_to_key(compare_arrow)):
        # Each arrow is displayed as delta^1(gen, a_1, ..., a_k) -> d (x) gen'.
        result += "\\begin{equation}\n"
        result += "\\delta^1\\left( \n"
        result += showDAGenerator(gen_from, pmc_map1, pmc_map2)
        for coeff_a in coeffs_a:
            result += "~,~\n";
            result += showLocalStrandDiagram(coeff_a, pmc_map2)
        result += "\\right) \\to \n"
        result += showLocalStrandDiagram(coeff_d, pmc_map1)
        result += "~\\otimes~ \n"
        result += showDAGenerator(gen_to, pmc_map1, pmc_map2)
        result += "\\end{equation}\n"
    return result
def showArrow(coeff_d, coeffs_a, pmc_map1 = None, pmc_map2 = None):
    """Render a single arrow (A-side inputs -> D-side output) as a latex
    equation."""
    pictures_a = [showLocalStrandDiagram(coeff_a, pmc_map2)
                  for coeff_a in coeffs_a]
    body = "~,~\n".join(pictures_a)
    body += "\\to \n"
    body += showLocalStrandDiagram(coeff_d, pmc_map1)
    return "\\begin{equation}\n" + body + "\\end{equation}\n"
| 5,890
| 33.052023
| 87
|
py
|
bfh_python
|
bfh_python-master/identityaatest.py
|
"""Unit test for identityaa.py"""
from identityaa import *
from identityaa import _getIntervalOrdering
from pmc import antipodalPMC, linearPMC, splitPMC
import unittest
class HomotopyAATest(unittest.TestCase):
    """Tests for the homotopy used in the type AA identity bimodule."""
    def testHomotopyAA(self):
        # Verify the homotopy identities for the genus 1 and 2 split PMCs.
        for genus in (1, 2):
            HomotopyAA(splitPMC(genus)).testHomotopy()

    def testGetIntervalOrdering(self):
        # Compare against previously computed interval orderings.
        cases = [(splitPMC(1), [0, 1, 2]),
                 (splitPMC(2), [4, 5, 6, 3, 0, 1, 2]),
                 (antipodalPMC(2), [2, 5, 0, 3, 6, 1, 4]),
                 (linearPMC(2), [4, 0, 1, 3, 5, 6, 2])]
        for pmc, expected in cases:
            self.assertEqual(_getIntervalOrdering(pmc), expected)
if __name__ == "__main__":
    # Run this test module directly with the standard unittest runner.
    unittest.main()
| 713
| 30.043478
| 62
|
py
|
bfh_python
|
bfh_python-master/signstest.py
|
"""Unit test for signs.py."""
from signs import *
from grading import DEFAULT_REFINEMENT, lowerRefinement
from pmc import PMC
from pmc import antipodalPMC, linearPMC, splitPMC
from utility import ZZ
import unittest
class AbsZ2GradingTest(unittest.TestCase):
    """Tests for absolute Z/2Z gradings on (pre-)strand algebras."""
    def testAbsGrading(self):
        def testOneAlgebra(alg, test_op = True):
            # Check that an absolute Z/2 grading is defined for every
            # generator, and (when test_op is True) that it is compatible
            # with the differential and with multiplication.
            abs_gr = AbsZ2Grading(alg)
            for gen in alg.getGenerators():
                # Test asserts in getAbsGrading
                abs_gr.getAbsGrading(gen)
            if not test_op:
                return
            # Test differential and multiplication
            for a in alg.getGenerators():
                for term in a.diff():
                    a_gr, da_gr = [abs_gr.getAbsGrading(gen)
                                   for gen in (a, term)]
                    # The differential drops the grading by one (mod 2).
                    assert (a_gr - 1) % 2 == da_gr
            for a in alg.getGenerators():
                for b in alg.getGenerators():
                    if a * b != 0:
                        a_gr, b_gr, ab_gr = [abs_gr.getAbsGrading(gen)
                                             for gen in (a, b, (a*b).getElt())]
                        # Multiplication adds gradings (mod 2).
                        assert (a_gr + b_gr) % 2 == ab_gr
        for pmc in [splitPMC(1), splitPMC(2), linearPMC(2)]:
            testOneAlgebra(pmc.getAlgebra())
        # Larger algebras: only check that gradings are defined (the
        # operation checks are skipped -- presumably for speed; TODO confirm).
        for pmc in [antipodalPMC(2), splitPMC(3)]:
            testOneAlgebra(pmc.getAlgebra(), test_op = False)
        for (pmc, idem_size) in [(splitPMC(1), 1), (splitPMC(1), 2),
                                 (splitPMC(2), 1), (splitPMC(2), 2)]:
            testOneAlgebra(PreStrandAlgebra(F2, pmc, idem_size))
        for (pmc, idem_size) in [(splitPMC(2), 3), (splitPMC(2), 4),
                                 (splitPMC(3), 2)]:
            testOneAlgebra(PreStrandAlgebra(F2, pmc, idem_size),
                           test_op = False)
class PreStrandAlgebraTest(unittest.TestCase):
    """Tests for the pre-strand algebra."""
    def testGetGenerators(self):
        # Check generator counts against previously computed values.
        for pmc, idem_size, n in [(splitPMC(1), 1, 10),
                                  (splitPMC(1), 2, 25),
                                  (splitPMC(2), 1, 36),
                                  (splitPMC(2), 2, 462),
                                  (splitPMC(2), 3, 2646),
                                  (splitPMC(2), 4, 6951),
                                  (splitPMC(3), 2, 2431),
                                  (splitPMC(3), 3, 39325)]:
            # Further numbers:
            # splitPMC(3), 4 --> 359502
            algebra = PreStrandAlgebra(F2, pmc, idem_size)
            self.assertEqual(n, len(algebra.getGenerators()))

    def testDiff(self):
        algebra = PreStrandAlgebra(F2, splitPMC(1), 2)
        for sd, sd_diff in [([(1, 3), (2, 2)], [(1, 2), (2, 3)]),
                            ([(0, 3), (1, 2)], [(0, 2), (1, 3)])]:
            self.assertEqual(PreStrandDiagram(algebra, sd).diff(),
                             PreStrandDiagram(algebra, sd_diff).elt())
        # These diagrams are expected to have zero differential.
        for sd in [[(0, 2), (1, 3)], [(1, 2), (2, 3)]]:
            self.assertEqual(PreStrandDiagram(algebra, sd).diff(), E0)

    def testSignedDiff(self):
        for pmc, idem_size in [(splitPMC(2), 3),
                               (splitPMC(2), 4),
                               (splitPMC(3), 3)]:
            algebra = PreStrandAlgebra(ZZ, pmc, idem_size)
            # Test d^2 = 0.
            for gen in algebra.getGenerators():
                assert gen.diff().diff() == 0

    def testMultiply(self):
        algebra = PreStrandAlgebra(F2, splitPMC(1), 2)
        for sd1, sd2, prod in [([(0, 0), (1, 3)], [(0, 1), (3, 3)],
                                [(0, 1), (1, 3)]),
                               ([(0, 3), (1, 1)], [(1, 2), (3, 3)],
                                [(0, 3), (1, 2)]),
                               ([(1, 3), (0, 0)], [(0, 2), (3, 3)],
                                [(0, 2), (1, 3)])]:
            self.assertEqual(PreStrandDiagram(algebra, sd1) *
                             PreStrandDiagram(algebra, sd2),
                             PreStrandDiagram(algebra, prod).elt())
        # These products are expected to vanish.
        for sd1, sd2 in [([(0, 2), (1, 1)], [(1, 3), (2, 2)])]:
            self.assertEqual(PreStrandDiagram(algebra, sd1) *
                             PreStrandDiagram(algebra, sd2), E0)

    # Disabled test kept for reference.
    # def testSignedMultiply(self):
    #     for pmc, idem_size in [(splitPMC(1), 1),
    #                            (splitPMC(1), 2),
    #                            (splitPMC(2), 1),
    #                            (splitPMC(2), 2),
    #                            (splitPMC(2), 3),
    #                            (splitPMC(2), 4),
    #                            (splitPMC(3), 2)]:
    #         algebra = PreStrandAlgebra(ZZ, pmc, idem_size)
    #         print(algebra)
    #         for gen1 in algebra.getGenerators():
    #             for gen2 in algebra.getGeneratorsForPtIdem(
    #                     l_pt_idem = gen1.right_pt_idem):
    #                 if gen1 * gen2 != E0:
    #                     # Test d(ab) = (da)*b + (-1)^gr(a)*a*(db)
    #                     self.assertEqual(
    #                         (gen1 * gen2).diff(), gen1.diff() * gen2 + \
    #                         algebra.grSign(gen1) * gen1 * gen2.diff())
    #                 for gen3 in algebra.getGeneratorsForPtIdem(
    #                         l_pt_idem = gen2.right_pt_idem):
    #                     if gen2 * gen3 != E0:
    #                         # Tests associativity of multiplication
    #                         self.assertEqual(
    #                             (gen1 * gen2) * gen3, gen1 * (gen2 * gen3))
class SignLinAlgTest(unittest.TestCase):
    """Tests setting up the linear system used for sign assignments."""
    def testCreateRowSystem(self):
        algebra = StrandAlgebra(F2, antipodalPMC(2), idem_size = 2,
                                mult_one = True)
        # Only checks that the row system can be built without errors.
        SignLinAlg(algebra).createRowSystem()
if __name__ == "__main__":
    # Run this test module directly with the standard unittest runner.
    unittest.main()
| 5,832
| 46.040323
| 82
|
py
|
bfh_python
|
bfh_python-master/braid.py
|
"""Handles braids and their type DD structures."""
import sys
from arcslide import Arcslide
from arcslideda import ArcslideDA
from cobordism import Cobordism
from cobordism import LEFT, RIGHT
from cobordismda import CobordismDALeft, CobordismDARight
from dehntwistda import DehnSurgeryDA
from digraph import computeATensorD, computeATensorDD, computeDATensorD
from dstructure import infTypeD, platTypeD, zeroTypeD
from pmc import linearPMC, splitPMC
from utility import memorize
from utility import NEG, POS, PRINT_PROGRESS
class Braid(object):
    """Represents a braid with a fix number of strands. Each braid generator is
    represented by an integer n. If 1 <= n <= num_strands-1, it represents
    moving strand n over strand n+1. If -(num_strand-1) <= n <= -1, it
    represents moving strand |n| under strand |n|+1.
    """
    def __init__(self, num_strands):
        """Specifies the number of strands in the braid."""
        self.num_strands = num_strands
        # Genus of the associated linear PMC; appears to assume num_strands
        # is even -- TODO confirm against callers.
        self.genus = (num_strands - 2)//2
        self.pmc = linearPMC(self.genus)

    def getArcslides(self, word):
        """Get the sequence of arcslides corresponding to a braid generator or
        a list of braid generators.

        word is either a single nonzero integer (see class docstring) or a
        list of such integers. Returns a list of Arcslide objects; for a
        negative generator the inverse slides are returned in reversed order.
        """
        if isinstance(word, list):
            # Concatenate the arcslide sequences of the individual generators.
            return sum([self.getArcslides(n) for n in word], [])
        abs_word = abs(word)
        assert 1 <= abs_word <= self.num_strands-1
        # slides_info holds (b1, c1) descriptions of each slide; the actual
        # Arcslide objects are chained below so each starts at the previous
        # slide's end PMC.
        if abs_word == 1:
            slides_info = [(1, 0)]
        elif 1 < abs_word < self.num_strands-2:
            slides_info = [(2*abs_word-2, 2*abs_word-3)] * 2
        elif abs_word == self.num_strands-2:
            slides_info = [(2*abs_word-2, 2*abs_word-3)]
        else: # abs_word = self.num_strands-1
            # The last generator needs a longer sequence of slides.
            slides_info = [(4*i-3, 4*i-4) for i in range(self.genus, 0, -1)]
            slides_info += [(2*i+2, 2*i+1) for i in range(2*self.genus-2)]
        slides = [Arcslide(self.pmc, slides_info[0][0], slides_info[0][1])]
        for i in range(1, len(slides_info)):
            slides.append(Arcslide(slides[i-1].end_pmc,
                                   slides_info[i][0], slides_info[i][1]))
        if word > 0:
            return slides
        else: # n < 0
            # An inverse braid generator corresponds to the inverse slides,
            # performed in the opposite order.
            return list(reversed([slide.inverse() for slide in slides]))
def composeDD(dstr, ddstr_list, is_dual = False, method = "Tensor"):
    """Fold each type DD structure in ``ddstr_list`` onto the type D
    structure ``dstr``, simplifying and reindexing after every step.

    method is either "Tensor" (box tensor product) or "Mor" (morphism
    complex; requires is_dual to be False). Progress is printed when
    PRINT_PROGRESS is positive.
    """
    if PRINT_PROGRESS > 0:
        print("(compose %s %d)" % (method, len(ddstr_list)), end=' ')
    for ddstr in ddstr_list:
        if PRINT_PROGRESS > 0:
            sys.stdout.write("%d," % len(dstr))
            sys.stdout.flush()
        if method == "Tensor":
            if is_dual:
                dstr = computeATensorDD(dstr, ddstr)
            else:
                dstr = computeDATensorD(ddstr, dstr)
        elif method == "Mor":
            assert not is_dual
            dstr = ddstr.morToD(dstr)
        else:
            assert False, "Unknown method"
        # Keep the intermediate structure small before the next step.
        dstr.simplify()
        dstr.reindex()
        if PRINT_PROGRESS > 0:
            print("%d\n" % len(dstr), end=' ')
    return dstr
@memorize
def platTypeD2(genus, is_dual = False):
    """Obtain the linear-pmc handlebody from the inf-framed handlebody by a
    sequence of arcslides. Since the inf-framed handlebody carries an
    absolute grading, the resulting type D structure does too.
    """
    start = infTypeD(genus, is_dual)
    # (b_1, c_1) positions for the arcslide sequence, three per handle.
    positions = []
    for i in range(genus-1):
        positions.extend([(4*i+3, 4*i+4), (4*i+6, 4*i+7), (4*i+5, 4*i+6)])
    # Chain the arcslides starting from the split PMC.
    arcslides = []
    cur_pmc = splitPMC(genus)
    for b1, c1 in positions:
        slide = Arcslide(cur_pmc, b1, c1)
        arcslides.append(slide)
        cur_pmc = slide.end_pmc
    if not is_dual:
        arcslides = [slide.inverse() for slide in arcslides]
    return composeDD(
        start, [slide.getDDStructure() for slide in arcslides], is_dual)
class BraidCap(object):
    """Represents a capping of a braid."""
    def __init__(self, matching):
        """Specifies the matching of strands.

        NOTE(review): matching appears to be 1-based -- matching[i] is the
        (1-based) number of the strand paired with strand i+1; confirm
        against readBridgePresentation, which converts 0-based input with
        [1+n for n in ...].
        """
        self.matching = tuple(matching)
        self.num_strands = len(self.matching)
        self.genus = (len(self.matching) - 2)//2
    def __eq__(self, other):
        return self.matching == other.matching
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # matching is stored as a tuple, so it is hashable.
        return hash(self.matching)
    def getLastCobordism(self):
        """Returns the position of the leftmost pair of adjacent points matched
        under this braid cap. This is a pair that can be considered as last
        added in a sequence of cobordisms forming this braid cap. Also returns
        the braid cap before this cobordism.
        """
        if self.genus == 0:
            # Genus 0 means only two strands: nothing to remove.
            return None
        # At each step, find the left-most ending point of a matching.
        for i in range(len(self.matching)):
            if self.matching[i] <= i+1:
                # The leftmost closing point must close an adjacent pair:
                # strand i+1 (1-based) is matched with strand i.
                assert self.matching[i] == i
                # Cobordisms are labeled 0-based from the left, so the pair
                # of strands (i, i+1) corresponds to cobordism i-1.
                cur_move = i-1
                new_matching = []
                # Drop the two matched strands (0-based indices i-1 and i)
                # and renumber: strands to the right shift down by 2.
                for n in self.matching[:i-1] + self.matching[i+1:]:
                    assert n != i and n != i+1
                    if n < i:
                        new_matching.append(n)
                    else:
                        new_matching.append(n-2)
                new_cap = BraidCap(new_matching)
                return (cur_move, new_cap)
    @memorize
    def getCobordismSequence(self):
        """Returns a sequence of cobordisms that will close off this braid cap
        to two strands (genus-0). The cobordisms are labeled 0-based starting
        from the left. The right-most cobordism is never used.
        For example:
        (4, 3, 2, 1) --> [1]
        (2, 1, 4, 3) --> [0]
        (6, 5, 4, 3, 2, 1) --> [2, 1]
        (6, 3, 2, 5, 4, 1) --> [1, 1]
        (2, 1, 6, 5, 4, 3) --> [0, 1]
        """
        if self.genus == 0:
            return []
        else:
            # Peel off one cobordism and recurse on the smaller cap.
            last_move, prev_cap = self.getLastCobordism()
            return [last_move] + prev_cap.getCobordismSequence()
    @memorize
    def openCap(self):
        """Returns the type D structure corresponding to this handlebody by
        tensoring type DA bimodules with the genus-1 handlebody.
        """
        assert self.genus > 0
        last_move, prev_cap = self.getLastCobordism()
        if self.genus == 1:
            # Base case: the two genus-1 handlebodies.
            if last_move == 0:
                return zeroTypeD(1)
            else:
                return infTypeD(1)
        else:
            # Recursive case: tensor a left cobordism onto the smaller cap.
            cur_da = CobordismDALeft(Cobordism(self.genus, last_move, LEFT))
            dstr = cur_da.tensorD(prev_cap.openCap())
            dstr.simplify()
            dstr.reindex()
            return dstr
    def closeCap(self, dstr, cancellation_constraint = None):
        """Computes the chain complex obtained by closing off this cap on dstr.
        That is, compute the box tensor product of the type A module
        corresponding to this cap with dstr.
        This is obtained by tensoring dstr with a sequence of right-side
        cobordisms, and finishing off by computing morToD with either
        zeroTypeD(1) or infTypeD(1).
        """
        assert self.genus > 0
        if self.genus <= 3:
            # morToD is efficient up to genus = 3.
            cx = dstr.morToD(self.openCap())
            cx.reindex()
            cx.simplify(cancellation_constraint = cancellation_constraint)
            return cx
        else:
            # Higher genus: strip one right cobordism at a time.
            last_move, prev_cap = self.getLastCobordism()
            cur_da = CobordismDARight(Cobordism(self.genus, last_move, RIGHT))
            dstr = cur_da.tensorD(
                dstr, cancellation_constraint = cancellation_constraint)
            dstr.reindex()
            dstr.simplify(cancellation_constraint = cancellation_constraint)
            return prev_cap.closeCap(dstr, cancellation_constraint)
class BridgePresentation(object):
    """Represents a bridge presentation of a knot. Computes HF of branched
    double cover from bridge presentation.
    """
    def __init__(self, name, start, braid_word, end):
        """Specifies name (any printable object), start and end (matchings of
        strands, in the 1-based list format accepted by BraidCap), and
        braid_word (list of braid generators as used by Braid.getArcslides).
        """
        self.name = name
        self.num_strands = len(start)
        self.start = start
        self.braid_word = braid_word
        self.end = end

    def getHF(self, method = "Mor"):
        """Computes HF of branched double cover.

        method is either "Mor" (morphism complex) or "Tensor" (box tensor
        product); both compute the same homology.
        """
        assert method in ("Mor", "Tensor")
        start_d = BraidCap(self.start).openCap()
        slides = Braid(self.num_strands).getArcslides(self.braid_word)
        if method == "Tensor":
            # The tensor method pairs with the inverse slides.
            slides = [slide.inverse() for slide in slides]
        slides_dd = [slide.getDDStructure() for slide in slides]
        start_d = composeDD(
            start_d, slides_dd, is_dual = False, method = method)
        if method == "Tensor":
            end_d = BraidCap(self.end).openCap().dual()
            cx = computeATensorD(end_d, start_d)
        else:
            end_d = BraidCap(self.end).openCap()
            cx = end_d.morToD(start_d)
        cx.simplify()
        cx.reindex()
        return cx

    def getHFByLocalDA(self):
        """Compute HF of branched double cover, using local type DA structures
        for arcslides.
        """
        start_d = BraidCap(self.start).openCap()
        slides = Braid(self.num_strands).getArcslides(self.braid_word)
        slides = [slide.inverse() for slide in slides]
        for slide in slides:
            print("%d" % len(start_d), end=' ')
            # Fixed: removed the stray trailing comma here, which built a
            # discarded one-element tuple instead of a plain call statement.
            sys.stdout.flush()
            start_d = ArcslideDA(slide).tensorD(start_d)
            start_d.reindex()
            start_d.simplify()
        # Close off using cobordisms
        cx = BraidCap(self.end).closeCap(start_d)
        return cx

    def addStrandsAtRight(self):
        """Return a bridge presentation for the same knot, with two more strands
        at right that are not involved in any crossings. The new bridge
        presentation will always be acceptable to getSpecSeq below.
        """
        n = self.num_strands
        new_start = self.start + [n+2, n+1]
        new_end = self.end + [-1, -1]
        for i in range(len(new_end)):
            if new_end[i] == n:
                # Strand i+1 is matched with n+2
                new_end[i] = n+2
                new_end[n+1] = i+1
        # Strand n matched with n+1 (note indices are 0-based).
        new_end[n-1] = n+1
        new_end[n] = n
        return BridgePresentation(
            self.name, new_start, self.braid_word, new_end)

    def getSpecSeq(self):
        """Compute the spectral sequence from Khovanov homology to HF of
        branched double cover, using local type DA structures for Dehn twists
        (as mapping cone between identity and anti-braid).
        Returns a list of lists. The first list contains counts of filtration
        gradings of generators in the Khovanov homology, starting from the one
        with lowest to the one with the highest filtration grading. The next
        list contains counts of filtration gradings in the E_3 page, etc, until
        the pages have stabilized.
        """
        if self.num_strands-2 in self.braid_word:
            # The last generator is not supported directly; add two unused
            # strands at right so every twist is an interior one.
            return self.addStrandsAtRight().getSpecSeq()
        start_d = BraidCap(self.start).openCap()
        genus = self.num_strands//2 - 1
        for twist in self.braid_word:
            print("%d" % len(start_d), end=' ')
            sys.stdout.flush()
            abs_twist = abs(twist)
            assert 1 <= abs_twist <= self.num_strands-2
            # Choice of orientation for the knot
            if twist < 0:
                surgery = DehnSurgeryDA(genus, abs_twist-1, POS)
            else:
                surgery = DehnSurgeryDA(genus, abs_twist-1, NEG)
            surgery_da = surgery.getMappingCone()
            start_d = surgery_da.tensorD(start_d)
            start_d.reindex()
            # Must be done in two steps
            start_d.simplify(cancellation_constraint = lambda x, y: (
                sum(x.filtration) == sum(y.filtration)))
            start_d.simplify(cancellation_constraint = lambda x, y: (
                sum(x.filtration) + 1 >= sum(y.filtration)))
        # Must not simplify everything immediately.
        cx = BraidCap(self.end).closeCap(
            start_d, cancellation_constraint = lambda x, y: (
                sum(x.filtration) == sum(y.filtration)))
        cx.reindex()
        cx.checkDifferential()
        cx.simplify(cancellation_constraint = lambda x, y: (
            sum(x.filtration) == sum(y.filtration)))
        result = []
        filt_diff = 1
        # Cancel differentials page by page until the complex stabilizes;
        # record the filtration-grading counts of each page.
        while any(x.diff() != 0 for x in cx.getGenerators()):
            cx.simplify(cancellation_constraint = lambda x, y: (
                sum(x.filtration) + filt_diff >= sum(y.filtration)))
            filt_grs = [sum(gen.filtration) for gen in cx.getGenerators()]
            if filt_diff == 1:
                # Find minimum and maximum at the second page
                min_filt, max_filt = min(filt_grs), max(filt_grs)
            result.append([filt_grs.count(i)
                           for i in range(min_filt, max_filt+1)])
            filt_diff += 1
        return result

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        result = "Bridge presentation for %s:\n" % str(self.name)
        result += "Start = %s\n" % str(self.start)
        result += "Braid word = %s\n" % str(self.braid_word)
        result += "End = %s\n" % str(self.end)
        return result
def readBridgePresentation(str_input):
    """Read bridge presentation from string input. The format is as follows:
    str_input is a line with space-separated tokens. The first token is the
    name of the knot (can be anything that doesn't contain a space). The
    remaining tokens are integers. The first integer is the integer k/2, where
    k is the number of strands in the braid.
    The next k integers 0 <= a_0, a_1, ... a_{k-1} < k specify the top closure
    of the braid. That is, the i'th strand is paired with the (a_i)'th strand
    (so we must have a_{a_i} = i).
    The next integer is the number of crossings n. This is followed by n pairs
    of integers, with each pair i, j meaning moving the i'th strand over the
    j'th strand (so |i - j| = 1).
    The next k integers specify the bottom closure of the braid, in the same
    format as that for the top closure.

    Raises ValueError if a crossing involves non-adjacent strands.
    """
    tokens = str_input.split()
    name = tokens[0]
    rest = [int(token) for token in tokens[1:]]
    bridge_size = rest[0]
    # Matchings are converted from the 0-based input format to the 1-based
    # format used by BraidCap.
    start = [1+n for n in rest[1:1+bridge_size*2]]
    rest = rest[1+bridge_size*2:]
    num_cross = rest[0]
    braid_word = []
    for i in range(num_cross):
        p, q = rest[2*i+1], rest[2*i+2]
        if p == q-1:
            braid_word.append(q)
        elif p == q+1:
            braid_word.append(-p)
        else:
            # Crossings must involve adjacent strands. Raise instead of
            # assert so that the check on external input survives python -O.
            raise ValueError(
                "Invalid crossing (%d, %d): strands must be adjacent" % (p, q))
    rest = rest[1+num_cross*2:]
    end = [1+n for n in rest]
    return BridgePresentation(name, start, braid_word, end)
| 15,097
| 37.417303
| 80
|
py
|
bfh_python
|
bfh_python-master/experimental.py
|
"""Try different things here by adding test cases. Tests added here are not
included in testmod.
"""
from braid import *
from dehntwist import *
from digraph import computeDATensorDD
from dstructure import SimpleDStructure, SimpleDGenerator
from dstructure import zeroTypeD
from ddstructure import SimpleDDGenerator, SimpleDDStructure
from ddstructure import DDStrFromDStr
from utility import DEFAULT_GRADING, F2, SMALL_GRADING
import itertools
import unittest
class ExperimentalTest(unittest.TestCase):
    """Ad-hoc experimental test cases (not part of the regular test suite).

    These exercise various bimodule computations and print intermediate
    results for manual inspection; several contain commented-out debug
    output kept deliberately for future experiments.
    """
    def testDehnTwist(self):
        """Compare a Dehn twist DD structure built from two arcslides with
        the one produced directly by the DehnTwist class.
        """
        slides = Braid(8).getArcslides(-5)
        assert len(slides) == 2
        print("Getting DD Structures")
        slides_dd = [slide.getDDStructure() for slide in slides]
        print("Tensoring")
        dehn_twist = computeDATensorDD(*slides_dd)
        print("Cleaning up and checks")
        dehn_twist.reindex()
        dehn_twist.checkGrading()
        self.assertTrue(dehn_twist.testDelta())
        twist = DehnTwist(3, 4, NEG)
        print("Getting DD from dehntwist")
        twist_dd = twist.getDDStructure()
        print("Comparing")
        print(twist_dd.compareDDStructures(dehn_twist))

    def testAbsoluteGrading(self):
        """Print absolute grading info for a list of genus-1 cases, each a
        (start handlebody, arcslide sequence, end handlebody) triple.
        """
        assert DEFAULT_GRADING == SMALL_GRADING
        dd_abs_info = 1
        gr_info = [4]
        d1 = zeroTypeD(1, is_dual = False, abs_gr_info = gr_info)
        d1d = zeroTypeD(1, is_dual = True, abs_gr_info = gr_info)
        d2 = infTypeD(1, is_dual = False, abs_gr_info = gr_info)
        d2d = infTypeD(1, is_dual = True, abs_gr_info = gr_info)
        cases = [(d1d, [], d2),
                 (d2d, [], d1),
                 (d1d, [(1,0)], d2), # Dehn twist for d1d
                 (d2d, [(2,1)], d1), # Dehn twist for d2d
                 (d1d, [(3,2)], d1), # d1d -> d2d
                 (d2d, [(0,1)], d2), # d2d -> d1d
                 (d2d, [(1,0)], d1), # Dehn twist for d1
                 (d1d, [(2,1)], d2), # Dehn twist for d2
                 (d2d, [(3,2)], d2), # d2 -> d1
                 (d1d, [(0,1)], d1), # d1 -> d2
                 (d1d, [(3,2),(3,2)], d2), # Case 0 and 1
                 (d1d, [(2,1),(2,1)], d1), # Hopf link
                 (d1d, [(2,3)]*3, d1), # Trefoil
                 (d1d, [(2,3),(1,0),(2,3)], d1), # Trefoil 2
                 (d1d, [(2,3),(1,2)]*3, d2), # Boundary dehn twist
                 (d2d, [(2,3),(1,2)]*3, d1), # Boundary dehn twist, #2
                 (d2d, [(3,2)]*3, d1), # ?
                 (d1d, [(2,1),(1,0),(1,2),(2,3),(2,3)], d1), # Unknot
                 (d1d, [(3,2),(0,1)], d2)
                 ]
        for start, slides, end in cases[0:10]:
            slides_dd = [Arcslide(splitPMC(1), b1, c1).\
                             getDDStructure(dd_abs_info) for b1, c1 in slides]
            d_mid = start
            # print "start grading ", d_mid.grading
            for dd in slides_dd:
                # print dd.gr_set
                # for gen, gr in dd.grading.items():
                #     print gen, gr
                d_mid = computeATensorDD(d_mid, dd)
                d_mid.simplify()
            # print "mid grading ", d_mid.grading
            # print d_mid.gr_set.simplifiedSet()
            # for gen in d_mid.getGenerators():
            #     print gen, d_mid.grading[gen].simplifiedElt()
            cur_cx = computeATensorD(d_mid, end)
            cur_cx.simplify()
            # print cur_cx.gr_set, cur_cx.grading
            # Alternate way of computing
            # d_mid = end
            # for dd in reversed(slides_dd):
            #     d_mid = computeDATensorD(dd, d_mid)
            # cur_cx = computeATensorD(start, d_mid)
            cur_abs_gr = cur_cx.getAbsGradingInfo()
            print([str(n) for n in cur_abs_gr])

    def testGenus2AbsoluteGrading(self):
        """Same experiment as testAbsoluteGrading, but for genus-2
        handlebodies and arcslide sequences on the genus-2 split PMC.
        """
        dd_abs_info = 0
        gr_info = [0,0]
        print(gr_info)
        d1 = zeroTypeD(2, is_dual = False, abs_gr_info = gr_info)
        d1d = zeroTypeD(2, is_dual = True, abs_gr_info = gr_info)
        d2 = infTypeD(2, is_dual = False, abs_gr_info = gr_info)
        d2d = infTypeD(2, is_dual = True, abs_gr_info = gr_info)
        cases = [(d1d, [], d2),
                 (d2d, [], d1),
                 (d1d, [(1,0)], d2), # Dehn twist for d1d
                 (d2d, [(2,1)], d1), # Dehn twist for d2d
                 (d1d, [(3,2),(7,6)], d1), # d1d -> d2d
                 (d2d, [(0,1),(4,5)], d2), # d2d -> d1d
                 (d1d, [(7,6),(0,1)], d1), # mixed
                 (d2d, [(1,0)], d1), # Dehn twist for d1
                 (d1d, [(2,1)], d2), # Dehn twist for d2
                 (d2d, [(3,2),(7,6)], d2), # d2 -> d1
                 (d1d, [(0,1),(4,5)], d1), # d1 -> d2
                 (d1d, [(3,2),(7,6),(3,4),(6,7),(5,6),(1,2),(3,4),(3,4),(5,6),
                        (5,6),(6,7),(4,3),(5,4),(6,5)], d2),
                 (d2d, [(3,4),(6,7),(5,6),(6,5),(4,3),(4,3),(2,1),(2,1),(1,0),
                        (4,3),(5,4),(6,5)], d2),
                 (d2d, [(3,4),(6,7),(5,6),(6,7),(5,6),(5,6),(3,4),(3,4),(1,2),
                        (4,3),(5,4),(6,5)], d2),
                 (d1d, [(3,4),(6,7),(5,6)] + \
                      [(6,5),(4,3),(4,3),(2,1),(2,1),(1,0)]*5 + \
                      [(4,3),(5,4),(6,5)], d2),
                 ]
        for start, slides, end in cases[0:10]:
            cur_pmc = splitPMC(2)
            slides_dd = []
            for b1, c1 in slides:
                arcslide = Arcslide(cur_pmc, b1, c1)
                cur_pmc = arcslide.end_pmc
                slides_dd.append(arcslide.getDDStructure(dd_abs_info))
            d_mid = start
            for dd in slides_dd:
                d_mid = computeATensorDD(d_mid, dd)
                d_mid.simplify()
                d_mid.reindex()
            # print d_mid
            # print d_mid.gr_set.simplifiedSet()
            # for gen in d_mid.getGenerators():
            #     print gen, d_mid.grading[gen].simplifiedElt()
            cur_cx = computeATensorD(d_mid, end)
            # dd_mid = slides_dd[0]
            # for dd in slides_dd[1:]:
            #     dd_mid = computeDATensorDD(dd_mid, dd)
            #     dd_mid.simplify()
            #     dd_mid.reindex()
            # # print dd_mid
            # print dd_mid.gr_set.simplifiedSet()
            # for gen in dd_mid.getGenerators():
            #     print gen, dd_mid.grading[gen].simplifiedElt()
            # Alternate way of computing
            # d_mid = end
            # for dd in reversed(slides_dd):
            #     d_mid = computeDATensorD(dd, d_mid)
            # cur_cx = computeATensorD(start, d_mid)
            cur_abs_gr = cur_cx.getAbsGradingInfo()
            print([str(n) for n in cur_abs_gr])

    def testBraidAbsoluteGrading(self):
        """Print absolute grading info for some small bridge presentations."""
        # getHF() does not yet support absolute grading. Test using the other
        # functions first
        std_cap = [6,3,2,5,4,1]
        to_test = [[std_cap, [], [2,1,4,3,6,5]],
                   [std_cap, [1,-1], [2,1,4,3,6,5]],
                   [std_cap, [2], [2,1,4,3,6,5]],
                   [std_cap, [-4], [2,1,4,3,6,5]]]
        for start_cap, braid_word, end_cap in to_test:
            br = BridgePresentation("br_test", start_cap, braid_word, end_cap)
            cx = br.getHF()
            # print cx.gr_set
            # for gen, gr in cx.grading.items():
            #     print gen, gr
            abs_gr = cx.getAbsGradingInfo()
            print([str(n) for n in abs_gr])

    def testAlgSize(self):
        """Print the number of generators of the genus-3 split PMC algebra."""
        print(len(splitPMC(3).getAlgebra().getGenerators()))

    def testTypeDInvariant(self):
        """Check that the genus-2 handlebody type D structure is invariant
        under several mapping classes (each given as an arcslide sequence).
        """
        d_start = infTypeD(2, is_dual = True, abs_gr_info = [2,2])
        # (b_1, c_1) for arcslides
        cases = [
            # Original
            [],
            # Twisting a handle
            [(2,1)],
            # Twisting a knob (half twist)
            [(2,3),(1,2)]*3,
            # Interchanging two knobs
            [(3,4),(6,7),(5,6),(4,5),(2,3),(5,6),(4,5),(3,4),
             (1,2),(4,5),(3,4),(2,3),(0,1),(3,4),(2,3),(1,2)],
            # Slide1
            [(4,3),(1,0),(1,2),(5,4),(6,5)],
            # Slide2
            [(0,1),(3,4),(6,7),(6,5),(2,3),(1,2),(3,2)]
            ]
        for slides in cases:
            # Convert (b_1, c_1) into arcslides and then DD structures
            cur_pmc = splitPMC(2)
            slides_dd = []
            for b1, c1 in slides:
                arcslide = Arcslide(cur_pmc, b1, c1)
                cur_pmc = arcslide.end_pmc
                slides_dd.append(arcslide.getDDStructure())
            # Tensor each of the DD structures onto d_start
            d_mid = d_start
            for dd in slides_dd:
                d_mid = computeATensorDD(d_mid, dd)
                d_mid.simplify()
                d_mid.reindex()
            print("Case: %s" % slides)
            print(d_mid)
            # Rough check that this equals the original
            self.assertEqual(len(d_mid), 1)
            self.assertEqual(len(d_mid.getGenerators()[0].delta()), 2)

    def testTrefoilSurgery(self):
        """Computes HF for +1 and -1 surgery on left-handed trefoil. """
        # Everything is over the PMC of genus 1
        pmc = splitPMC(1)
        algebra = pmc.getAlgebra()
        # Two idempotents
        i0 = pmc.idem([0])
        i1 = pmc.idem([1])
        # Some algebra elements
        rho1 = pmc.sd([(0,1)])
        rho2 = pmc.sd([(1,2)])
        rho3 = pmc.sd([(2,3)])
        rho23 = pmc.sd([(1,3)])
        rho123 = pmc.sd([(0,3)])
        # Now CFD(H_+1)
        d_p1 = SimpleDStructure(F2, algebra)
        a = SimpleDGenerator(d_p1, i1, "a")
        b = SimpleDGenerator(d_p1, i0, "b")
        d_p1.addGenerator(a)
        d_p1.addGenerator(b)
        d_p1.addDelta(a, b, rho2, 1)
        d_p1.addDelta(b, a, rho123, 1)
        print("CFD(H_+1): ", d_p1)
        # and CFD(H_-1)
        d_p2 = SimpleDStructure(F2, algebra)
        a = SimpleDGenerator(d_p2, i1, "a")
        b = SimpleDGenerator(d_p2, i0, "b")
        d_p2.addGenerator(a)
        d_p2.addGenerator(b)
        d_p2.addDelta(b, a, rho1, 1)
        d_p2.addDelta(b, a, rho3, 1)
        print("CFD(H_-1): ", d_p2)
        # CFD(trefoil)
        d_trefoil = SimpleDStructure(F2, algebra)
        x = SimpleDGenerator(d_trefoil, i0, "x")
        y = SimpleDGenerator(d_trefoil, i0, "y")
        z = SimpleDGenerator(d_trefoil, i0, "z")
        k = SimpleDGenerator(d_trefoil, i1, "k")
        l = SimpleDGenerator(d_trefoil, i1, "l")
        mu1 = SimpleDGenerator(d_trefoil, i1, "mu1")
        mu2 = SimpleDGenerator(d_trefoil, i1, "mu2")
        for gen in [x, y, z, k, l, mu1, mu2]:
            d_trefoil.addGenerator(gen)
        d_trefoil.addDelta(x, k, rho1, 1)
        d_trefoil.addDelta(y, k, rho123, 1)
        d_trefoil.addDelta(mu2, x, rho2, 1)
        d_trefoil.addDelta(mu1, mu2, rho23, 1)
        d_trefoil.addDelta(z, mu1, rho123, 1)
        d_trefoil.addDelta(l, y, rho2, 1)
        d_trefoil.addDelta(z, l, rho3, 1)
        print("CFD(trefoil): ", d_trefoil)
        # Compute the Mor complexes
        cx1 = d_p1.morToD(d_trefoil)
        # cx1 = computeATensorD(d_p1, d_trefoil)
        cx1.simplify()
        print("First result: ", cx1)
        cx2 = d_p2.morToD(d_trefoil)
        # cx2 = computeATensorD(d_p2, d_trefoil)
        cx2.simplify()
        print("Second result: ", cx2)

    def testLinkComplement(self):
        """Computes type DD structure associated to the complement of a certain
        link. Sequence of arcslides provided by Adam Levine.
        """
        twist1 = [(7,6),(7,6),(7,6),(7,6),(7,6),(7,6)]
        twist2 = [(4,3),(1,0),(2,1),(3,2),
                  (5,4),(2,1),(3,2),(4,3),
                  (6,5),(3,2),(4,3),(1,2),(0,1),(6,5)]
        start_pmc = splitPMC(2)
        twist_slides = {}
        twist_slides[1] = []
        twist_slides[2] = []
        cur_pmc = start_pmc
        for (b1, c1) in twist1:
            arcslide = Arcslide(cur_pmc, b1, c1)
            cur_pmc = arcslide.end_pmc
            twist_slides[1].append(arcslide)
        cur_pmc = start_pmc
        for (b1, c1) in twist2:
            arcslide = Arcslide(cur_pmc, b1, c1)
            cur_pmc = arcslide.end_pmc
            twist_slides[2].append(arcslide)
        twist_slides[-1] = [slide.inverse()
                            for slide in reversed(twist_slides[1])]
        twist_slides[-2] = [slide.inverse()
                            for slide in reversed(twist_slides[2])]
        # seq = [-2, -2, 1, 2]
        # seq = [2, 1, -2, -2]
        # seq = [-2]
        seq = [1]
        slides_total = []
        for twist in seq:
            slides_total.extend(twist_slides[twist])
        d_mid = infTypeD(2)
        # This shows our choice of starting type D structure is correct.
        # Doing this Dehn twist only should not change the starting type D
        # structure.
        # slides_total = [Arcslide(start_pmc, 6, 7)]
        # slides_total = [Arcslide(start_pmc, 7, 6)]
        for slide in slides_total:
            print(slide)
            print(d_mid)
            slide_dd = slide.getDDStructure(0)
            d_mid = slide_dd.morToD(d_mid)
            d_mid.simplify()
            d_mid.reindex()
        print(d_mid)
        dd_final = DDStrFromDStr(d_mid, 1)
        dd_final.testDelta()
        print(dd_final)
        dd_final.simplify()
        dd_final.reindex()
        print(dd_final)

    def testTwoStrandGenus1(self):
        """Print differential and multiplication tables for the idempotent
        size 2, non-multiplicity-one genus-1 algebra.
        """
        # Just code to print out differential and multiplication for an algebra.
        gens = splitPMC(1).getAlgebra(
            idem_size = 2, mult_one = False).getGenerators()
        for g in gens:
            print("d(%s) = %s" % (g, g.diff()))
        for g1, g2 in itertools.product(gens, gens):
            if g1.isIdempotent() or g2.isIdempotent():
                continue
            if g1 * g2 != 0:
                print("%s * %s = %s" % (g1, g2, g1*g2))

    def testT4nTorus(self):
        """Print the size of HF of branched double covers of torus links
        T(4,n), computed via local DA structures.
        """
        # Computation for torus links T(4,n).
        for n in range(1, 15):
            knot = BridgePresentation("T4_%d" % n, (8,7,6,5,4,3,2,1),
                                      [1,2,3]*n, (8,7,6,5,4,3,2,1))
            print(knot.name, len(knot.getHFByLocalDA()))

    def testDDStructureDelta(self):
        """Demonstrate testDelta() on hand-built type DD structures."""
        # Construct type DD structures, and test whether d^2 = 0 holds.
        # PMC on both sides are genus 1 split PMC.
        pmc = splitPMC(1)
        # Strand algebra corresponding to pmc.
        alg = pmc.getAlgebra()
        # Initialize type DD structure over field F_2, with (left-left) action
        # by the genus 1 strand algebra. Intend to make this type DD bimodule
        # for identity.
        ddstr1 = SimpleDDStructure(F2, alg, alg)
        # Initialize the list of generators to add to ddstr1.
        # The generators have "complementary" idempotents. However, since the
        # PMCs are in opposite direction on both sides, the vector specifying
        # idempotents are the same.
        idems = {"x" : ([0], [0]),
                 "y" : ([1], [1])}
        gens = {}
        for name, (idem1, idem2) in list(idems.items()):
            gens[name] = SimpleDDGenerator(
                ddstr1, Idempotent(pmc, idem1), Idempotent(pmc, idem2), name)
            ddstr1.addGenerator(gens[name])
        # Now add delta
        ddstr1.addDelta(gens["x"], gens["y"],
                        pmc.sd([(0, 1)]), pmc.sd([(2, 3)]), 1)
        ddstr1.addDelta(gens["y"], gens["x"],
                        pmc.sd([(1, 2)]), pmc.sd([(1, 2)]), 1)
        ddstr1.addDelta(gens["x"], gens["y"],
                        pmc.sd([(2, 3)]), pmc.sd([(0, 1)]), 1)
        # This already satisfies d^2 = 0
        self.assertTrue(ddstr1.testDelta())
        # However, one more arrow to finish the bimodule
        ddstr1.addDelta(gens["x"], gens["y"],
                        pmc.sd([(0, 3)]), pmc.sd([(0, 3)]), 1)
        # This is now the identity bimodule, of course satisfying d^2 = 0.
        self.assertTrue(ddstr1.testDelta())
        # Second example, showing failure of testDelta()
        ddstr2 = SimpleDDStructure(F2, alg, alg)
        # Add the same generators as before
        gens = {}
        for name, (idem1, idem2) in list(idems.items()):
            gens[name] = SimpleDDGenerator(
                ddstr2, Idempotent(pmc, idem1), Idempotent(pmc, idem2), name)
            ddstr2.addGenerator(gens[name])
        # Now add delta
        ddstr2.addDelta(gens["x"], gens["y"],
                        pmc.sd([(0, 1)]), pmc.sd([(0, 1)]), 1)
        ddstr2.addDelta(gens["y"], gens["x"],
                        pmc.sd([(1, 2)]), pmc.sd([(1, 2)]), 1)
        # Prints the type DD structure. Note the code already checks that
        # idempotent matches in all added arrows (throws an error if they don't
        # match).
        print(ddstr2)
        # However, testDelta() fails. Prints a term in d^2(x).
        self.assertFalse(ddstr2.testDelta())
if __name__ == "__main__":
    # Run the experimental tests when this file is invoked as a script.
    unittest.main()
| 16,918
| 39.670673
| 80
|
py
|
bfh_python
|
bfh_python-master/cobordismtest.py
|
"""Unit test for cobordismtest.py"""
from cobordism import *
from dstructure import platTypeD
import unittest
class CobordismTest(unittest.TestCase):
    """Tests for type DD structures associated to elementary cobordisms."""
    def testCobordism(self):
        """DD structures of interior cobordisms satisfy d^2 = 0."""
        cases = [(2, 1, RIGHT), (2, 2, RIGHT), (3, 1, RIGHT),
                 (3, 2, RIGHT), (3, 3, RIGHT), (3, 4, RIGHT)]
        for genus, c_pair, side in cases:
            # getDDStructure() verifies testDelta() internally.
            Cobordism(genus, c_pair, side).getDDStructure()

    def testCobordismShort(self):
        """DD structures of boundary (short) cobordisms satisfy d^2 = 0."""
        cases = [(2, 0, RIGHT), (2, 3, RIGHT), (3, 0, RIGHT),
                 (3, 5, RIGHT)]
        for genus, c_pair, side in cases:
            # getDDStructure() verifies testDelta() internally.
            Cobordism(genus, c_pair, side).getDDStructure()

    def testMorToPlatRight(self):
        """Right cobordisms map genus-(g-1) plats to genus-g plats."""
        def check(cases, make_plat):
            # make_plat builds the expected plat handlebody at a given genus.
            for genus, c_pair, side in cases:
                dd = Cobordism(genus, c_pair, side).getDDStructure()
                result = dd.morToD(make_plat(genus-1))
                result.simplify()
                self.assertTrue(
                    result.compareDStructures(make_plat(genus)))
        check([(2, 1, RIGHT), (2, 3, RIGHT), (3, 1, RIGHT),
               (3, 3, RIGHT), (3, 5, RIGHT)],
              platTypeD)
        check([(2, 0, RIGHT), (2, 2, RIGHT), (3, 0, RIGHT),
               (3, 2, RIGHT), (3, 4, RIGHT)],
              lambda genus: platTypeD(genus).dual())

    def testMorToPlatLeft(self):
        """Left cobordisms map genus-g plats to genus-(g-1) plats."""
        def check(cases, make_plat):
            # make_plat builds the expected plat handlebody at a given genus.
            for genus, c_pair, side in cases:
                dd = Cobordism(genus, c_pair, side).getDDStructure()
                result = dd.morToD(make_plat(genus))
                result.simplify()
                self.assertTrue(
                    result.compareDStructures(make_plat(genus-1)))
        check([(2, 0, LEFT), (2, 2, LEFT), (3, 0, LEFT),
               (3, 2, LEFT), (3, 4, LEFT)],
              platTypeD)
        check([(2, 1, LEFT), (2, 3, LEFT), (3, 1, LEFT),
               (3, 3, LEFT), (3, 5, LEFT)],
              lambda genus: platTypeD(genus).dual())
if __name__ == "__main__":
    # Run the cobordism tests when this file is invoked as a script.
    unittest.main()
| 2,444
| 37.809524
| 80
|
py
|
bfh_python
|
bfh_python-master/grading.py
|
"""Handles grading groups and grading sets."""
from fractions import Fraction
from math import gcd
from numbers import Number
from linalg import RowSystem
from utility import flatten, grTypeStr, memorize, oppSide, sideStr, tolist
from utility import ACTION_LEFT, ACTION_RIGHT, BIG_GRADING, SMALL_GRADING
class Group(object):
    """Abstract base class for a group; concrete groups override multiply."""
    def multiply(self, elt1, elt2):
        """Return the product of elt1 and elt2 (must be overridden)."""
        raise NotImplementedError("Multiply not implemented.")
class GroupElement(object):
    """Base class for group elements; products are delegated to the group."""
    def __init__(self, parent):
        """Specifies which group this element is in."""
        # The group that owns this element.
        self.parent = parent

    def __mul__(self, other):
        """Multiply via the parent group's multiply()."""
        return self.parent.multiply(self, other)
class BigGradingGroup(Group):
    """The big (refined) grading group associated to a PMC. Elements carry a
    Maslov component and one Spin-c multiplicity per interval of the PMC.
    """
    def __init__(self, pmc):
        self.pmc = pmc
        self.type = BIG_GRADING
        # One Spin-c multiplicity for each of the n-1 intervals.
        self.spinc_len = self.pmc.n - 1

    def __eq__(self, other):
        return self.pmc == other.pmc

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.pmc, "BigGradingGroup"))

    def multiply(self, elt1, elt2):
        """Group law: add Spin-c components; the Maslov component picks up a
        half-integer cross-term from adjacent Spin-c multiplicities.
        """
        if not isinstance(elt1, BigGradingElement):
            return NotImplemented
        if not isinstance(elt2, BigGradingElement):
            return NotImplemented
        assert elt1.parent == self and elt2.parent == self
        s1, s2 = elt1.spinc, elt2.spinc
        cross = sum(s1[i] * s2[i+1] - s1[i+1] * s2[i]
                    for i in range(len(s1) - 1))
        new_maslov = elt1.maslov + elt2.maslov + Fraction(cross, 2)
        new_spinc = [a + b for a, b in zip(s1, s2)]
        return BigGradingElement(self, new_maslov, new_spinc)

    def opp(self):
        """Returns the big grading group associated to the opposite PMC."""
        return BigGradingGroup(self.pmc.opp())

    def zero(self):
        """Returns the zero element of this grading group."""
        return BigGradingElement(self, 0, [0] * self.spinc_len)

    def central(self):
        """Returns the central element (lambda) of this grading group. This has
        maslov component 1 and spinc component zero.
        """
        return BigGradingElement(self, 1, [0] * self.spinc_len)

    def basis(self, i):
        """Returns i'th basis element of the spinc component, with maslov
        component zero.
        """
        vec = [0] * self.spinc_len
        vec[i] = 1
        return BigGradingElement(self, 0, vec)
class BigGradingElement(GroupElement):
    """An element of the big grading group."""
    def __init__(self, parent, maslov, spinc):
        """Specifies the maslov and spinc component of the grading. The spinc
        component is a list of pmc.n-1 multiplicities.
        """
        GroupElement.__init__(self, parent)
        self.maslov = maslov
        self.spinc = list(spinc)
        assert len(self.spinc) == self.parent.pmc.n - 1

    def __eq__(self, other):
        # Comparing against the integer 0 tests whether this is the zero
        # element of the grading group.
        if isinstance(other, int) and other == 0:
            return self.maslov == 0 and all([n == 0 for n in self.spinc])
        return self.parent == other.parent and self.maslov == other.maslov \
            and self.spinc == other.spinc

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # self.spinc is a list, which is unhashable; convert it to a tuple so
        # that hashing does not raise TypeError.
        return hash((self.parent, self.maslov, tuple(self.spinc)))

    def __str__(self):
        return "[%s; %s]" % (str(self.maslov),
                             ", ".join(str(n) for n in self.spinc))

    def __repr__(self):
        return str(self)

    def opp(self):
        """Returns the corresponding grading element in the opposite PMC. Keeps
        the sign of both Maslov and Spin-c components.
        """
        return BigGradingElement(self.parent.opp(), self.maslov,
                                 reversed(self.spinc))

    def Ropp(self):
        """Returns the corresponding grading element in the opposite PMC. Keeps
        the Maslov component and reverses the sign of Spin-c components. (This
        is the R(.) operator in the papers).
        """
        return BigGradingElement(self.parent.opp(), self.maslov,
                                 reversed([-n for n in self.spinc]))

    def inverse(self):
        """Returns the inverse of this grading element. Reverses Maslov and
        Spin-c components.
        """
        return BigGradingElement(self.parent, -self.maslov,
                                 [-n for n in self.spinc])

    def power(self, exp):
        """Returns this grading element raised to the given power (basically
        multiplies the maslov and each spinc component by exp.
        """
        return BigGradingElement(self.parent, self.maslov * exp,
                                 [n * exp for n in self.spinc])

    def toSmallGrading(self):
        r"""Returns the corresponding small grading element. Should be called
        only for elements already in the small grading group (that is,
        M_*\delta(s) = 0). (Raw docstring to keep the backslash literal.)
        """
        pmc = self.parent.pmc
        gr_group = SmallGradingGroup(pmc)
        mult_tmp = list(self.spinc)
        small_mult = []
        for p, q in pmc.pairs:
            # The first entry of pairs should be in ascending order
            lead_mult = mult_tmp[p]
            small_mult.append(lead_mult)
            # Subtract the pair's multiplicity along its whole interval; any
            # residue left at the end means the element was not in the small
            # grading group.
            for pos in range(p, q):
                mult_tmp[pos] -= lead_mult
        assert all([n == 0 for n in mult_tmp])
        return SmallGradingElement(gr_group, self.maslov, small_mult)
class SmallGradingGroup(Group):
    """The small grading group associated to a PMC. Elements carry a Maslov
    component and one Spin-c multiplicity per pair of matched points.
    """
    def __init__(self, pmc):
        self.pmc = pmc
        self.type = SMALL_GRADING
        # One Spin-c multiplicity per matched pair.
        self.spinc_len = self.pmc.num_pair

    def __str__(self):
        return "Small grading group over PMC %s" % str(self.pmc)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return self.pmc == other.pmc

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.pmc, "SmallGradingGroup"))

    def opp(self):
        """Returns the small grading group associated to the opposite PMC."""
        return SmallGradingGroup(self.pmc.opp())

    def multiply(self, elt1, elt2):
        """Group law: add Spin-c components; the Maslov component picks up an
        integer cross-term for each interleaved pair of pairs.
        """
        if not isinstance(elt1, SmallGradingElement):
            return NotImplemented
        if not isinstance(elt2, SmallGradingElement):
            return NotImplemented
        assert elt1.parent == self and elt2.parent == self
        s1, s2 = elt1.spinc, elt2.spinc
        new_maslov = elt1.maslov + elt2.maslov
        # Note this relies on the ordering of the pairs and the points inside
        # each pair.
        pairs = self.pmc.pairs
        num_pair = self.pmc.num_pair
        for i in range(num_pair):
            for j in range(i+1, num_pair):
                if pairs[j][0] < pairs[i][1] < pairs[j][1]:
                    new_maslov += s1[i] * s2[j] - s1[j] * s2[i]
        new_spinc = [a + b for a, b in zip(s1, s2)]
        return SmallGradingElement(self, new_maslov, new_spinc)

    def zero(self):
        """Returns the zero element of this grading group."""
        return SmallGradingElement(self, 0, [0] * self.spinc_len)

    def central(self):
        """Returns the central element (lambda) of this grading group. This has
        maslov component 1 and spinc component zero.
        """
        return SmallGradingElement(self, 1, [0] * self.spinc_len)

    def basis(self, i):
        """Returns i'th basis element of the spinc component, with maslov
        component zero.
        """
        vec = [0] * self.spinc_len
        vec[i] = 1
        return SmallGradingElement(self, 0, vec)
class SmallGradingElement(GroupElement):
    """An element of the small grading group."""
    def __init__(self, parent, maslov, spinc):
        """Specifies the maslov and spinc component of the grading. The spinc
        component is a list of pmc.num_pair multiplicities.
        """
        GroupElement.__init__(self, parent)
        self.maslov = maslov
        self.spinc = list(spinc)
        assert len(self.spinc) == self.parent.pmc.num_pair

    def __eq__(self, other):
        # Comparing against the integer 0 tests whether this is the zero
        # element of the grading group.
        if isinstance(other, int) and other == 0:
            return self.maslov == 0 and all([n == 0 for n in self.spinc])
        return self.parent == other.parent and self.maslov == other.maslov \
            and self.spinc == other.spinc

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # self.spinc is a list, which is unhashable; convert it to a tuple so
        # that hashing does not raise TypeError.
        return hash((self.parent, self.maslov, tuple(self.spinc)))

    def __str__(self):
        return "[%s; %s]" % (str(self.maslov),
                             ", ".join(str(n) for n in self.spinc))

    def __repr__(self):
        return str(self)

    def opp(self):
        """Returns the corresponding grading element in the opposite PMC. Keeps
        the sign of both Maslov and Spin-c components.
        """
        # Round-trip through the big grading group, where opp() is defined
        # directly.
        return self.toBigGrading().opp().toSmallGrading()

    def Ropp(self):
        """Returns the corresponding grading element in the opposite PMC. Keeps
        the Maslov component and reverses the sign of Spin-c components. (This
        is the R(.) operator in the papers).
        """
        return self.toBigGrading().Ropp().toSmallGrading()

    def inverse(self):
        """Returns the inverse of this grading element. Reverses Maslov and
        Spin-c components.
        """
        return SmallGradingElement(self.parent, -self.maslov,
                                   [-n for n in self.spinc])

    def power(self, exp):
        """Returns this grading element raised to the given power (basically
        multiplies the maslov and each spinc component by exp.
        """
        return SmallGradingElement(self.parent, self.maslov * exp,
                                   [n * exp for n in self.spinc])

    def toBigGrading(self):
        """Returns the corresponding big grading element."""
        pmc = self.parent.pmc
        gr_group = BigGradingGroup(pmc)
        big_mult = [0] * (pmc.n - 1)
        # Spread each pair's multiplicity over every interval it spans.
        for i in range(pmc.num_pair):
            p, q = pmc.pairs[i]
            for pos in range(p, q):
                big_mult[pos] += self.spinc[i]
        return BigGradingElement(gr_group, self.maslov, big_mult)
class GradingRefinement(dict):
    """Represents a fixed grading refinement for a PMC, as a dictionary from
    idempotents to big grading elements.
    """
    def __init__(self, pmc, idem_size, data):
        """data is a dictionary mapping idempotents (in the given PMC) to big
        grading elements.
        """
        dict.__init__(self, data)
        self.pmc = pmc
        self.idem_size = idem_size
        # Validate the refinement eagerly, at construction time.
        self._checkRefinement()
    def _checkRefinement(self):
        """Check the validity of this refinement. It suffices to check that,
        for each pair of idempotents (s, t), we have
        M_*\delta(\psi(s) - \psi(t)) = s - t,
        where \psi is the refinement map, \delta takes the differential of the
        corresponding one chain on the border, and M_* is the pairing map. It
        is also enough to check this only between one idempotent and all
        others.
        """
        idems = self.pmc.getIdempotents(self.idem_size)
        # Compare every idempotent against one fixed reference idempotent.
        ref_idem = idems[0]
        for idem in idems:
            gr_diff = self[idem] * self[ref_idem].inverse()
            # Take \delta map
            # (point multiplicities from the interval multiplicities)
            pt_mult = [-gr_diff.spinc[0]]
            for i in range(1, self.pmc.n-1):
                pt_mult.append(gr_diff.spinc[i-1] - gr_diff.spinc[i])
            pt_mult.append(gr_diff.spinc[-1])
            # Take M_* map
            # (sum the point multiplicities over each matched pair)
            idem_mult = [0] * self.pmc.num_pair
            for i in range(self.pmc.n):
                idem_mult[self.pmc.pairid[i]] += pt_mult[i]
            # Now compare with idem - ref_idem
            expected_idem_mult = [0] * self.pmc.num_pair
            for pair in idem:
                expected_idem_mult[pair] += 1
            for pair in ref_idem:
                expected_idem_mult[pair] -= 1
            assert idem_mult == expected_idem_mult
@memorize
def standardRefinementForIdem(pmc, idem):
    """Returns the big grading element assigned to the idempotent idem under
    the "standard" refinement (see standardRefinement below).
    """
    gr_group = BigGradingGroup(pmc)
    idem_size = len(idem)
    mult = [0] * (pmc.n - 1)
    maslov = 0
    for i in range(idem_size):
        # p is the lower point of the i'th pair of idem; p_s is the lower
        # point of the i'th pair of the PMC (the reference idempotent).
        p, q = pmc.pairs[idem[i]]
        p_s, q_s = pmc.pairs[i]
        assert p < q and p_s < q_s
        # Add the signed interval between the two lower points.
        if p > p_s:
            for pos in range(p_s, p):
                mult[pos] += 1
        elif p < p_s:
            for pos in range(p, p_s):
                mult[pos] -= 1
    return BigGradingElement(gr_group, maslov, mult)
@memorize
def standardRefinement(pmc, idem_size = None):
    """Returns the "standard" refinement of the given PMC. For this refinement,
    add together the difference between the lower point for each pair and the
    lower point for each of the idem_size pairs in the PMC.
    """
    # Not suitable as the default refinement: it does not guarantee
    # sd.small_gr.opp() == sd.opp().small_gr for a strand diagram sd.
    if idem_size is None:
        idem_size = pmc.genus
    data = dict((idem, standardRefinementForIdem(pmc, idem))
                for idem in pmc.getIdempotents(idem_size))
    return GradingRefinement(pmc, idem_size, data)
@memorize
def lowerRefinement(pmc, idem_size = None):
    """Returns the "lower" refinement of the given PMC. For this refinement,
    add together the interval from bottom to the lower point for each pair in
    the idempotent.
    """
    # Not suitable as the default refinement: it does not guarantee
    # sd.small_gr.opp() == sd.opp().small_gr for a strand diagram sd.
    gr_group = BigGradingGroup(pmc)
    data = dict()
    for idem in pmc.getIdempotents(idem_size):
        mult = [0] * (pmc.n - 1)
        for pair in idem:
            lower, upper = pmc.pairs[pair]
            assert lower < upper
            # Add the interval from the bottom up to the lower point.
            for pos in range(lower):
                mult[pos] += 1
        data[idem] = BigGradingElement(gr_group, 0, mult)
    return GradingRefinement(pmc, idem_size, data)
@memorize
def averageRefinement(pmc, idem_size = None):
    """Returns the "average" refinement of the given PMC. In this refinement,
    for each point in the idempotent, add 1/8 the interval from bottom to that
    point and subtract 1/8 the interval from that point to the top. For points
    not in the idempotent, reverse the signs.
    """
    data = dict()
    gr_group = BigGradingGroup(pmc)
    eighth = Fraction(1, 8)
    for idem in pmc.getIdempotents(idem_size):
        mult = [0] * (pmc.n - 1)
        for p in range(pmc.n):
            # Points in the idempotent get +1/8 below / -1/8 above; points
            # not in the idempotent get the opposite signs.
            sign = eighth if pmc.pairid[p] in idem else -eighth
            for pos in range(p):
                mult[pos] += sign
            for pos in range(p, pmc.n-1):
                mult[pos] -= sign
        data[idem] = BigGradingElement(gr_group, 0, mult)
    return GradingRefinement(pmc, idem_size, data)
# Default function for getting refinement data
DEFAULT_REFINEMENT = averageRefinement
class GradingSet(object):
    """Represents a general grading set. Can specify an arbitrary number of
    group actions (although only up to two is used).

    This is an abstract base class: subclasses implement the group action
    and the inverse / opp / Ropp operations.
    """
    def __init__(self, actions):
        """actions is a list of tuples (group, side). side must be either
        ACTION_LEFT or ACTION_RIGHT.
        """
        self.actions = actions
        # Either BIG_GRADING or SMALL_GRADING
        # Take type from the first grading group that acts on it. Other grading
        # groups should have the same type.
        self.type = self.actions[0][0].type
    def multiply(self, set_elt, grp_elt):
        """Returns the result of grp_elt acting on set_elt. grp_elt is a list
        of grading group elements, whose length must equal the number of group
        actions.
        """
        raise NotImplementedError("Group action not implemented.")
    def inverse(self):
        """Returns the inverse of this grading set. Change the side of all
        actions and take the inverse of periodic domains.
        """
        raise NotImplementedError("Set inverse not implemented.")
    def opp(self):
        """Returns the opp of this grading set. Take the opp of all algebra
        actions (changing sides at the same time) and all periodic domains.
        """
        raise NotImplementedError("Set opp not implemented.")
    def Ropp(self):
        """Returns the Ropp of this grading set. Take the opp of all algebra
        actions (changing sides at the same time) and take Ropp of all periodic
        domains.
        """
        raise NotImplementedError("Set Ropp not implemented.")
class GradingSetElement(object):
    """Represents an element of a grading set."""
    def __init__(self, parent):
        """Specify which grading set this element is in."""
        self.parent = parent
    def __mul__(self, other):
        """Returns the result of group action on self. other is a (list of)
        grading group element(s), one per action of the parent set.
        """
        other = tolist(other)
        if len(other) != len(self.parent.actions):
            return NotImplemented
        if any(not isinstance(comp, GroupElement) for comp in other):
            return NotImplemented
        return self.parent.multiply(self, other)
    def __add__(self, other):
        """Adding an integer / rational number ``n`` means adding the maslov
        component by ``n`` (multiplying by lambda^n).
        """
        # Subclasses with no group actions must override this.
        if not isinstance(other, Number) or len(self.parent.actions) == 0:
            return NotImplemented
        # Act by lambda^other on the first action, identity on the rest.
        shift = [gr_group.zero() for gr_group, side in self.parent.actions]
        shift[0] = self.parent.actions[0][0].central().power(other)
        return self.parent.multiply(self, shift)
    def __sub__(self, other):
        """See description of __add__."""
        return self + (-other)
    def inverse(self):
        """Returns the inverse of this element, in the inverse grading set."""
        raise NotImplementedError("Element inverse not implemented.")
    def opp(self):
        """Returns the opp of this element, in the opp grading set."""
        raise NotImplementedError("Element opp not implemented.")
    def Ropp(self):
        """Returns the Ropp of this element, in the Ropp grading set."""
        raise NotImplementedError("Element Ropp not implemented.")
class SimpleGradingSet(GradingSet):
    """Represents a grading set with one action, that can be simply written as
    a grading group modulo a list of gradings of periodic domains.
    """
    def __init__(self, gr_group, side, periodic_domains):
        """periodic_domains is a list of elements of gr_group. Multiply by one
        of these on the side opposite to the acting side results in the same
        element.
        """
        GradingSet.__init__(self, [(gr_group, side)])
        self.gr_group = gr_group
        self.side = side
        self.periodic_domains = periodic_domains
        # Set up row system for periodic domains - use their spinc parts
        row_vecs = [domain.spinc for domain in self.periodic_domains]
        self.row_sys = RowSystem(row_vecs)
    def multiply(self, set_elt, grp_elt):
        """Act by grp_elt (a list containing one group element) on set_elt,
        multiplying on the acting side.
        """
        if self.side == ACTION_LEFT:
            new_data = grp_elt[0] * set_elt.data
        else: # self.side == ACTION_RIGHT
            new_data = set_elt.data * grp_elt[0]
        return SimpleGradingSetElement(self, new_data)
    @memorize
    def inverse(self):
        """Returns the inverse grading set: side flipped, domains inverted."""
        new_domains = [domain.inverse() for domain in self.periodic_domains]
        return SimpleGradingSet(self.gr_group, oppSide(self.side), new_domains)
    @memorize
    def opp(self):
        """Returns the opp grading set: opposite group, side flipped."""
        new_domains = [domain.opp() for domain in self.periodic_domains]
        return SimpleGradingSet(self.gr_group.opp(), oppSide(self.side),
                                new_domains)
    @memorize
    def Ropp(self):
        """Returns the Ropp grading set: opposite group, side flipped, Ropp of
        all periodic domains.
        """
        new_domains = [domain.Ropp() for domain in self.periodic_domains]
        return SimpleGradingSet(self.gr_group.opp(), oppSide(self.side),
                                new_domains)
    def eltEquals(self, elt1, elt2):
        """Equality test for grading elements. Uses the row system for periodic
        domains.
        """
        gr1, gr2 = elt1.data, elt2.data
        m1, m2 = gr1.spinc, gr2.spinc
        spinc_diff = [b-a for a, b in zip(m1, m2)]
        # Get the combination of periodic domains that may give elt2 when
        # multiplied with elt1
        comb = self.row_sys.getComb(spinc_diff)
        if comb is None:
            return False
        # Multiply these periodic domains onto elt1 on the side opposite to the
        # action side.
        for i in range(len(self.periodic_domains)):
            to_mult = self.periodic_domains[i].power(comb[i])
            if self.side == ACTION_LEFT:
                gr1 = gr1 * to_mult
            else:
                gr1 = to_mult * gr1
        # At least the spinc part should now be equal
        assert gr1.spinc == gr2.spinc
        return gr1.maslov == gr2.maslov
    def zero(self):
        """Returns the zero element of this grading set."""
        return SimpleGradingSetElement(self, self.gr_group.zero())
    def simplifiedSet(self):
        """Need a consistent interface with GeneralGradingSet."""
        # Already in simplest form.
        return self
    def simplifiedElt(self, elt):
        """Need a consistent interface with GeneralGradingSet."""
        return elt
    def __str__(self):
        gr_group, side = self.actions[0]
        result = "One-sided %s grading set with PMC %s on %s.\n" \
            % (grTypeStr(self.type), str(gr_group.pmc), sideStr(side))
        result += "Periodic domains:\n"
        for domain in self.periodic_domains:
            result += "%s\n" % str(domain)
        return result
    def __repr__(self):
        return str(self)
class SimpleGradingSetElement(GradingSetElement):
    """Represents an element of a SimpleGradingSet."""
    def __init__(self, parent, data):
        """In addition to parent set, specify data as an element of the grading
        group.
        """
        GradingSetElement.__init__(self, parent)
        self.data = data
        assert isinstance(data, GroupElement)
        assert data.parent == self.parent.actions[0][0]
    def __eq__(self, other):
        # Equality test need to take into account of periodic domains
        return self.parent.eltEquals(self, other)
    def __ne__(self, other):
        return not (self == other)
    # Equality is only defined modulo periodic domains, so no consistent hash
    # can be computed cheaply. Declare the class unhashable (hash() raises
    # TypeError) rather than defining __hash__ to return NotImplemented,
    # which is not a valid return value for __hash__.
    __hash__ = None
    def __str__(self):
        return str(self.data)
    def __repr__(self):
        return repr(self.data)
    def inverse(self):
        """Returns the inverse of this element, in the inverse grading set."""
        return SimpleGradingSetElement(self.parent.inverse(),
                                       self.data.inverse())
    def opp(self):
        """Returns the opp of this element, in the opp grading set."""
        return SimpleGradingSetElement(self.parent.opp(), self.data.opp())
    def Ropp(self):
        """Returns the Ropp of this element, in the Ropp grading set."""
        return SimpleGradingSetElement(self.parent.Ropp(), self.data.Ropp())
    def simplifiedElt(self):
        """Need a consistent interface with GeneralGradingSetElement."""
        return self
class SimpleDbGradingSet(GradingSet):
    """Represents a grading set with two actions, that can be written as a pair
    of grading groups modulo a list of gradings of periodic domains.
    """
    def __init__(self, gr_group1, side1, gr_group2, side2, periodic_domains):
        """periodic_domains is a list of pairs of elements of gr_group.
        Multiply by one of these on the side opposite to the acting side (for
        both actions) results in the same element.
        """
        GradingSet.__init__(self, [(gr_group1, side1), (gr_group2, side2)])
        self.gr_group1, self.side1 = gr_group1, side1
        self.gr_group2, self.side2 = gr_group2, side2
        self.periodic_domains = periodic_domains
        # Set up row system for periodic domains - combine the spinc parts
        row_vecs = [d1.spinc + d2.spinc for d1, d2 in self.periodic_domains]
        self.row_sys = RowSystem(row_vecs)
    def multiply(self, set_elt, grp_elt):
        """Act by grp_elt (a pair of group elements, one per action) on
        set_elt, each on its own acting side.
        """
        new_data = [None, None]
        for i in range(len(self.actions)):
            gr_group, side = self.actions[i]
            if side == ACTION_LEFT:
                new_data[i] = grp_elt[i] * set_elt.data[i]
            else: # side == ACTION_RIGHT
                new_data[i] = set_elt.data[i] * grp_elt[i]
        return SimpleDbGradingSetElement(self, new_data)
    def oppMultiply(self, set_elt, grp_elt):
        """Multiply on the side opposite to that of action. Usually used for
        multiplying on periodic domains. grp_elt is a list or tuple of grading
        group elements.
        """
        new_data = [None, None]
        for i in range(len(self.actions)):
            gr_group, side = self.actions[i]
            if side == ACTION_LEFT:
                new_data[i] = set_elt.data[i] * grp_elt[i]
            else: # side == ACTION_RIGHT:
                new_data[i] = grp_elt[i] * set_elt.data[i]
        return SimpleDbGradingSetElement(self, new_data)
    @memorize
    def inverse(self):
        """Returns the inverse grading set: the two actions are swapped, their
        sides flipped, and the periodic domains inverted.
        """
        new_domains = [(d2.inverse(), d1.inverse())
                       for d1, d2 in self.periodic_domains]
        return SimpleDbGradingSet(self.gr_group2, oppSide(self.side2),
                                  self.gr_group1, oppSide(self.side1),
                                  new_domains)
    @memorize
    def opp(self):
        """Returns the opp grading set: actions swapped and opp'ed."""
        new_domains = [(d2.opp(), d1.opp()) for d1, d2 in self.periodic_domains]
        return SimpleDbGradingSet(self.gr_group2.opp(), oppSide(self.side2),
                                  self.gr_group1.opp(), oppSide(self.side1),
                                  new_domains)
    @memorize
    def Ropp(self):
        """Returns the Ropp grading set: actions swapped and opp'ed, Ropp taken
        of all periodic domains.
        """
        new_domains = [(d2.Ropp(), d1.Ropp())
                       for d1, d2 in self.periodic_domains]
        return SimpleDbGradingSet(self.gr_group2.opp(), oppSide(self.side2),
                                  self.gr_group1.opp(), oppSide(self.side1),
                                  new_domains)
    @memorize
    def partialRopp(self, action_id):
        """Take Ropp on one of the actions."""
        if action_id == 0:
            new_domains = [(d2, d1.Ropp()) for d1, d2 in self.periodic_domains]
            return SimpleDbGradingSet(self.gr_group2, self.side2,
                self.gr_group1.opp(), oppSide(self.side1), new_domains)
        else:
            assert action_id == 1
            new_domains = [(d1, d2.Ropp()) for d1, d2 in self.periodic_domains]
            return SimpleDbGradingSet(
                self.gr_group1, self.side1,
                self.gr_group2.opp(), oppSide(self.side2), new_domains)
    def eltEquals(self, elt1, elt2):
        """Equality test for grading elements. Uses the row system for periodic
        domains.
        """
        (gr11, gr12), (gr21, gr22) = elt1.data, elt2.data
        m1 = gr11.spinc + gr12.spinc
        m2 = gr21.spinc + gr22.spinc
        spinc_diff = [b-a for a, b in zip(m1, m2)]
        # Get the combination of periodic domains that may give elt2 when
        # multiplied with elt1
        # Use the more lenient form of comparison, allowing rational multiple
        # of periodic domains
        comb = self.row_sys.getComb(spinc_diff, use_rational = True)
        if comb is None:
            return False
        # Multiply these periodic domains onto elt1 on the side opposite to the
        # action side.
        for i in range(len(self.periodic_domains)):
            to_mult = [comp.power(comb[i])
                       for comp in self.periodic_domains[i]]
            elt1 = self.oppMultiply(elt1, to_mult)
        # At least the spinc part should now be equal
        assert elt1.data[0].spinc == elt2.data[0].spinc
        assert elt1.data[1].spinc == elt2.data[1].spinc
        # Check the (sum of) maslov part is equal
        return elt1.data[0].maslov + elt1.data[1].maslov == \
            elt2.data[0].maslov + elt2.data[1].maslov
    def shiftRight(self, elt):
        """Use the periodic domains to change elt into a form where the first
        component is zero. Returns the new element if this is possible, and
        None if otherwise.
        """
        spinc = elt.data[0].spinc + elt.data[1].spinc
        comb, reduced_vec = self.row_sys.vecReduce(spinc, use_rational = False)
        if comb is None:
            return None
        for i in range(len(self.periodic_domains)):
            to_mult = [comp.power(-comb[i])
                       for comp in self.periodic_domains[i]]
            elt = self.oppMultiply(elt, to_mult)
        if not all([n == 0 for n in elt.data[0].spinc]):
            return None
        # Move the leftover Maslov component onto the second factor.
        # NOTE(review): this mutates elt.data in place. The loop above
        # normally rebuilds elt into a fresh element; with an empty
        # periodic_domains list the caller's element would be mutated --
        # confirm callers always pass a freshly constructed element.
        elt.data[1].maslov += elt.data[0].maslov
        elt.data[0].maslov = 0
        return elt
    def zero(self):
        """Returns the zero element of this grading set."""
        return SimpleDbGradingSetElement(self, (self.gr_group1.zero(),
                                                self.gr_group2.zero()))
    def __str__(self):
        result = "Two-sided %s grading set with PMC %s on %s and %s on %s.\n" \
            % (grTypeStr(self.type),
               str(self.gr_group1.pmc), sideStr(self.side1),
               str(self.gr_group2.pmc), sideStr(self.side2))
        result += "Periodic domains:\n"
        for domain in self.periodic_domains:
            result += "%s\n" % strDbGrading(domain)
        return result
    def isAutomorphism(self):
        """Test whether the periodic domains of this grading set determine an
        automorphism of grading groups. This is true if the number of periodic
        domains agrees with the dimension of spinc component (of one side), and
        that the square matrix formed by the periodic domains on each side is
        invertible as an integer matrix.
        Being an automorphism means this grading set can be tensored to another
        without increasing its size.
        """
        # Equivalent check: every basis element on the first side can be
        # shifted entirely onto the second side.
        for i in range(self.gr_group1.spinc_len):
            elt = SimpleDbGradingSetElement(
                self, [self.gr_group1.basis(i), self.gr_group2.zero()])
            if self.shiftRight(elt) is None:
                return False
        return True
    def simplifiedSet(self):
        """Need a consistent interface with GeneralGradingSet."""
        # Already in simplest form.
        return self
    def simplifiedElt(self, elt):
        """Need a consistent interface with GeneralGradingSet."""
        return elt
    def __repr__(self):
        return str(self)
class SimpleDbGradingSetElement(GradingSetElement):
    """Represents an element of a SimpleDbGradingSet."""
    def __init__(self, parent, data):
        """In addition to parent set, specify data as a pair of elements of
        grading groups.
        """
        GradingSetElement.__init__(self, parent)
        self.data = data
        assert len(data) == 2
        assert isinstance(data[0], GroupElement)
        assert isinstance(data[1], GroupElement)
        assert data[0].parent == self.parent.actions[0][0]
        assert data[1].parent == self.parent.actions[1][0]
    def __eq__(self, other):
        # Equality test need to take into account of periodic domains
        return self.parent.eltEquals(self, other)
    def __ne__(self, other):
        return not (self == other)
    # Equality is only defined modulo periodic domains, so no consistent hash
    # can be computed cheaply. Declare the class unhashable (hash() raises
    # TypeError) rather than defining __hash__ to return NotImplemented,
    # which is not a valid return value for __hash__.
    __hash__ = None
    def __str__(self):
        return strDbGrading(self.data)
    def __repr__(self):
        return str(self)
    def inverse(self):
        """Returns the inverse of this element, in the inverse grading set.
        The two components are swapped and inverted.
        """
        d1, d2 = self.data
        return SimpleDbGradingSetElement(self.parent.inverse(),
                                         (d2.inverse(), d1.inverse()))
    def opp(self):
        """Returns the opp of this element, in the opp grading set."""
        d1, d2 = self.data
        return SimpleDbGradingSetElement(self.parent.opp(),
                                         (d2.opp(), d1.opp()))
    def Ropp(self):
        """Returns the Ropp of this element, in the Ropp grading set."""
        d1, d2 = self.data
        return SimpleDbGradingSetElement(self.parent.Ropp(),
                                         (d2.Ropp(), d1.Ropp()))
    def partialRopp(self, action_id):
        """Take Ropp on one of the actions."""
        d1, d2 = self.data
        if action_id == 0:
            return SimpleDbGradingSetElement(self.parent.partialRopp(0),
                                             (d2, d1.Ropp()))
        else:
            assert action_id == 1
            return SimpleDbGradingSetElement(self.parent.partialRopp(1),
                                             (d1, d2.Ropp()))
    def shiftRight(self):
        """See the function of same name in SimpleDbGradingSet for details."""
        return self.parent.shiftRight(self)
    def simplifiedElt(self):
        """Need a consistent interface with GeneralGradingSetElement."""
        return self
class GeneralGradingSet(GradingSet):
    """Represents a general grading set formed by tensoring together several
    simple grading sets. The right action of each component should match the
    left action of the next component. The first action of the first component
    and the second action of the last component, if they have two actions, form
    the group action of the compound grading set.
    """
    def __init__(self, comps):
        """Initialize using the list of component grading sets. Group actions
        on this compound grading set can be deduced from these.
        Entries in comps can be GeneralGradingSet themselves. In this case it
        is expanded into its components.
        """
        # Process comps by expanding possible GeneralGradingSet into components
        processed_comps = []
        for comp in comps:
            if isinstance(comp, GeneralGradingSet):
                processed_comps += comp.comps
            else:
                processed_comps.append(comp)
        comps = processed_comps
        self.comps = comps
        self.num_comp = len(self.comps)
        self.actions = []
        # For each outer action, (component index, action index within that
        # component) identifying where the action lands.
        self.action_comp_id = []
        # Middle components must be LEFT-RIGHT
        def assertLeftRight(gr_set):
            """Assert gr_set is a LEFT-RIGHT double grading set."""
            assert isinstance(gr_set, SimpleDbGradingSet)
            assert gr_set.side1 == ACTION_LEFT and gr_set.side2 == ACTION_RIGHT
        for i in range(1, self.num_comp-1):
            assertLeftRight(self.comps[i])
        # First component is either ?-RIGHT or RIGHT
        if isinstance(self.comps[0], SimpleGradingSet):
            assert self.comps[0].side == ACTION_RIGHT
        else:
            assert self.comps[0].side2 == ACTION_RIGHT
            self.actions.append((self.comps[0].gr_group1, self.comps[0].side1))
            self.action_comp_id.append((0, 0))
        # Last component is either LEFT-? or LEFT
        if isinstance(self.comps[-1], SimpleGradingSet):
            assert self.comps[-1].side == ACTION_LEFT
        else:
            assert self.comps[-1].side1 == ACTION_LEFT
            self.actions.append((self.comps[-1].gr_group2,
                                 self.comps[-1].side2))
            self.action_comp_id.append((-1, 1))
        # Initialize GradingSet
        self.type = self.comps[0].type
        # Set up row system
        # Each entry of row_action shows the multiplication actions to take
        # when the corresponding row should be added. It is a list of
        # quadruples (ID1, ID2, gr, side), where ID1 is the ID within gr_sets,
        # ID2 is either 0 or 1 giving index within a gr_set (for single grading
        # sets it is always 0). gr is the big/small grading element to be
        # multiplied. side is the side it is multiplied to.
        self.row_action = []
        # Add rows corresponding to periodic domains
        for i in range(self.num_comp):
            gr_set = self.comps[i]
            for domain in gr_set.periodic_domains:
                if i == 0 and isinstance(domain, GroupElement):
                    cur_action = [(0, 0, domain, ACTION_LEFT)]
                elif i == self.num_comp-1 and isinstance(domain, GroupElement):
                    cur_action = [(self.num_comp-1, 0, domain, ACTION_RIGHT)]
                else:
                    cur_action = [(i, 0, domain[0], oppSide(gr_set.side1)),
                                  (i, 1, domain[1], oppSide(gr_set.side2))]
                self.row_action.append(cur_action)
        # Add rows corresponding to tensoring
        for comp_id in range(self.num_comp-1):
            if isinstance(self.comps[comp_id], SimpleGradingSet):
                spinc_len = self.comps[comp_id].gr_group.spinc_len
            else:
                spinc_len = self.comps[comp_id].gr_group2.spinc_len
            for basis_id in range(spinc_len):
                cur_action = self._getTensorAction(comp_id, basis_id)
                self.row_action.append(cur_action)
        row_vecs = [self._vecFromAction(action) for action in self.row_action]
        self.row_sys = RowSystem(row_vecs)
        # Row systems that exclude periodic domains on single grading sets.
        # Useful for simplifying grading elements (see simplifiedElt())
        self.row_action_db_only = [action for action in self.row_action
                                   if len(action) == 2]
        row_vecs_db_only = [self._vecFromAction(action)
                            for action in self.row_action_db_only]
        self.row_sys_db_only = RowSystem(row_vecs_db_only)
        # Find list of zero combinations of rows (these correspond to
        # provincial periodic domains, that may introduce ambiguity in the
        # maslov component.
        self.zero_combs = self.row_sys.getZeroComb()
    def _vecFromAction(self, vec_action):
        """Get row vector for a given action. Basically copy the spinc
        components to the right locations.
        """
        # Compute the start offset of each (set, action) slot within the
        # concatenated spinc vector.
        start_pos = dict()
        cur_pos = 0
        for set_id in range(self.num_comp):
            gr_set = self.comps[set_id]
            for action_id in range(len(gr_set.actions)):
                gr_group, side = gr_set.actions[action_id]
                cur_len = len(gr_group.zero().spinc)
                start_pos[(set_id, action_id)] = cur_pos
                cur_pos += cur_len
        total_len = cur_pos
        vec = [0] * total_len
        for set_id, action_id, elt, side in vec_action:
            spinc = elt.spinc
            start = start_pos[(set_id, action_id)]
            for i in range(len(spinc)):
                vec[start+i] += spinc[i]
        return vec
    def _getTensorAction(self, comp_id, basis_id):
        """Returns the row action encoding the tensor relation between
        components comp_id and comp_id+1: multiplying a basis element onto the
        right of one component, and its inverse onto the left of the next,
        leaves the overall element unchanged.
        """
        if comp_id == 0 and len(self.comps[0].actions) == 1:
            gr_group = self.comps[0].gr_group
            action_id1 = 0
        else:
            gr_group = self.comps[comp_id].gr_group2
            action_id1 = 1
        basis = gr_group.basis(basis_id)
        return [(comp_id, action_id1, basis, ACTION_RIGHT),
                (comp_id+1, 0, basis.inverse(), ACTION_LEFT)]
    def multiply(self, set_elt, grp_elt):
        """Act by grp_elt (one group element per action of this set) on
        set_elt, by acting on the appropriate component element.
        """
        elt_data = list(set_elt.data)
        for i in range(len(self.actions)):
            comp, action_id = self.action_comp_id[i]
            # Note group action for set elements are always written on the
            # right in python.
            if action_id == 0:
                if len(self.comps[comp].actions) == 1:
                    elt_data[comp] = elt_data[comp] * grp_elt[i]
                else:
                    to_mult = [grp_elt[i],
                               self.comps[comp].actions[1][0].zero()]
                    elt_data[comp] = elt_data[comp] * to_mult
            else: # action_id == 1
                to_mult = [self.comps[comp].actions[0][0].zero(), grp_elt[i]]
                elt_data[comp] = elt_data[comp] * to_mult
        return GeneralGradingSetElement(self, elt_data)
    @memorize
    def inverse(self):
        """Returns the inverse grading set: components reversed, each
        inverted.
        """
        return GeneralGradingSet(
            reversed([comp.inverse() for comp in self.comps]))
    @memorize
    def opp(self):
        """Returns the opp grading set: components reversed, opp of each."""
        return GeneralGradingSet(reversed([comp.opp() for comp in self.comps]))
    @memorize
    def Ropp(self):
        """Returns the Ropp grading set: components reversed, Ropp of each."""
        return GeneralGradingSet(
            reversed([comp.Ropp() for comp in self.comps]))
    @memorize
    def partialRopp(self, action_id):
        """Take Ropp on one of the actions."""
        assert len(self.actions) == 2
        if action_id == 0:
            raise NotImplementedError("partialRopp on first not implemented.")
        else:
            assert action_id == 1
            return GeneralGradingSet(self.comps[0:-1] + \
                                         [self.comps[-1].partialRopp(1)])
    def _performActionListForm(self, elt_list, comb, db_only = False):
        """Perform the set of row actions given by comb on elt_list (list form
        given by elt.listForm(). Return the resulting list (original list may
        also be changed).
        If db_only is set to True, use the db_only row system (exclude periodic
        domains on single grading sets).
        """
        if db_only:
            action_list = self.row_action_db_only
        else:
            action_list = self.row_action
        for i in range(len(action_list)):
            vec_action = action_list[i]
            for set_id, action_id, elt, side in vec_action:
                # Raise the row's grading element to the power prescribed by
                # comb[i], then multiply on the given side.
                to_mult = elt.power(comb[i])
                if side == ACTION_LEFT:
                    elt_list[set_id][action_id] = \
                        to_mult * elt_list[set_id][action_id]
                else: # side == ACTION_RIGHT
                    elt_list[set_id][action_id] = \
                        elt_list[set_id][action_id] * to_mult
        return elt_list
    def eltEquals(self, elt1, elt2):
        """Equality test for grading elements. Use row system to identify
        actions to take and use self.row_action to carry them out.
        """
        # Use more lenient form of comparison for double grading sets
        if len(self.actions) == 2:
            use_rational = True
        else:
            use_rational = False
        maslov_diff, mod, spinc_diff = \
            self.eltDiffShortForm(elt1, elt2, use_rational)
        if spinc_diff is None:
            return False
        if not all([n == 0 for n in spinc_diff]):
            return False
        # mod is the ambiguity in the Maslov component coming from provincial
        # periodic domains; 0 means the difference is well-defined.
        if mod == 0:
            return maslov_diff == 0
        if isinstance(maslov_diff, Fraction):
            if maslov_diff.denominator != 1: return False
            return maslov_diff.numerator % mod == 0
        else: # maslov_diff is int
            return maslov_diff % mod == 0
    def eltDiffShortForm(self, elt1, elt2, use_rational = False):
        """Returns the difference (maslov, mod, spinc) between two elements in
        short form. Measures the grading of elt1 with reference to elt2
        (not the other way around!).
        """
        elt1.clearFirst()
        elt2.clearFirst()
        # Form vectors corresponding to the two elements
        m1 = sum([comp.spinc for comp in flatten(elt1.listForm())], [])
        m2 = sum([comp.spinc for comp in flatten(elt2.listForm())], [])
        spinc_diff = [b-a for a, b in zip(m1, m2)]
        # Get the combination of row actions that give elt2, up to a difference
        # in standard form, when performed on elt1
        if not use_rational:
            # A fractional difference cannot be matched by integer
            # combinations of rows.
            for n in spinc_diff:
                if isinstance(n, Fraction) and n.denominator != 1:
                    return (0, 0, None)
        comb, reduced_vec = self.row_sys.vecReduce(spinc_diff, use_rational)
        # Perform these actions
        elt1_list = self._performActionListForm(elt1.listForm(), comb)
        # Find and compare the sum of maslov parts.
        elt2_list = elt2.listForm()
        sum1 = sum(comp.maslov for comp in flatten(elt1_list))
        sum2 = sum(comp.maslov for comp in flatten(elt2_list))
        maslov_diff = sum1 - sum2
        # Are there zero combinations that could introduce an ambiguity?
        # The gcd of the Maslov shifts they induce is the modulus.
        cur_gcd = 0
        for zero_comb in self.zero_combs:
            elt1_list_after = self._performActionListForm(elt1_list, zero_comb)
            sum_after = sum(comp.maslov for comp in flatten(elt1_list_after))
            cur_gcd = gcd(cur_gcd, abs(sum_after - sum1))
        mod = cur_gcd
        spinc_short = self.row_sys.shortForm(reduced_vec, use_rational)
        return (maslov_diff, mod, [-n for n in spinc_short])
    def eltAbsoluteGrading(self, elt):
        """Returns the absolute grading of element. Element must be in the zero
        spinc class.
        """
        maslov, mod, spinc_short = \
            self.eltDiffShortForm(elt, self.zero(), use_rational = True)
        assert mod == 0 and spinc_short is not None and \
            all([n == 0 for n in spinc_short])
        return maslov
    def zero(self):
        """Returns the zero element of this grading set."""
        zero_data = [gr_set.zero() for gr_set in self.comps]
        return GeneralGradingSetElement(self, zero_data)
    @memorize
    def canSimplify(self):
        """Returns whether the general grading set can be simplified down to one
        SimpleGradingSet or SimpleDbGradingSet. If the grading set has one
        action, all of its double components must be automorphisms. If the
        grading set has two actions, all but the first component must be
        automorphisms.
        """
        assert len(self.actions) > 0
        if isinstance(self.comps[0], SimpleGradingSet):
            return all([gr_set.isAutomorphism() for gr_set in self.comps[1:]])
        elif isinstance(self.comps[-1], SimpleGradingSet):
            return all([gr_set.isAutomorphism() for gr_set in self.comps[0:-1]])
        else:
            return all([gr_set.isAutomorphism() for gr_set in self.comps[1:]])
    @memorize
    def simplifiedSet(self):
        """Returns an equivalent SimpleGradingSet or SimpleDbGradingSet using
        the automorphism property of its double components.
        """
        assert len(self.actions) > 0
        new_pdomains = []
        if not self.canSimplify():
            return self
        if isinstance(self.comps[0], SimpleGradingSet):
            # Push each periodic domain of the first component through the
            # chain of double components via shiftRight.
            for p_domain in self.comps[0].periodic_domains:
                elt = p_domain
                for i in range(1, len(self.comps)):
                    elt = SimpleDbGradingSetElement(
                        self.comps[i], [elt, self.comps[i].gr_group2.zero()])
                    elt = elt.shiftRight()
                    elt = elt.data[1]
                new_pdomains.append(elt)
            gr_group, side = self.comps[-1].actions[1]
            return SimpleGradingSet(gr_group, side, new_pdomains)
        elif isinstance(self.comps[-1], SimpleGradingSet):
            # Reduce to the previous case by inverting.
            return self.inverse().simplifiedSet().inverse()
        else: # Double grading set
            for p_domain in self.comps[0].periodic_domains:
                elt1, elt2 = p_domain
                for i in range(1, len(self.comps)):
                    elt2 = SimpleDbGradingSetElement(
                        self.comps[i], [elt2, self.comps[i].gr_group2.zero()])
                    elt2 = elt2.shiftRight()
                    elt2 = elt2.data[1]
                new_pdomains.append([elt1, elt2])
            gr_group1, side1 = self.comps[0].actions[0]
            gr_group2, side2 = self.comps[-1].actions[1]
            return SimpleDbGradingSet(
                gr_group1, side1, gr_group2, side2, new_pdomains)
    def simplifiedElt(self, elt):
        """Returns the version of elt in simplified form of this set."""
        assert len(self.actions) > 0
        if not self.canSimplify():
            return elt
        if isinstance(self.comps[-1], SimpleGradingSet):
            return self.inverse().simplifiedElt(elt.inverse()).inverse()
        spinc = sum([comp.spinc for comp in flatten(elt.listForm())], [])
        comb, reduced_vec = self.row_sys_db_only.vecReduce(
            spinc, use_rational = True)
        comb = [-n for n in comb]
        elt_list = flatten(
            self._performActionListForm(elt.listForm(), comb, db_only = True))
        # After the actions, all but the last component carry zero spinc;
        # accumulate their Maslov parts onto the last component.
        prev_maslov_sum = 0
        for comp in elt_list[:-1]:
            assert all([n == 0 for n in comp.spinc])
            prev_maslov_sum += comp.maslov
        elt_list[-1].maslov += prev_maslov_sum
        simpl_set = self.simplifiedSet()
        if isinstance(self.comps[0], SimpleGradingSet):
            return SimpleGradingSetElement(simpl_set, elt_list[-1])
        else:
            return SimpleDbGradingSetElement(
                simpl_set, [simpl_set.gr_group1.zero(), elt_list[-1]])
    def __str__(self):
        result = "Composite grading set.\n"
        result += "\n".join([str(gr_set) for gr_set in self.comps])
        return result
    def __repr__(self):
        return str(self)
class GeneralGradingSetElement(GradingSetElement):
    """Represents an element of a GeneralGradingSet, stored as one
    Simple(Db)GradingSetElement per component of the parent set.
    """
    def __init__(self, parent, data):
        """In addition to parent set, specify data as a list of
        SimpleGradingSetElement or SimpleDbGradingSetElement, one for each
        element of parent.gr_sets.
        Entries in data can be GeneralGradingSetElement themselves. In this
        case it is expanded into its components.
        """
        GradingSetElement.__init__(self, parent)
        # Flatten any nested GeneralGradingSetElement entries into their
        # component elements.
        processed_data = []
        for comp in data:
            if isinstance(comp, GeneralGradingSetElement):
                processed_data += comp.data
            else:
                processed_data.append(comp)
        data = processed_data
        # Each component must belong to the matching component set of parent.
        for i in range(len(data)):
            assert data[i].parent == self.parent.comps[i]
        self.data = data
    def clearFirst(self):
        """Clear out the first part of grading in all components starting at
        the second, using the tensor relations. This is useful in avoiding
        fractions when comparing elements and putting their differences in
        short form.
        """
        for i in range(len(self.data)-1):
            # Grading to move: the first (left) part of component i+1.
            if isinstance(self.data[i+1], SimpleDbGradingSetElement):
                data = self.data[i+1].data[0]
            else:
                data = self.data[i+1].data
            # Multiply it onto the right side of component i ...
            if isinstance(self.data[i], SimpleDbGradingSetElement):
                to_mult = [self.data[i].parent.gr_group1.zero(), data]
            else:
                to_mult = [data]
            self.data[i] = self.data[i] * to_mult
            # ... and cancel it (by its inverse) from component i+1.
            if isinstance(self.data[i+1], SimpleDbGradingSetElement):
                to_mult = [data.inverse(),
                           self.data[i+1].parent.gr_group2.zero()]
            else:
                to_mult = [data.inverse()]
            self.data[i+1] = self.data[i+1] * to_mult
    def listForm(self):
        """Present grading data in this element as a list of lists of grading
        group elements, one list for each Simple(Db)GradingSetElement.
        """
        result = []
        for elt in self.data:
            if isinstance(elt.data, GroupElement):
                # Simple element: a single grading group element.
                result.append([elt.data])
            else:
                # Double element: a pair of grading group elements.
                result.append(list(elt.data))
        return result
    def __add__(self, other):
        # Overridden to take care of the case of no group actions.
        # Adds the number to the first component only.
        if not isinstance(other, Number):
            return NotImplemented
        return GeneralGradingSetElement(self.parent,
                                        [self.data[0] + other] + self.data[1:])
    def __eq__(self, other):
        # Equality test need to take into account of tensor product as well as
        # periodic domains.
        return self.parent.eltEquals(self, other)
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # Dangerous to use hash since equality test is tricky.
        # NOTE(review): returning NotImplemented makes hash() raise TypeError,
        # which effectively marks the class unhashable.
        return NotImplemented
    def __str__(self):
        return str(self.data)
    def __repr__(self):
        return repr(self.data)
    def inverse(self):
        # Components are individually inverted, and their order reversed to
        # match the reversed parent set.
        return GeneralGradingSetElement(
            self.parent.inverse(), reversed([d.inverse() for d in self.data]))
    def opp(self):
        return GeneralGradingSetElement(
            self.parent.opp(), reversed([d.opp() for d in self.data]))
    def Ropp(self):
        return GeneralGradingSetElement(
            self.parent.Ropp(), reversed([d.Ropp() for d in self.data]))
    def partialRopp(self, action_id):
        """Take Ropp on one of the actions."""
        assert len(self.parent.actions) == 2
        if action_id == 0:
            raise NotImplementedError("partialRopp on first not implemented.")
        else:
            assert action_id == 1
            # Only the last component carries the second action.
            return GeneralGradingSetElement(
                self.parent.partialRopp(1),
                self.data[0:-1] + [self.data[-1].partialRopp(1)])
    def simplifiedElt(self):
        """Returns the equivalent element in the simplified version of parent.
        """
        return self.parent.simplifiedElt(self)
def strDbGrading(data):
    """data is a pair of gradings. Print in the [maslov; spinc1; spinc2]
    format, where the Maslov components are summed.
    """
    gr1, gr2 = data
    maslov_total = gr1.maslov + gr2.maslov
    spinc1_str = ", ".join(str(n) for n in gr1.spinc)
    spinc2_str = ", ".join(str(n) for n in gr2.spinc)
    return "[%s; %s; %s]" % (maslov_total, spinc1_str, spinc2_str)
| 53,366
| 37.283357
| 80
|
py
|
bfh_python
|
bfh_python-master/algebratest.py
|
"""Unit test for algebra.py"""
from algebra import *
from pmc import splitPMC
from utility import ZZ
import unittest
class ChainComplexTest(unittest.TestCase):
    def testChainComplex(self):
        """Differentials in a small complex over F2."""
        cx = SimpleChainComplex(F2)
        g = []
        for i in range(3):
            gen = SimpleGenerator(cx, "gen%d" % i)
            cx.addGenerator(gen)
            g.append(gen)
        cx.addDifferential(g[1], g[0], 1)
        cx.addDifferential(g[2], g[0], 1)
        self.assertEqual(g[0].diff(), 0)
        self.assertEqual(g[1].diff(), 1*g[0])
        self.assertEqual(g[2].diff(), 1*g[0])
        # Over F2 the two differentials cancel in the sum.
        self.assertEqual((1*g[1] + 1*g[2]).diff(), 0)
        self.assertEqual(len(cx), 3)
        cx.reindex()
        self.assertEqual(len(cx), 3)
        identity = cx.id()
        self.assertTrue(identity.isQI())
    def testChainComplexOverZ(self):
        """Differentials in a small complex over Z, with signs."""
        cx = SimpleChainComplex(ZZ)
        g = []
        for i in range(4):
            gen = SimpleGenerator(cx, "gen%d" % i)
            cx.addGenerator(gen)
            g.append(gen)
        cx.addDifferential(g[1], g[0], 1)
        cx.addDifferential(g[2], g[0], 1)
        cx.addDifferential(g[1], g[3], 1)
        cx.addDifferential(g[2], g[3], -1)
        self.assertEqual(g[0].diff(), 0)
        self.assertEqual(g[1].diff(), 1*g[0]+1*g[3])
        self.assertEqual(g[2].diff(), 1*g[0]-1*g[3])
        # The g[3] terms cancel while the g[0] terms add.
        self.assertEqual((1*g[1] + 1*g[2]).diff(), 2*g[0])
class TensorStarTest(unittest.TestCase):
    def testTensorStarAlgebra(self):
        """Differential on generators of the cobar (tensor-star) algebra."""
        pmc = splitPMC(2)
        cobar = CobarAlgebra(pmc.getAlgebra())
        def mkgen(*sds):
            # Wrap a sequence of strand diagrams as a tensor-star generator.
            return TensorStarGenerator(tuple(sds), cobar)
        sd1 = pmc.sd([(0,1),(1,2)])
        sd2 = pmc.sd([4,(1,3)])
        sd3 = pmc.sd([4,(0,1)])
        # Has sd3.diff() = sd1 and sd4*sd5 = sd2
        sd4 = pmc.sd([1,(0,2)])
        sd5 = pmc.sd([4,(1,2)])
        sd6 = pmc.sd([4,(2,3)])
        gen1 = mkgen(sd1)
        gen2 = mkgen(sd2)
        gen3 = mkgen(sd1, sd2)
        # Length-zero generator, determined by an idempotent.
        gen4 = TensorStarGenerator(tuple(), cobar, pmc.getIdempotents()[0])
        gen5 = mkgen(sd3, sd3, sd3)
        self.assertEqual(gen1.diff(), 1*mkgen(sd4))
        self.assertEqual(gen2.diff(), 1*mkgen(sd5, sd6))
        self.assertEqual(gen3.diff(),
                         1*mkgen(sd1, sd5, sd6)+1*mkgen(sd4, sd2))
        self.assertEqual(gen4.diff(), 0)
        self.assertEqual(gen5.diff(), 0)
class SolveF2SystemTest(unittest.TestCase):
    def testSolveF2System(self):
        """Solve several right-hand sides against one sparse F2 matrix."""
        # Represents the matrix:
        # [[1,1,0,0,0],
        #  [0,0,1,1,0],
        #  [0,0,0,0,1],
        #  [0,0,1,0,0]]
        num_row, num_col = 4, 5
        entries = [(0, 0), (0, 1), (1, 2), (1, 3), (2, 4), (3, 2)]
        # Pairs of (target vector, expected combination of rows); None means
        # the system has no solution.
        cases = [
            ([], []),
            ([0,1,2,3,4], [0,1,2]),
            ([0,1,4], [0,2]),
            ([0,1,2,4], [0,2,3]),
            ([0,1,3,4], [0,1,2,3]),
            ([0], None),
        ]
        for vec, expected in cases:
            self.assertEqual(
                solveOverF2(num_row, num_col, entries, vec), expected)
# Run the tests in this module when invoked directly.
if __name__ == "__main__":
    unittest.main()
| 3,191
| 34.466667
| 78
|
py
|
bfh_python
|
bfh_python-master/gradingtest.py
|
"""Unit test for grading.py"""
from grading import *
from pmc import *
import unittest
class BigGradingTest(unittest.TestCase):
    def testMultiply(self):
        """Products in the big grading group and the round trip through the
        small grading group.
        """
        bgrp = BigGradingGroup(splitPMC(1))
        a = BigGradingElement(bgrp, 0, [1,0,0])
        b = BigGradingElement(bgrp, 0, [0,1,0])
        # The two products differ in their Maslov correction.
        ab = BigGradingElement(bgrp, Fraction(1,2), [1,1,0])
        ba = BigGradingElement(bgrp, Fraction(-1,2), [1,1,0])
        self.assertEqual(a * b, ab)
        self.assertEqual(b * a, ba)
        self.assertEqual(ab, ab.toSmallGrading().toBigGrading())
class SmallGradingTest(unittest.TestCase):
    def testMultiply(self):
        """Products in the small grading group and the round trip through the
        big grading group.
        """
        sgrp = SmallGradingGroup(splitPMC(1))
        a = SmallGradingElement(sgrp, 0, [1,0])
        b = SmallGradingElement(sgrp, 0, [0,1])
        ab = SmallGradingElement(sgrp, 1, [1,1])
        ba = SmallGradingElement(sgrp, -1, [1,1])
        self.assertEqual(a * b, ab)
        self.assertEqual(b * a, ba)
        self.assertEqual(a, a.toBigGrading().toSmallGrading())
        self.assertEqual(b, b.toBigGrading().toSmallGrading())
class TestCommonRefinement(unittest.TestCase):
    # Smoke tests: each refinement is merely computed for a sample of PMCs;
    # the test passes as long as no exception is raised.
    def testStandardRefinement(self):
        for pmc in [splitPMC(1), splitPMC(2), antipodalPMC(2)]:
            standardRefinement(pmc)
    def testLowerRefinement(self):
        for pmc in [splitPMC(1), splitPMC(2), antipodalPMC(2)]:
            lowerRefinement(pmc)
    def testAverageRefinement(self):
        for pmc in [splitPMC(1), splitPMC(2), antipodalPMC(2)]:
            averageRefinement(pmc)
class TestSimpleGradingSet(unittest.TestCase):
    def testSimpleGradingSet(self):
        """Equality of elements in a grading set with two periodic domains."""
        pmc = splitPMC(2)
        domains = [pmc.small_gr(0, [1,0,0,0]),
                   pmc.small_gr(0, [0,0,1,0])]
        gr_set = SimpleGradingSet(SmallGradingGroup(pmc), ACTION_LEFT,
                                  domains)
        zero = gr_set.zero()
        self.assertNotEqual(zero, zero * pmc.small_gr(0, [0,1,0,0]))
        self.assertEqual(zero, zero * pmc.small_gr(0, [1,0,0,0]))
        base = SimpleGradingSetElement(gr_set, pmc.small_gr(0, [0,1,0,0]))
        self.assertEqual(base, base * pmc.small_gr(-2, [1,0,0,0]))
        self.assertNotEqual(base, base * pmc.small_gr(0, [1,0,0,0]))
class TestSimpleDbGradingSet(unittest.TestCase):
    def testSimpleDbGradingSet(self):
        """Equality of elements in a double grading set with two periodic
        domains.
        """
        pmc = splitPMC(1)
        domains = [(pmc.big_gr(0, [1,1,0]), pmc.big_gr(0, [-1,-1,0])),
                   (pmc.big_gr(0, [0,1,1]), pmc.big_gr(0, [0,-1,-1]))]
        gr_set = SimpleDbGradingSet(BigGradingGroup(pmc), ACTION_LEFT,
                                    BigGradingGroup(pmc), ACTION_LEFT,
                                    domains)
        zero = gr_set.zero()
        self.assertNotEqual(
            zero, zero * [pmc.big_gr(0, [-1,-1,0]), pmc.big_gr(0, [0,0,0])])
        self.assertEqual(
            zero, zero * [pmc.big_gr(0, [-1,-1,0]), pmc.big_gr(0, [1,1,0])])
class TestGeneralGradingSet(unittest.TestCase):
    def testGeneralGradingSet(self):
        # Equality in a composite (general) grading set: elements that differ
        # by a periodic domain are equal; others are not.
        pmc = splitPMC(1)
        periodic_domains1 = [
            (pmc.big_gr(0, [1,1,0]), pmc.big_gr(0, [-1,-1,0])),
            (pmc.big_gr(0, [0,1,1]), pmc.big_gr(0, [0,-1,-1]))]
        gr_set1 = SimpleDbGradingSet(BigGradingGroup(pmc), ACTION_LEFT,
                                     BigGradingGroup(pmc), ACTION_RIGHT,
                                     periodic_domains1)
        periodic_domains2 = [pmc.big_gr(0, [1,1,0])]
        gr_set2 = SimpleGradingSet(BigGradingGroup(pmc), ACTION_LEFT,
                                   periodic_domains2)
        gr_set = GeneralGradingSet([gr_set1, gr_set2])
        elt1 = gr_set.zero()
        l2 = gr_set1.zero() * [pmc.big_gr(0, [1,1,0]), pmc.big_gr(0, [0,0,0])]
        l3 = gr_set1.zero() * [pmc.big_gr(0, [0,1,1]), pmc.big_gr(0, [0,0,0])]
        elt2 = GeneralGradingSetElement(gr_set, [l2, gr_set2.zero()])
        elt3 = GeneralGradingSetElement(gr_set, [l3, gr_set2.zero()])
        self.assertEqual(elt1, elt2)
        self.assertNotEqual(elt1, elt3)
    def testSimplifiedGradingSet(self):
        # A composite set whose double component acts invertibly simplifies
        # down to a SimpleGradingSet; the simplified element must represent
        # the same class.
        pmc = splitPMC(1)
        periodic_domains1 = [(pmc.small_gr(0, [1,1]), pmc.small_gr(0, [1,0])),
                             (pmc.small_gr(0, [2,3]), pmc.small_gr(0, [0,1]))]
        gr_set1 = SimpleDbGradingSet(SmallGradingGroup(pmc), ACTION_LEFT,
                                     SmallGradingGroup(pmc), ACTION_RIGHT,
                                     periodic_domains1)
        periodic_domains2 = [pmc.small_gr(0, [0,1])]
        gr_set2 = SimpleGradingSet(SmallGradingGroup(pmc), ACTION_LEFT,
                                   periodic_domains2)
        gr_set = GeneralGradingSet([gr_set1, gr_set2])
        gr_set_short = gr_set.simplifiedSet()
        r1 = gr_set2.zero() * [pmc.small_gr(0, [1,-1])]
        elt1 = GeneralGradingSetElement(gr_set, [gr_set1.zero(), r1])
        elt1_short = gr_set.simplifiedElt(elt1)
        # Actually simplified
        self.assertTrue(isinstance(gr_set_short, SimpleGradingSet))
        self.assertTrue(isinstance(elt1_short, SimpleGradingSetElement))
        # elt1_short should be the 'same' element as before
        l2 = gr_set1.zero() * [elt1_short.data, pmc.small_gr(0, [0,0])]
        elt2 = GeneralGradingSetElement(gr_set, [l2, gr_set2.zero()])
        self.assertEqual(elt1, elt2)
    def testNoSimplification(self):
        # When the periodic domains do not give an automorphism, the set
        # cannot be simplified; simplifiedSet/simplifiedElt are identities.
        pmc = splitPMC(1)
        # Does not form automorphism
        periodic_domains1 = [(pmc.small_gr(0, [1,0]), pmc.small_gr(0, [0,0])),
                             (pmc.small_gr(0, [0,0]), pmc.small_gr(0, [0,1]))]
        gr_set1 = SimpleDbGradingSet(SmallGradingGroup(pmc), ACTION_LEFT,
                                     SmallGradingGroup(pmc), ACTION_RIGHT,
                                     periodic_domains1)
        periodic_domains2 = [pmc.small_gr(0, [0,1])]
        gr_set2 = SimpleGradingSet(SmallGradingGroup(pmc), ACTION_LEFT,
                                   periodic_domains2)
        gr_set = GeneralGradingSet([gr_set1, gr_set2])
        gr_set_short = gr_set.simplifiedSet()
        self.assertEqual(gr_set, gr_set_short)
        elt1 = GeneralGradingSetElement(
            gr_set, [gr_set1.zero(), gr_set2.zero()])
        elt1_short = gr_set.simplifiedElt(elt1)
        self.assertEqual(elt1, elt1_short)
# Run the tests in this module when invoked directly.
if __name__ == "__main__":
    unittest.main()
| 6,731
| 44.181208
| 79
|
py
|
bfh_python
|
bfh_python-master/dastructure.py
|
"""Defines type DA structures."""
from algebra import CobarAlgebra, DGAlgebra, FreeModule, Generator, Tensor, \
TensorGenerator, TensorStarGenerator
from algebra import ChainComplex, E0, TensorDGAlgebra
from dstructure import DGenerator, SimpleDStructure
from ddstructure import DDGenerator, SimpleDDGenerator, SimpleDDStructure
from grading import GeneralGradingSet, GeneralGradingSetElement, \
SimpleDbGradingSetElement
from hdiagram import getIdentityDiagram
from pmc import StrandDiagram
from utility import MorObject, NamedObject
from utility import sumColumns
from utility import ACTION_LEFT, ACTION_RIGHT, F2
class DAGenerator(Generator):
    """A generator of a type DA structure. Distinguished by (python)
    identity.
    """
    def __init__(self, parent, idem1, idem2):
        """Every generator carries two idempotents: idem1 is the type D
        idempotent on the left, and idem2 is the type A idempotent on the
        right.
        """
        Generator.__init__(self, parent)
        self.idem1 = idem1
        self.idem2 = idem2
class SimpleDAGenerator(DAGenerator, NamedObject):
    """A type DA structure generator that is distinguished by its name."""
    def __init__(self, parent, idem1, idem2, name):
        """Same data as DAGenerator, plus a name."""
        DAGenerator.__init__(self, parent, idem1, idem2)
        NamedObject.__init__(self, name)
    def __str__(self):
        return "%s:%s,%s" % (self.name, self.idem1, self.idem2)
    def __repr__(self):
        return self.__str__()
class DATensorDGenerator(DGenerator, tuple):
    """Generator of the type D structure obtained by tensoring a type DA
    structure with a type D structure. Also serves as the generator of the
    type D structure formed by tensoring DD * CFAA(Id) * D, where the
    generator of CFAA(Id) is implicit from the idempotents.
    The pair stored is (gen_left, gen_right): gen_left comes from a type DA
    structure (DA * D interpretation) or a type DD structure
    (DD * CFAA(Id) * D interpretation); gen_right comes from a type D
    structure.
    """
    def __new__(cls, parent, gen_left, gen_right):
        return tuple.__new__(cls, (gen_left, gen_right))
    def __init__(self, parent, gen_left, gen_right):
        """Specify generators on two sides of the tensor (DA/DD and D
        generators). The tuple part is already set up by __new__.
        """
        DGenerator.__init__(self, parent, gen_left.idem1)
        self.gen_left, self.gen_right = gen_left, gen_right
        # Concatenate filtration levels from whichever factors carry them.
        combined = []
        for factor in (gen_left, gen_right):
            if hasattr(factor, "filtration"):
                combined.extend(factor.filtration)
        if combined:
            self.filtration = combined
class DATensorDDGenerator(DDGenerator, tuple):
    """Generator of the type DD structure obtained by tensoring a type DA
    structure with a type DD structure. Also serves as the generator of the
    type DD structure formed by tensoring DD * CFAA(Id) * DD, where the
    generator of CFAA(Id) is implicit from the idempotents.
    The pair stored is (gen_left, gen_right): gen_left comes from a type DA
    structure (DA * DD interpretation) or a type DD structure
    (DA * CFAA(Id) * DD interpretation); gen_right comes from a type DD
    structure.
    """
    def __new__(cls, parent, gen_left, gen_right):
        return tuple.__new__(cls, (gen_left, gen_right))
    def __init__(self, parent, gen_left, gen_right):
        """Specify generators on two sides of the tensor (DA/DD and DD
        generators). The tuple part is already set up by __new__.
        """
        DDGenerator.__init__(self, parent, gen_left.idem1, gen_right.idem2)
        self.gen_left, self.gen_right = gen_left, gen_right
        # Concatenate filtration levels from whichever factors carry them.
        combined = []
        for factor in (gen_left, gen_right):
            if hasattr(factor, "filtration"):
                combined.extend(factor.filtration)
        if combined:
            self.filtration = combined
class MorDAtoDAGenerator(Generator, MorObject):
    """Represents a generator of the chain complex of bimodule morphisms from a
    type DA structure to a type DA structure.
    """
    def __init__(self, parent, coeff_d, coeffs_a, source, target):
        """Specifies the morphism m(source, coeffs_a) -> coeff_d * target.
        source and target are generators in two type DA bimodules with same
        algebra actions. If the bimodules have left type D action by algebra1
        and right type A action by algebra2, then as a MorObject coeff is of
        type TensorDGAlgebra(algebra1, CobarAlgebra(algebra2)).
        """
        Generator.__init__(self, parent)
        # Keep the raw coefficients in addition to the packed MorObject coeff.
        self.coeff_d, self.coeffs_a = coeff_d, coeffs_a
        # Pack (coeff_d, coeffs_a) into a single generator of the tensor
        # algebra algebra1 (x) Cobar(algebra2), as required by MorObject.
        cobar_alg = CobarAlgebra(source.parent.algebra2)
        tensor_alg = TensorDGAlgebra((source.parent.algebra1, cobar_alg))
        coeff = TensorGenerator(
            (coeff_d, TensorStarGenerator(coeffs_a, cobar_alg, source.idem2)),
            tensor_alg)
        MorObject.__init__(self, source, coeff, target)
    def __str__(self):
        # Unpack the D-side output and A-side inputs from the packed coeff.
        coeff_d, coeffs_a = self.coeff
        return "m(%s:%s; %s) = %s*%s:%s" % \
            (self.source.name, self.source, coeffs_a, coeff_d,
             self.target.name, self.target)
class MorDAtoDAComplex(ChainComplex):
    """Represents the complex of type DA morphisms between two type DA
    structures.
    """
    def __init__(self, ring, source, target):
        """Specifies the source and target DA structures. They must share
        algebras and action sides.
        """
        ChainComplex.__init__(self, ring)
        assert source.algebra1 == target.algebra1 and \
            source.algebra2 == target.algebra2
        assert source.side1 == target.side1 and source.side2 == target.side2
        self.source = source
        self.target = target
    def __eq__(self, other):
        # Unlike other structures, MorDDtoDDComplex is distinguished by its
        # source and target
        return self.source == other.source and self.target == other.target
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # BUG FIX: __hash__ previously declared a spurious `other` parameter,
        # so hash(obj) always raised TypeError. It must take only self, and
        # it hashes the same (source, target) pair used by __eq__.
        return hash((self.source, self.target))
    def diff(self, gen):
        """Differential of a morphism generator: sum of (1) the differential
        of its coefficient, (2) pre-composition with the DA action of source,
        and (3) post-composition with the DA action of target.
        """
        result = E0
        x, c, y = gen.source, gen.coeff, gen.target
        c_d, cs_a = gen.coeff  # D-side output and tuple of A-side inputs
        # Differential of coefficient
        for dc, ring_coeff in list(c.diff().items()):
            coeff_d, coeffs_a = dc
            result += ring_coeff * MorDAtoDAGenerator(
                self, coeff_d, coeffs_a, x, y)
        # Pre-compose with differential in source
        for (x1, coeffs_a), target in list(self.source.da_action.items()):
            for (coeff_d, x2), ring_coeff in list(target.items()):
                if x == x2 and coeff_d * c_d != E0:
                    result += 1*MorDAtoDAGenerator(
                        self, (coeff_d * c_d).getElt(), coeffs_a + cs_a, x1, y)
        # Post-compose with differential in target
        for (y1, coeffs_a), target in list(self.target.da_action.items()):
            for (coeff_d, y2), ring_coeff in list(target.items()):
                if y == y1 and c_d * coeff_d != E0:
                    result += 1*MorDAtoDAGenerator(
                        self, (c_d * coeff_d).getElt(), cs_a + coeffs_a, x, y2)
        return result
    def getMappingCone(self, morphism):
        """Returns the mapping cone of a morphism. This is broadly similar to
        that for DDStructures: generators of source (filtration level 0) and
        target (level 1) are combined, with the morphism's arrows connecting
        them.
        """
        result = SimpleDAStructure(
            F2, self.source.algebra1, self.source.algebra2,
            self.source.side1, self.source.side2)
        gen_map = dict()
        for gen in self.source.getGenerators():
            gen_map[gen] = SimpleDAGenerator(
                result, gen.idem1, gen.idem2, "S_%s" % gen.name)
            gen_map[gen].filtration = [0]
            if hasattr(gen, "filtration"):
                # BUG FIX: was `gen_map[gen] += gen.filtration`, which would
                # rebind gen_map[gen] to the result of Generator.__add__
                # instead of extending the new generator's filtration list.
                gen_map[gen].filtration += gen.filtration
            result.addGenerator(gen_map[gen])
        for gen in self.target.getGenerators():
            gen_map[gen] = SimpleDAGenerator(
                result, gen.idem1, gen.idem2, "T_%s" % gen.name)
            gen_map[gen].filtration = [1]
            if hasattr(gen, "filtration"):
                # BUG FIX: same rebinding problem as in the source loop above.
                gen_map[gen].filtration += gen.filtration
            result.addGenerator(gen_map[gen])
        # Copy over the internal DA actions of source and target.
        for (x1, coeffs_a), target in list(self.source.da_action.items()):
            for (coeff_d, x2), ring_coeff in list(target.items()):
                result.addDelta(
                    gen_map[x1], gen_map[x2], coeff_d, coeffs_a, ring_coeff)
        for (y1, coeffs_a), target in list(self.target.da_action.items()):
            for (coeff_d, y2), ring_coeff in list(target.items()):
                result.addDelta(
                    gen_map[y1], gen_map[y2], coeff_d, coeffs_a, ring_coeff)
        # Arrows from source to target coming from the morphism itself.
        for gen, ring_coeff in list(morphism.items()):
            # coeffs_a is a tuple of A-side inputs
            coeff_d, coeffs_a = gen.coeff
            result.addDelta(gen_map[gen.source], gen_map[gen.target],
                            coeff_d, tuple(coeffs_a), ring_coeff)
        return result
class DAStructure(FreeModule):
    """Represents a type DA structure. delta() takes a generator and a sequence
    of algebra generators (as a generator of the tensor algebra), and returns
    an element in the tensor module Tensor((A,M)), where A is algebra1 (D-side
    algebra).

    Subclasses must implement getGenerators(), delta(), and deltaPrefix().
    """
    def __init__(self, ring, algebra1, algebra2, side1, side2):
        """Specifies the algebras and sides of the type DA action. algebra1 /
        side1 describe the type D action; algebra2 / side2 the type A action.
        """
        FreeModule.__init__(self, ring)
        assert isinstance(algebra1, DGAlgebra)
        assert isinstance(algebra2, DGAlgebra)
        self.algebra1 = algebra1
        self.side1 = side1
        self.algebra2 = algebra2
        self.side2 = side2
        # Construct A tensor M. Add diff and the left action of A on this
        # tensor product.
        self.AtensorM = Tensor((algebra1, self))
        # (The xxx_todo_changeme names are 2to3 artifacts standing in for
        # tuple-unpacked parameters.)
        def _mul_A_AtensorM(xxx_todo_changeme, ACoeff):
            """To be used as rmultiply() in AtensorM. Multiply ACoeff with
            AGen.
            """
            (AGen, MGen) = xxx_todo_changeme
            return (ACoeff * AGen) * MGen
        def _diff_AtensorM(xxx_todo_changeme1):
            """To be used as diff() in AtensorM. Leibniz rule over the two
            tensor factors.
            """
            (AGen, MGen) = xxx_todo_changeme1
            return (AGen.diff() * MGen) + (AGen * MGen.delta())
        self.AtensorM.rmultiply = _mul_A_AtensorM
        self.AtensorM.diff = _diff_AtensorM
    @staticmethod
    def idemMatchDA(x, y, coeff_d, coeffs_a):
        """Tests whether idempotent matches in the potential arrow
        x * coeffs_a -> coeff_d * y.
        """
        # D-side: coeff_d must connect the left idempotents of x and y.
        if x.idem1 != coeff_d.left_idem or y.idem1 != coeff_d.right_idem:
            return False
        if len(coeffs_a) == 0:
            return x.idem2 == y.idem2
        else:
            # A-side: consecutive inputs must be composable, and the chain
            # must start at x.idem2 and end at y.idem2.
            for i in range(len(coeffs_a)-1):
                if coeffs_a[i].right_idem != coeffs_a[i+1].left_idem:
                    return False
            return x.idem2 == coeffs_a[0].left_idem and \
                y.idem2 == coeffs_a[-1].right_idem
    def delta(self, MGen, algGens):
        """algGens = (a_1, ..., a_n) is an element of TensorAlgebra. Evaluates
        the type DA operation delta^1(MGen; a_1, ..., a_n).
        """
        raise NotImplementedError("Differential not implemented.")
    def deltaPrefix(self, MGen, algGens):
        """algGens = (a_1, ..., a_n) is an element of TensorAlgebra. Returns a
        boolean value indicating whether there exists an arrow in the type DA
        action with starting generator MGen, and whose list of algebra
        generators has a_1, ..., a_n as a *strict* prefix.
        """
        raise NotImplementedError("Prefix differential not implemented.")
    def rmultiply(self, MGen, AGen):
        """Multiply a generator of type DAStructure with an algebra generator
        means forming the tensor.
        """
        return 1*TensorGenerator((AGen, MGen), self.AtensorM)
    def toSimpleDAStructure(self):
        """Using delta and deltaPrefix, produce a simple DA structure (with
        explicit DA action). Does not work when there are infinitely many
        actions.
        """
        assert self.side1 == ACTION_LEFT and self.side2 == ACTION_RIGHT
        dastr = SimpleDAStructure(F2, self.algebra1, self.algebra2,
                                  side1 = ACTION_LEFT, side2 = ACTION_RIGHT)
        gen_map = dict()
        for gen in self.getGenerators():
            gen_map[gen] = SimpleDAGenerator(
                dastr, gen.idem1, gen.idem2, gen.name)
            dastr.addGenerator(gen_map[gen])
        # Non-idempotent A-side generators used to extend input sequences.
        alg2_gens = [alg_gen for alg_gen in self.algebra2.getGenerators()
                     if not alg_gen.isIdempotent()]
        def search(start_gen, cur_coeffs_a):
            """Search for terms in the action, starting from the generator
            start_gen, and with the current list of A-side inputs cur_coeffs_a
            (to be possibly extended).
            """
            cur_delta = self.delta(start_gen, cur_coeffs_a)
            for (coeff_d, gen_to), ring_coeff in list(cur_delta.items()):
                dastr.addDelta(gen_map[start_gen], gen_map[gen_to], coeff_d,
                               cur_coeffs_a, ring_coeff)
            # Recurse only while a strictly longer input sequence can occur.
            if self.deltaPrefix(start_gen, cur_coeffs_a):
                for coeff_a in alg2_gens:
                    search(start_gen, cur_coeffs_a + (coeff_a,))
        for gen in self.getGenerators():
            search(gen, ())
        # Copy over Heegaard diagram and grading information
        if hasattr(self, "hdiagram"):
            dastr.hdiagram = self.hdiagram
            dastr.hdiagram_gen_map = dict()
            for dagen, hgen in list(self.hdiagram_gen_map.items()):
                dastr.hdiagram_gen_map[gen_map[dagen]] = hgen
        if hasattr(self, "gr_set"):
            dastr.gr_set = self.gr_set
            dastr.grading = dict()
            for gen, gr in list(self.grading.items()):
                dastr.grading[gen_map[gen]] = gr
        return dastr
    def tensorD(self, dstr):
        """Compute the box tensor product DA * D of this bimodule with the given
        type D structure. Returns the resulting type D structure. Uses delta()
        and deltaPrefix() functions of this type DA structure.
        """
        dstr_result = SimpleDStructure(F2, self.algebra1)
        # Compute list of generators in the box tensor product
        for gen_left in self.getGenerators():
            for gen_right in dstr.getGenerators():
                # Generators pair up exactly when the A-side idempotent of
                # the DA generator matches the D generator's idempotent.
                if gen_left.idem2 == gen_right.idem:
                    dstr_result.addGenerator(DATensorDGenerator(
                        dstr_result, gen_left, gen_right))
        def search(start_gen, cur_dgen, cur_coeffs_a):
            """Searching for an arrow in the box tensor product.
            - start_gen: starting generator in the box tensor product. The
              resulting arrow will start from here.
            - cur_dgen: current location in the type D structure.
            - cur_coeffs_a: current list of A-side inputs to the type DA
              structure (or alternatively, list of algebra outputs produced by
              the existing path through the type D structure).
            """
            start_dagen, start_dgen = start_gen
            cur_delta = self.delta(start_dagen, cur_coeffs_a)
            for (coeff_d, gen_to), ring_coeff in list(cur_delta.items()):
                dstr_result.addDelta(start_gen, DATensorDGenerator(
                    dstr_result, gen_to, cur_dgen), coeff_d, 1)
            # Extend the path through dstr while a longer DA input can occur.
            if self.deltaPrefix(start_dagen, cur_coeffs_a):
                for (coeff_out, dgen_to), ring_coeff in \
                    list(dstr.delta(cur_dgen).items()):
                    search(start_gen, dgen_to, cur_coeffs_a + (coeff_out,))
        for x in dstr_result.getGenerators():
            dagen, dgen = x
            search(x, dgen, ())
            # Add arrows coming from idempotent output on the D-side
            for (coeff_out, dgen_to), ring_coeff in list(dstr.delta(dgen).items()):
                if coeff_out.isIdempotent():
                    dstr_result.addDelta(
                        x, DATensorDGenerator(dstr_result, dagen, dgen_to),
                        dagen.idem1.toAlgElt(self.algebra1), 1)
        # Find grading set if available on both components
        def tensorGradingSet():
            """Find the grading set of the new type D structure."""
            return GeneralGradingSet([self.gr_set, dstr.gr_set])
        def tensorGrading(gr_set, dagen, dgen):
            """Find the grading of the generator (x, y) in the tensor type D
            structure. The grading set need to be provided as gr_set.
            """
            return GeneralGradingSetElement(
                gr_set, [self.grading[dagen], dstr.grading[dgen]])
        if hasattr(self, "gr_set") and hasattr(dstr, "gr_set"):
            dstr_result.gr_set = tensorGradingSet()
            dstr_result.grading = dict()
            for x in dstr_result.getGenerators():
                dagen, dgen = x
                dstr_result.grading[x] = tensorGrading(
                    dstr_result.gr_set, dagen, dgen)
        return dstr_result
    def tensorDD(self, ddstr):
        """Compute the box tensor product DA * DD of this bimodule with the
        given type DD structure. Returns the resulting type DD structure. Uses
        delta() and deltaPrefix() functions of this type DA structure.
        """
        ddstr_result = SimpleDDStructure(F2, self.algebra1, ddstr.algebra2)
        # Compute list of generators in the box tensor product
        for gen_left in self.getGenerators():
            for gen_right in ddstr.getGenerators():
                if gen_left.idem2 == gen_right.idem1:
                    ddstr_result.addGenerator(DATensorDDGenerator(
                        ddstr_result, gen_left, gen_right))
        def search(start_gen, cur_ddgen, cur_algd, cur_coeffs_a):
            """Searching for an arrow in the box tensor product.
            - start_gen: starting generator in the box tensor product. The
              resulting arrow will start from here.
            - cur_ddgen: current location in the type DD structure.
            - cur_algd: current product algebra outputs on the right side of the
              DD structure.
            - cur_coeffs_a: current list of A-side inputs to the type DA
              structure (or alternatively, list of algebra outputs on the left
              side of the DD structure).
            """
            start_dagen, start_dgen = start_gen
            cur_delta = self.delta(start_dagen, cur_coeffs_a)
            for (coeff_d, gen_to), ring_coeff in list(cur_delta.items()):
                ddstr_result.addDelta(start_gen, DATensorDDGenerator(
                    ddstr_result, gen_to, cur_ddgen), coeff_d, cur_algd, 1)
            if self.deltaPrefix(start_dagen, cur_coeffs_a):
                for (coeff_out1, coeff_out2, dgen_to), ring_coeff in \
                    list(ddstr.delta(cur_ddgen).items()):
                    # Accumulate right-side outputs; stop if the product
                    # vanishes.
                    new_algd = cur_algd * coeff_out2
                    if new_algd != E0:
                        search(start_gen, dgen_to, new_algd.getElt(),
                               cur_coeffs_a + (coeff_out1,))
        for x in ddstr_result.getGenerators():
            dagen, ddgen = x
            search(x, ddgen, ddgen.idem2.toAlgElt(ddstr.algebra2), ())
            # Add arrows coming from idempotent output on the left DD-side
            for (coeff_out1, coeff_out2, dgen_to), ring_coeff in \
                list(ddstr.delta(ddgen).items()):
                if coeff_out1.isIdempotent():
                    ddstr_result.addDelta(
                        x, DATensorDDGenerator(ddstr_result, dagen, dgen_to),
                        dagen.idem1.toAlgElt(self.algebra1), coeff_out2, 1)
        # Grading is omitted.
        return ddstr_result
    def registerHDiagram(self, diagram, base_gen, base_gr = None):
        """Associate the given diagram as the Heegaard diagram from which this
        type DA structure can be derived. We will attempt to match generators
        of the type DA structure to generators of the Heegaard diagram.
        Currently this is possible only if no two generators have the same
        idempotents (so the match can be made by comparing idempotents).
        As a result, computes grading of each generator from the Heegaard
        diagram and checks it against type DA operations. Attributes added are:
        *. self.hdiagram - the Heegaard diagram.
        *. self.hdiagram_gen_map - dictionary mapping generators in the type DA
        structure to generators in Heegaard diagram.
        *. self.gr_set - the grading set (of type SimpleDbGradingSet).
        *. self.grading - dictionary mapping generators in the type DA
        structure to their gradings.
        Requires self.getGenerators() to be implemented.
        """
        self.hdiagram = diagram
        # Get PMC's and check that they make sense
        hd_pmc1, hd_pmc2 = self.hdiagram.pmc_list
        das_pmc1, das_pmc2 = self.algebra1.pmc, self.algebra2.pmc
        assert hd_pmc1.opp() == das_pmc1
        assert hd_pmc2 == das_pmc2
        # Set of generators in bimodule and in diagram
        idem_size = 2 * das_pmc1.genus - len(base_gen.idem1)
        gens = self.getGenerators()
        hgens = diagram.getHFGenerators(idem_size = idem_size)
        # Group generators by idempotent
        idem_to_gen, idem_to_hgen = dict(), dict()
        self.hdiagram_gen_map = dict()
        for gen in gens:
            key = (gen.idem1, gen.idem2)
            if key not in idem_to_gen:
                idem_to_gen[key] = []
            idem_to_gen[key].append(gen)
        for hgen in hgens:
            key = hgen.getDIdem()[0], hgen.getIdem()[1]
            if key not in idem_to_hgen:
                idem_to_hgen[key] = []
            idem_to_hgen[key].append(hgen)
        # Check counts of generators in each idempotent agree, and that there
        # are at most two generators in each idempotent.
        assert len(idem_to_gen) == len(idem_to_hgen)
        for idem in idem_to_gen:
            assert len(idem_to_gen[idem]) <= 2
            assert idem in idem_to_hgen
            assert len(idem_to_hgen[idem]) == len(idem_to_gen[idem])
        # Compute grading in hdiagram
        base_key = (base_gen.idem1, base_gen.idem2)
        assert len(idem_to_gen[base_key]) == 1
        assert base_gen == idem_to_gen[base_key][0]
        base_hgen = idem_to_hgen[base_key][0]
        self.gr_set, gr = self.hdiagram.computeDAGrading(base_hgen, base_gr)
        # Compute the map from gen to hgen
        for idem in idem_to_gen:
            if len(idem_to_gen[idem]) == 1:
                gen, hgen = idem_to_gen[idem][0], idem_to_hgen[idem][0]
                self.hdiagram_gen_map[gen] = hgen
            else:
                # Two generators on each side. Exchange x and y so there is an
                # arrow x -> i_D(y) * y in the DA action. Exchange hx and hy so
                # that hx has grading one higher than hy. Then x is matched to
                # hx and y is matched to hy.
                x, y = idem_to_gen[idem]
                hx, hy = idem_to_hgen[idem]
                if gr[hy] - 1 == gr[hx]:
                    hx, hy = hy, hx
                assert gr[hx] - 1 == gr[hy]
                # Algebra element corresponding to the left idempotent.
                idem_alg = x.idem1.toAlgElt(self.algebra1)
                # NOTE(review): delta() is called with a list here, while
                # SimpleDAStructure.delta uses (MGen, algGens) as a dict key,
                # which requires a hashable tuple -- confirm the intended
                # subclass accepts a list.
                if (idem_alg * x).getElt() in self.delta(y, []):
                    x, y = y, x
                assert (idem_alg * y).getElt() in self.delta(x, [])
                self.hdiagram_gen_map[x] = hx
                self.hdiagram_gen_map[y] = hy
        self.grading = dict()
        for gen in gens:
            self.grading[gen] = gr[self.hdiagram_gen_map[gen]]
class SimpleDAStructure(DAStructure):
"""Represents a type DA structure with a finite number of generators and a
finite number of type DA operations.
"""
    def __init__(self, ring, algebra1, algebra2,
                 side1 = ACTION_LEFT, side2 = ACTION_RIGHT):
        """Specifies the algebras and sides of the type DA action. algebra1 and
        side1 are for the type D action. algebra2 and side2 are for the type A
        action.
        """
        assert side1 == ACTION_LEFT and side2 == ACTION_RIGHT, \
            "Actions other than left/right are not implemented for DA."
        DAStructure.__init__(self, ring, algebra1, algebra2, side1, side2)
        # Set of DAGenerator in this structure.
        self.generators = set()
        # Maps (gen_from, coeffs_a tuple) to an element of AtensorM giving
        # the D-side outputs; populated by addDelta().
        self.da_action = dict()
    def __len__(self):
        """Number of generators in this structure."""
        return len(self.generators)
    def getGenerators(self):
        """Returns the generators as a list (in arbitrary order)."""
        return list(self.generators)
    def addGenerator(self, generator):
        """Add a generator. No effect if the generator already exists."""
        assert generator.parent == self
        assert isinstance(generator, DAGenerator)
        self.generators.add(generator)
def addDelta(self, gen_from, gen_to, coeff_d, coeffs_a, ring_coeff):
"""Add ring_coeff * (coeff_d * gen_to) to the delta of gen_from, with
coeffs_a as A-side inputs. The arguments gen_form, gen_to, and coeff_d
should be generators, and coeff_a should be a list of generators.
"""
assert gen_from.parent == self and gen_to.parent == self
assert DAStructure.idemMatchDA(gen_from, gen_to, coeff_d, coeffs_a)
coeffs_a = tuple(coeffs_a)
target_gen = TensorGenerator((coeff_d, gen_to), self.AtensorM)
if (gen_from, coeffs_a) not in self.da_action:
self.da_action[(gen_from, coeffs_a)] = E0
self.da_action[(gen_from, coeffs_a)] += target_gen.elt(ring_coeff)
# Clean out the zero maps
if self.da_action[(gen_from, coeffs_a)] == 0:
del self.da_action[(gen_from, coeffs_a)]
def delta(self, MGen, algGens):
if len(algGens) == 1 and algGens[0].isIdempotent() and \
algGens[0].left_idem == MGen.idem2:
return MGen.idem1.toAlgElt(self.algebra1) * MGen
elif (MGen, algGens) not in self.da_action:
return E0
else:
return self.da_action[(MGen, algGens)]
def deltaPrefix(self, MGen, algGens):
# Should be called only after all addDelta has completed
if not hasattr(self, "strict_prefix"):
self.strict_prefix = set()
for (gen_from, coeffs_a), target in list(self.da_action.items()):
for i in range(len(coeffs_a)):
self.strict_prefix.add((gen_from, tuple(coeffs_a[0:i])))
return (MGen, algGens) in self.strict_prefix
def deltaPrefixNS(self, MGen, algGens):
#Non-strict version of previous.
# Should be called only after all addDelta has completed
if not hasattr(self, "non_strict_prefix"):
self.non_strict_prefix = set()
for (gen_from, coeffs_a), target in list(self.da_action.items()):
for i in range(len(coeffs_a)+1):
self.non_strict_prefix.add((gen_from, tuple(coeffs_a[0:i])))
#Include actions by idempotents.
for gen_from in self.getGenerators():
self.non_strict_prefix.add((gen_from,(gen_from.idem2.toAlgElt(self.algebra2),) ))
self.non_strict_prefix.add((gen_from,tuple()))
return (MGen, algGens) in self.non_strict_prefix
def toDDStructure(self):
"""Convert this to a type DD structure over algebra1 and cobar of
algebra2.
"""
cobar2 = CobarAlgebra(self.algebra2)
ddstr = SimpleDDStructure(self.ring, self.algebra1, cobar2)
dagen_to_ddgen_map = dict()
for gen in self.generators:
ddgen = SimpleDDGenerator(ddstr, gen.idem1, gen.idem2, gen.name)
dagen_to_ddgen_map[gen] = ddgen
ddstr.addGenerator(ddgen)
for (gen_from, coeffs_a), target in list(self.da_action.items()):
for (coeff_d, gen_to), ring_coeff in list(target.items()):
idem = None
if len(coeffs_a) == 0:
idem = gen_from.idem2
assert idem == gen_to.idem2
cobar_gen = TensorStarGenerator(coeffs_a, cobar2, idem)
ddstr.addDelta(dagen_to_ddgen_map[gen_from],
dagen_to_ddgen_map[gen_to],
coeff_d, cobar_gen, ring_coeff)
return ddstr
def testDelta(self):
"""Verify the type DA structure equations."""
return self.toDDStructure().testDelta()
def __str__(self):
result = "Type DA Structure.\n"
for (gen_from, coeffs_a), target in list(self.da_action.items()):
result += "m(%s; %s) = %s\n" % (gen_from, coeffs_a, target)
return result
def toStrWithMultA(self, mult_a):
"""Print all arrows with the given multiplicities on the D side."""
result = "Type DA Structure.\n"
for (gen_from, coeffs_a), target in list(self.da_action.items()):
total_mult = sumColumns([coeff.multiplicity for coeff in coeffs_a],
len(mult_a))
if mult_a == total_mult:
result += "m(%s; %s) = %s\n" % (gen_from, coeffs_a, target)
return result
def restrictToMultA(self, start, end):
"""Restrict actions to those with multiplicity in the interval
(start, end). Here start and end specify points on the PMC. For example,
start, end = 0, pmc.n-1 will not change the action. Returns the new type
DA structure without changing the original structure.
"""
translate_dict = dict()
dastr = SimpleDAStructure(F2, self.algebra1, self.algebra2,
self.side1, self.side2)
for gen in self.generators:
translate_dict[gen] = SimpleDAGenerator(
dastr, gen.idem1, gen.idem2, gen.name)
dastr.addGenerator(translate_dict[gen])
mult_len = self.algebra1.pmc.n - 1
for (gen_from, coeffs_a), target in list(self.da_action.items()):
total_mult = sumColumns([coeff.multiplicity for coeff in coeffs_a],
mult_len)
if all([total_mult[i] <= 0
for i in list(range(0, start)) + list(range(end, mult_len))]):
for (coeff_d, gen_to), ring_coeff in list(target.items()):
dastr.addDelta(translate_dict[gen_from],
translate_dict[gen_to],
coeff_d, coeffs_a, ring_coeff)
return dastr
def checkGrading(self):
for (x, coeffs_a), target in list(self.da_action.items()):
for (coeff_d, y), ring_coeff in list(target.items()):
gr_x1, gr_x2 = self.grading[x].data
gr_y1, gr_y2 = self.grading[y].data
for coeff_a in coeffs_a:
gr_x2 = gr_x2 * coeff_a.getGrading()
gr_y1 = coeff_d.getGrading() * gr_y1
new_gr_x = SimpleDbGradingSetElement(
self.gr_set, [gr_x1, gr_x2])
new_gr_y = SimpleDbGradingSetElement(
self.gr_set, [gr_y1, gr_y2])
assert new_gr_x - (1-len(coeffs_a)) == new_gr_y
class ComposedDAStructure(DAStructure):
    """Type DA structure specified as a boxed tensor product of several DA
    structures. The only available functions are tensorD and tensorDD, which
    apply tensorD / tensorDD of the component DA structures in reversed order.
    """
    def __init__(self, da_list):
        """da_list gives the list of component type DA structures. Must be
        non-empty.
        """
        assert len(da_list) > 0
        for da in da_list:
            assert da.side1 == ACTION_LEFT and da.side2 == ACTION_RIGHT
        # Consecutive components must be composable over the same ring.
        for prev, cur in zip(da_list, da_list[1:]):
            assert prev.algebra2 == cur.algebra1
            assert prev.ring == cur.ring
        self.da_list = da_list
        DAStructure.__init__(
            self, da_list[0].ring, da_list[0].algebra1, da_list[-1].algebra2,
            ACTION_LEFT, ACTION_RIGHT)

    def tensorD(self, dstr, cancellation_constraint = None):
        """Box-tensor dstr with each component in turn (innermost component
        first), reindexing and simplifying after every step.
        """
        for component in reversed(self.da_list):
            dstr = component.tensorD(dstr)
            dstr.reindex()
            dstr.simplify(cancellation_constraint = cancellation_constraint)
        return dstr

    def tensorDD(self, ddstr, cancellation_constraint = None):
        """Box-tensor ddstr with each component in turn (innermost component
        first), reindexing and simplifying after every step.
        """
        for component in reversed(self.da_list):
            ddstr = component.tensorDD(ddstr)
            ddstr.reindex()
            ddstr.simplify(cancellation_constraint = cancellation_constraint)
        return ddstr
def identityDA(pmc):
    """Returns the identity type DA structure for a given PMC."""
    alg = pmc.getAlgebra()
    dastr = SimpleDAStructure(F2, alg, alg)
    # One generator per idempotent, with equal idempotents on both sides.
    idem_to_gen_map = {}
    for i, idem in enumerate(pmc.getIdempotents()):
        cur_gen = SimpleDAGenerator(dastr, idem, idem, i)
        idem_to_gen_map[idem] = cur_gen
        dastr.addGenerator(cur_gen)
    # Each non-idempotent algebra generator a contributes the arrow
    # m(x; a) = a * y, matching left/right idempotents of a.
    for gen in alg.getGenerators():
        if gen.isIdempotent():
            continue
        dastr.addDelta(idem_to_gen_map[gen.getLeftIdem()],
                       idem_to_gen_map[gen.getRightIdem()],
                       gen, (gen,), 1)
    # Now add grading. Any generator can serve as base_gen.
    base_gen = next(iter(dastr.getGenerators()))
    dastr.registerHDiagram(getIdentityDiagram(pmc), base_gen)
    return dastr
def augmentationDA(pmc):
    """Returns the augmentation type DA structure, coming from the projection
    from the algebra to the span of the basic idempotents sending all
    non-idempotent elements to zero.
    """
    alg = pmc.getAlgebra()
    dastr = SimpleDAStructure(F2, alg, alg)
    # One generator per idempotent, with equal idempotents on both sides.
    # (Fix: the previous version also built an idempotent-to-generator map
    # that was never used.)
    for i, idem in enumerate(pmc.getIdempotents()):
        dastr.addGenerator(SimpleDAGenerator(dastr, idem, idem, i))
    # No arrows are added: all non-idempotent elements act as zero.
    # Now add grading. Any generator can serve as base_gen.
    base_gen = next(iter(dastr.getGenerators()))
    dastr.registerHDiagram(getIdentityDiagram(pmc), base_gen)
    return dastr
| 34,595
| 42.299124
| 179
|
py
|
bfh_python
|
bfh_python-master/cobordismda.py
|
"""Producing type DA structures for cobordisms, using local actions."""
from arcslide import Arcslide
from arcslideda import ArcslideDA
from autocompleteda import autoCompleteDA
from cobordism import Cobordism
from cobordism import LEFT, RIGHT
from dastructure import ComposedDAStructure, DAStructure, SimpleDAGenerator
from extendbyid import ExtendedDAStructure, LocalDAStructure
from hdiagram import getCobordismDiagramLeft, getSimpleCobordismDiagram
from localpmc import LocalIdempotent, LocalStrandAlgebra, PMCSplitting
from pmc import PMC
from pmc import linearPMC
from utility import subset
from utility import F2
import ast
import itertools
class CobordismDALeft(ExtendedDAStructure):
    """Responsible for producing a type DA structure for the cobordism with
    larger PMC on the left, using local actions.
    """
    def __init__(self, cob):
        """Specifies the cobordism to use. cob should be of type Cobordism,
        with cob.side == LEFT.
        """
        assert cob.side == LEFT
        self.cob = cob
        # Copy frequently-used data from the cobordism.
        self.genus, self.c_pair, self.n = cob.genus, cob.c_pair, cob.n
        self.start_pmc, self.end_pmc = cob.start_pmc, cob.end_pmc
        self.large_pmc, self.small_pmc = cob.large_pmc, cob.small_pmc
        self.is_degenerate = cob.is_degenerate
        self.c1, self.c2 = cob.c1, cob.c2
        # Four possible local cases
        self.MIDDLE, self.NEXT_TOP, self.TOP, self.BOTTOM = 0, 1, 2, 3
        if not self.is_degenerate:
            self.d, self.u = self.cob.d, self.cob.u
            self.to_s, self.pair_to_s = self.cob.to_s, self.cob.pair_to_s
            self.d_pair, self.u_pair = self.cob.d_pair, self.cob.u_pair
            self.du_pair = self.cob.du_pair  # refers to the smaller PMC.
            self.sd, self.su = self.small_pmc.pairs[self.du_pair]
            u1, u2 = self.large_pmc.pairs[self.u_pair]
            assert u1 == self.u
            if self.c_pair < 2*self.genus-2:
                # First case: c-pair at least two pairs from top
                self.case = self.MIDDLE
                self.splitting_small = PMCSplitting(
                    self.small_pmc, [(self.su-1, self.su)])
                self.splitting_large = PMCSplitting(
                    self.large_pmc, [(self.c1, u2)])
            else:
                # Second case: c-pair is the next-to-topmost pair
                assert self.c_pair == 2*self.genus-2
                assert u2 == u1 + 2
                self.case = self.NEXT_TOP
                self.splitting_small = PMCSplitting(
                    self.small_pmc, [(self.su, self.su)])
                self.splitting_large = PMCSplitting(
                    self.large_pmc, [(self.c1, u2)])
        else:
            assert self.c2 == self.c1 + 2
            if self.c_pair == 2*self.genus-1:
                # Third case: degenerate at top
                self.case = self.TOP
                self.splitting_small = PMCSplitting(
                    self.small_pmc, [(self.n-5, self.n-5)])  # topmost point
                self.splitting_large = PMCSplitting(
                    self.large_pmc, [(self.c1-2, self.c2)])
            else:
                # Fourth case: degenerate at bottom
                assert self.c_pair == 0
                self.case = self.BOTTOM
                self.splitting_small = PMCSplitting(self.small_pmc, [(0, 0)])
                self.splitting_large = PMCSplitting(self.large_pmc, [(0, 4)])
        # Assumes the LEFT case: D-side is the large PMC, A-side the small.
        self.splitting1 = self.splitting_large
        self.splitting2 = self.splitting_small
        self.local_pmc1 = self.splitting1.local_pmc
        self.local_pmc2 = self.splitting2.local_pmc
        # Required so the left to right transition on the outside can proceed.
        assert self.splitting1.outer_pmc == self.splitting2.outer_pmc
        self.local_da = self.getLocalDAStructure()
        # Complete the local action from the seed arrows.
        # NOTE(review): the per-case integer lists passed to autoCompleteDA
        # refer to points of the local PMCs -- see autocompleteda for their
        # exact meaning.
        if self.case == self.MIDDLE:
            autoCompleteDA(self.local_da, [2, 1, 0, 5, 6])
        elif self.case == self.NEXT_TOP:
            autoCompleteDA(self.local_da, [2, 1, 0])
        elif self.case == self.TOP:
            autoCompleteDA(self.local_da, [3, 1])
        else:
            autoCompleteDA(self.local_da, [0, 3])
        # Initiate the ExtendedDAStructure
        ExtendedDAStructure.__init__(
            self, self.local_da, self.splitting1, self.splitting2)
        # With generators set, add grading. Any generator can serve as base_gen
        for gen in self.generators:
            base_gen = gen
            break
        self.registerHDiagram(getCobordismDiagramLeft(self.cob), base_gen)

    def getLocalDAStructure(self):
        """Returns the local type DA structure associated to this cobordism."""
        # Compute the set of arrow patterns.  In each raw pattern the last
        # entry is the D-side output coefficient; earlier entries are the
        # A-side inputs.
        if self.case == self.MIDDLE:
            patterns_raw = self._patterns_left_middle()
        elif self.case == self.NEXT_TOP:
            patterns_raw = self._patterns_left_next_top()
        elif self.case == self.TOP:
            patterns_raw = self._patterns_left_top()
        else:
            assert self.case == self.BOTTOM
            patterns_raw = self._patterns_left_bottom()
        arrow_patterns = dict()
        for pattern in patterns_raw:
            coeffs_a = []
            for i in range(len(pattern)-1):
                coeffs_a.append(self.local_pmc2.sd(pattern[i]))
            coeffs_a = tuple(coeffs_a)
            if coeffs_a not in arrow_patterns:
                arrow_patterns[coeffs_a] = []
            arrow_patterns[coeffs_a].append(self.local_pmc1.sd(pattern[-1]))
        # Start construction of the local DA structure.
        alg1 = LocalStrandAlgebra(F2, self.local_pmc1)
        alg2 = LocalStrandAlgebra(F2, self.local_pmc2)
        if self.case == self.MIDDLE:
            local_dastr = LocalDAStructure(
                F2, alg1, alg2, single_idems1 = [1, 3], single_idems2 = [1, 0])
        else:
            local_dastr = LocalDAStructure(F2, alg1, alg2)
        # Compute the set of local generators, specified as pairs of
        # (left idempotent, right idempotent).
        if self.case == self.MIDDLE:
            # 0 is the c-pair. u-d pairs are 1 and 2 at left and 1 at
            # right. 3 at left corresponds to 0 at right is free.
            da_idems = [([0], []), ([0, 2], [1]), ([0, 1], [1]),
                        ([0, 3], [0]), ([0, 2, 3], [0, 1]),
                        ([0, 1, 3], [0, 1])]
        elif self.case == self.NEXT_TOP:
            da_idems = [([0], []), ([0, 2], [0]), ([0, 1], [0])]
        elif self.case == self.TOP:
            da_idems = [([2], []), ([1, 2], [0])]
        else:
            da_idems = [([0], []), ([0, 2], [0])]
        for i in range(len(da_idems)):
            l_idem, r_idem = da_idems[i]
            local_dastr.addGenerator(SimpleDAGenerator(
                local_dastr, LocalIdempotent(self.local_pmc1, l_idem),
                LocalIdempotent(self.local_pmc2, r_idem), "%d" % i))
        mod_gens = local_dastr.getGenerators()
        # After having added all generators, create u_map:
        local_dastr.auto_u_map()
        # Add arrows according to arrow_patterns, attaching each pattern to
        # every pair of generators with matching idempotents.
        for coeffs_a in list(arrow_patterns.keys()):
            if len(coeffs_a) == 1 and coeffs_a[0].isIdempotent():
                continue
            for coeff_d in arrow_patterns[coeffs_a]:
                for x, y in itertools.product(mod_gens, mod_gens):
                    if DAStructure.idemMatchDA(x, y, coeff_d, coeffs_a):
                        local_dastr.addDelta(x, y, coeff_d, coeffs_a, 1)
        return local_dastr

    @staticmethod
    def _patterns_left_middle():
        """Patterns for self.side == LEFT and self.case == self.MIDDLE."""
        # - Local PMC at left (D-side) is 0*-1-2-3-4-5-6-7*, with pairs
        #   0:(1, 4) and 2:(3, 6), and single points 1:(2,) and 3:(5,).
        # - Local PMC at right (A-side) is 0*-1-2-3*, with two single points.
        # - Single point 1:(2,) at left corresponds to single point 1:(2,) at
        #   right, and single point 3:(5,) at left corresponds to single point
        #   0:(1,) at right.
        patterns_raw = [
            ### Seeds
            ([1, (2, 3)],),
            ([(1, 2), (3, 4)],),
            ([(0, 1)], [1, (0, 5)]),
            ([(1, 2)], [1, (5, 6)]),
            ([(2, 3)], [1, (6, 7)]),
        ]
        return patterns_raw

    @staticmethod
    def _patterns_left_next_top():
        """Patterns for self.side == LEFT and self.case == self.NEXT_TOP."""
        # - Local PMC at left (D-side) is 0*-1-2-3-4-5, with pairs 0:(1, 4) and
        #   2:(3, 5), and single point 1:(2,).
        # - Local PMC at right (A-side) is 0*-1, with single point 1.
        patterns_raw = [
            ### Seeds
            ([1, (2, 3)],),
            ([(1, 2), (3, 4)],),
            ([(0, 1)], [1, (0, 5)]),
        ]
        return patterns_raw

    @staticmethod
    def _patterns_left_top():
        """Patterns for self.side == LEFT and self.case == self.TOP."""
        # - Local PMC at left (D-side) is 0*-1-2-3-4-5, with pairs 0:(1, 4) and
        #   2:(3, 5), and single point 1:(2,).
        # - Local PMC at right (A-side) is 0*-1, with single point 1.
        patterns_raw = [
            ### Seeds
            ([(3, 5)],),
            ([1, (3, 5)],),
            ([(0, 1)], [3, (0, 2)]),
        ]
        return patterns_raw

    @staticmethod
    def _patterns_left_bottom():
        """Patterns for self.side == LEFT and self.case == self.BOTTOM."""
        # - Local PMC at left (D-side) is 0-1-2-3-4-5*, with pairs 0:(0, 2) and
        #   1:(1, 4), and single point 2:(3,).
        # - Local PMC at right (A-side) is 0-1*, with single point 0.
        patterns_raw = [
            ### Seeds
            ([(0, 2)],),
            ([1, (0, 2)],),
            ([(0, 1)], [0, (3, 5)]),
        ]
        return patterns_raw
class SimpleCobordismDA(ExtendedDAStructure):
    """Cobordisms where a genus-1 split PMC is added somewhere in the starting
    PMC (on the left).
    """
    def __init__(self, start_pmc, insert_pos):
        """Specifies the starting PMC, and the point of inserting the genus-1
        split PMC.
        - insert_pos = 0: insert at bottom
        - insert_pos = start_pmc.n: insert at top
        """
        # Validate insert_pos up front (previously this was checked only
        # after end_pmc had already been constructed from it).
        assert insert_pos >= 1 and insert_pos < start_pmc.n
        self.start_pmc = start_pmc
        self.insert_pos = insert_pos
        # Prepare end_pmc: shift every point at or above insert_pos up by
        # four to make room for the inserted genus-1 split piece.
        translate = dict()
        for p in range(self.start_pmc.n):
            if p < insert_pos:
                translate[p] = p
            else:  # p >= insert_pos
                translate[p] = p + 4
        self.end_pmc = PMC(
            [(translate[p], translate[q]) for p, q in self.start_pmc.pairs] +
            [(insert_pos, insert_pos+2), (insert_pos+1, insert_pos+3)])
        # Possible cases
        self.MIDDLE, self.NEXT_BOTTOM = 0, 1
        if insert_pos >= 2:
            self.case = self.MIDDLE
        else:
            self.case = self.NEXT_BOTTOM
        self.splitting1 = PMCSplitting(
            self.start_pmc, [(insert_pos-1, insert_pos-1)])
        self.splitting2 = PMCSplitting(
            self.end_pmc, [(insert_pos-1, insert_pos+3)])
        self.local_pmc1 = self.splitting1.local_pmc
        self.local_pmc2 = self.splitting2.local_pmc
        # Required so the left to right transition on the outside can proceed.
        assert self.splitting1.outer_pmc == self.splitting2.outer_pmc
        self.local_da = self.getLocalDAStructure()
        # Uncomment to use autoComplete. Note limit on len(coeffs_a) is 5.
        # if self.case == self.MIDDLE:
        #     autoCompleteDA(self.local_da, [0, 1])
        # Initiate the ExtendedDAStructure
        ExtendedDAStructure.__init__(
            self, self.local_da, self.splitting1, self.splitting2)
        # With generators set, add grading. Choose a generator of class zero as
        # base generator.
        for gen in self.generators:
            if gen.name[0] == "0":
                base_gen = gen
                break
        self.registerHDiagram(
            getSimpleCobordismDiagram(start_pmc, insert_pos), base_gen)

    def getLocalDAStructure(self):
        """Returns the local type DA structure associated to this simple
        cobordism.
        """
        # Construct arrow_patterns.  Each raw pattern consists of a start
        # class, an end class, the A-side inputs, and finally the D-side
        # output coefficient.
        if self.case == self.MIDDLE:
            patterns_raw = self._patterns_middle()
        else:
            patterns_raw = self._patterns_next_bottom()
        arrow_patterns = dict()
        for pattern in patterns_raw:
            start_class, end_class = pattern[0], pattern[1]
            coeffs_a = []
            for i in range(2, len(pattern)-1):
                coeffs_a.append(self.local_pmc2.sd(pattern[i]))
            key = (start_class, end_class, tuple(coeffs_a))
            if key not in arrow_patterns:
                arrow_patterns[key] = []
            arrow_patterns[key].append(self.local_pmc1.sd(pattern[-1]))
        alg1 = LocalStrandAlgebra(F2, self.local_pmc1)
        alg2 = LocalStrandAlgebra(F2, self.local_pmc2)
        local_da = LocalDAStructure(F2, alg1, alg2)
        # The original part
        da_idems = [([], [2]), ([0], [0, 2])]
        for i in range(len(da_idems)):
            l_idem, r_idem = da_idems[i]
            local_da.addGenerator(SimpleDAGenerator(
                local_da, LocalIdempotent(self.local_pmc1, l_idem),
                LocalIdempotent(self.local_pmc2, r_idem), "0_%d" % i))
        # Part added due to finger-push
        da_idems_id = [([], [1]), ([0], [0, 1])]
        for i in range(len(da_idems_id)):
            l_idem, r_idem = da_idems_id[i]
            for gen_type in [1, 2]:
                local_da.addGenerator(SimpleDAGenerator(
                    local_da,
                    LocalIdempotent(self.local_pmc1, l_idem),
                    LocalIdempotent(self.local_pmc2, r_idem),
                    "%d_%d" % (gen_type, i)))
        mod_gens = local_da.getGenerators()
        # Manually take care of u_maps: each generator with a single
        # horizontal idempotent maps to the generator of the same class with
        # that idempotent removed.
        single_idems1 = local_da.single_idems1
        single_idems2 = local_da.single_idems2
        for i in range(len(single_idems1)):
            i1, i2 = single_idems1[i], single_idems2[i]
            for local_gen in mod_gens:
                idem1, idem2 = local_gen.idem1, local_gen.idem2
                if i1 in idem1 and i2 in idem2:
                    # local_gen is eligible for u_maps[i]
                    target_idem1 = idem1.removeSingleHor([i1])
                    target_idem2 = idem2.removeSingleHor([i2])
                    target_gen = [target for target in mod_gens
                                  if target.idem1 == target_idem1 and
                                  target.idem2 == target_idem2 and
                                  target.name[0] == local_gen.name[0]]
                    assert len(target_gen) == 1
                    local_da.add_u_map(i, local_gen, target_gen[0])
        # Check all u_map have been filled
        local_da.auto_u_map()
        # Add arrows according to arrow_patterns, matching generator classes
        # and idempotents.
        for key in list(arrow_patterns.keys()):
            start_class, end_class, coeffs_a = key
            if len(coeffs_a) == 1 and coeffs_a[0].isIdempotent():
                continue
            for coeff_d in arrow_patterns[key]:
                used = False
                for x, y in itertools.product(mod_gens, mod_gens):
                    if x.name[0] == "%d" % start_class and \
                       y.name[0] == "%d" % end_class and \
                       DAStructure.idemMatchDA(x, y, coeff_d, coeffs_a):
                        local_da.addDelta(x, y, coeff_d, coeffs_a, 1)
                        used = True
                if not used:
                    print("Warning: unused arrow: %s %s" % \
                        (coeffs_a, coeff_d))
        return local_da

    def _patterns_next_bottom(self):
        """Patterns in the next-to-bottom case (fixed docstring: this is not
        the middle case).
        """
        # - Local PMC at left (D-side) is 0-1*.
        # - Local PMC at right (A-side) is 0-1-2-3-4-5*, with pairs (1, 3)
        #   and (2, 4).
        # Simply take _patterns_middle and subtract one from every point.
        patterns_raw_middle = self._patterns_middle()
        patterns_raw = []
        def translate(lst):
            """Similar to translate in arcslideda.py.  Returns None when any
            point would fall below zero (pattern not applicable here).
            """
            result = []
            for entry in lst:
                if isinstance(entry, int):
                    result.append(entry-1)
                    if result[-1] == -1:
                        return None
                else:  # entry must be a pair
                    result.append((entry[0]-1, entry[1]-1))
                    if result[-1][0] == -1 or result[-1][1] == -1:
                        return None
            return result
        for pattern in patterns_raw_middle:
            new_pattern = [translate(coeff) for coeff in pattern[2:]]
            if all([entry != None for entry in new_pattern]):
                patterns_raw.append([pattern[0], pattern[1]] + new_pattern)
        return patterns_raw

    def _patterns_middle(self):
        """Patterns in the middle case."""
        # - Local PMC at left (D-side) is 0*-1-2*.
        # - Local PMC at right (A-side) is 0*-1-2-3-4-5-6*, with pairs (2, 4)
        #   and (3, 5).
        # Fix: close the data file after reading (it was previously left
        # open).
        with open("simplecob_arrows.data", "r") as input_patterns:
            patterns_raw = ast.literal_eval(input_patterns.read())
        return patterns_raw
class CobordismDARight(ComposedDAStructure):
    """Produces the type DA structure for a cobordism with larger PMC on the
    right, as the boxed tensor product of a SimpleCobordism with several
    arcslides.
    """
    def __init__(self, cob):
        """Specifies the cobordism to use. cob should be of type Cobordism,
        with cob.side == RIGHT.
        """
        assert cob.side == RIGHT
        self.n, self.genus, self.c_pair = cob.n, cob.genus, cob.c_pair
        start_pmc = linearPMC(self.genus-1)
        # Divide into four cases like in CobordismDALeft.  In each case,
        # self.slides lists the (b1, c1) arguments of the arcslides applied
        # after the simple cobordism.
        if self.c_pair == 0:
            # Bottom case:
            self.start_da = SimpleCobordismDA(start_pmc, 1)
            self.slides = [(0, 1)]
        elif self.c_pair == 2*self.genus-2:
            # Next to top case:
            self.start_da = SimpleCobordismDA(start_pmc, self.n-5)
            self.slides = [(self.n-1, self.n-2)]
        elif self.c_pair == 2*self.genus-1:
            # Top case:
            self.start_da = SimpleCobordismDA(start_pmc, self.n-5)
            self.slides = [(self.n-2, self.n-3), (self.n-1, self.n-2)]
        else:
            # Middle case
            self.c1 = cob.c1
            self.start_da = SimpleCobordismDA(start_pmc, self.c1)
            self.slides = [(self.c1+4, self.c1+3), (self.c1+1, self.c1),
                           (self.c1+2, self.c1+1), (self.c1+5, self.c1+4)]
        # Chain the arcslide DA structures onto the starting cobordism; each
        # arcslide starts at the ending PMC of the previous structure.
        self.da_list = [self.start_da]
        for b1, c1 in self.slides:
            self.da_list.append(
                ArcslideDA(Arcslide(self.da_list[-1].pmc2, b1, c1)))
        ComposedDAStructure.__init__(self, self.da_list)
| 19,009
| 39.190275
| 80
|
py
|
bfh_python
|
bfh_python-master/arcslidetest.py
|
"""Unit test for arcslide.py"""
from arcslide import *
from utility import DEFAULT_GRADING, SMALL_GRADING
import unittest
class ArcslideTest(unittest.TestCase):
    """Unit tests for arcslides and their type DD structures."""

    def testArcslide(self):
        # Basic attributes of an arcslide and its inverse.
        slide1 = Arcslide(splitPMC(1), 0, 1)
        self.assertEqual(slide1.c2, 3)
        self.assertEqual(slide1.slide_type, OVER_SLIDE)
        self.assertEqual(slide1.end_pmc, splitPMC(1))
        self.assertEqual(slide1.inverse(), Arcslide(splitPMC(1), 3, 2))
        slide2 = Arcslide(splitPMC(2), 3, 4)
        self.assertEqual(slide2.end_pmc, PMC([(0,2),(1,6),(3,5),(4,7)]))
        self.assertEqual(slide2.inverse().end_pmc, splitPMC(2))
        slide3 = Arcslide(PMC([(0,2),(1,6),(3,5),(4,7)]), 5, 6)
        self.assertEqual(slide3.slide_type, UNDER_SLIDE)
        self.assertEqual(slide3.end_pmc, PMC([(0,3),(1,6),(2,4),(5,7)]))

    def testUnderslideDDStructure(self):
        # The DD structure of each underslide must satisfy d^2 = 0.
        slide1 = Arcslide(splitPMC(1), 1, 0)
        ddstr = slide1.getDDStructure()
        self.assertTrue(ddstr.testDelta())
        slide2 = Arcslide(splitPMC(2), 1, 0)
        ddstr2 = slide2.getDDStructure()
        self.assertTrue(ddstr2.testDelta())
        slide3 = Arcslide(PMC([(0,2),(1,6),(3,5),(4,7)]), 5, 6)
        ddstr3 = slide3.getDDStructure()
        self.assertTrue(ddstr3.testDelta())

    def testOverslideDDStructure(self):
        # The DD structure of each overslide must satisfy d^2 = 0.
        slide1 = Arcslide(splitPMC(1), 0, 1)
        ddstr = slide1.getDDStructure()
        self.assertTrue(ddstr.testDelta())
        slide2 = Arcslide(splitPMC(2), 1, 0)
        ddstr2 = slide2.getDDStructure()
        self.assertTrue(ddstr2.testDelta())
        slide3 = Arcslide(splitPMC(2), 3, 4)
        ddstr3 = slide3.getDDStructure()
        self.assertTrue(ddstr3.testDelta())
        slide4 = Arcslide(splitPMC(2), 4, 3)
        ddstr4 = slide4.getDDStructure()
        self.assertTrue(ddstr4.testDelta())

    def testGenus3DDStructure(self):
        # Smoke test only: checks that the genus-3 DD structure can be built
        # without raising. No assertion is made on the result.
        slide1 = Arcslide(splitPMC(3), 0, 1)
        ddstr1 = slide1.getDDStructure()

    def testArcslideMatchDiagram(self):
        slide_to_test = [Arcslide(PMC([(0,2),(1,6),(3,5),(4,7)]), 5, 6),
                         Arcslide(splitPMC(2), 4, 3)]
        for slide in slide_to_test:
            ddstr = slide.getDDStructure()
            # Grading-set check is only meaningful under the small grading.
            if DEFAULT_GRADING == SMALL_GRADING:
                self.assertTrue(ddstr.gr_set.isAutomorphism())
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 2,374
| 37.306452
| 72
|
py
|
bfh_python
|
bfh_python-master/identityaa.py
|
"""Description of type AA structure for identity cobordism."""
from algebra import Generator, SimpleChainComplex, SimpleChainMorphism
from algebra import E0
from pmc import Strands, StrandDiagram
from utility import find
from utility import F2
# Convenient values for specifying the two sides of a pair of strand diagrams.
# Use only in the context of large complex generators.
_LEFT, _RIGHT = 0, 1
class HomotopyAA(object):
    """Contains the description of the large chain complex for type AA
    identity, and the homotopy needed to simplify it.
    """
    def __init__(self, pmc):
        """Specifies the PMC."""
        self.pmc = pmc
        self.pmc_alg = pmc.getAlgebra()

    class LargeComplexGenerator(Generator, tuple):
        """A generator of the large chain complex for type AA identity.
        Specified by two strand diagrams in the given PMC.
        """
        def __new__(cls, parent, sd_left, sd_right):
            return tuple.__new__(cls, (sd_left, sd_right))

        def __init__(self, parent, sd_left, sd_right):
            """Specifies the two strand diagrams."""
            # Note tuple initialization is automatic
            Generator.__init__(self, parent)

        def replaceLeft(self, new_left):
            """Get LargeComplexGenerator with same parent and right strand
            diagram, but new left strand diagram.
            """
            return self.__class__(self.parent, new_left, self[1])

        def replaceRight(self, new_right):
            """Get LargeComplexGenerator with same parent and left strand
            diagram, but new right strand diagram.
            """
            return self.__class__(self.parent, self[0], new_right)

    def _computeLargeChainComplex(self):
        """Computes the large chain complex for type AA identity."""
        # Dictionary mapping total multiplicity profile to chain complexes.
        self.partial_cxs = dict()
        # Find the set of generators: pairs of algebra generators whose left
        # idempotents are complementary.
        alg_gens = self.pmc_alg.getGenerators()
        for gen_left in alg_gens:
            for gen_right in alg_gens:
                if gen_left.getLeftIdem() == gen_right.getLeftIdem().comp():
                    total_mult = [a + b for a, b in zip(
                        gen_left.multiplicity, gen_right.multiplicity)]
                    total_mult = tuple(total_mult)
                    if total_mult not in self.partial_cxs:
                        self.partial_cxs[total_mult] = SimpleChainComplex(F2)
                    cur_gen = self.LargeComplexGenerator(
                        self.partial_cxs[total_mult], gen_left, gen_right)
                    self.partial_cxs[total_mult].addGenerator(cur_gen)

        def hasDifferential(gen_from, gen_to):
            """Determine whether there is a differential between two generators
            of the large chain complex.
            """
            left_from, right_from = gen_from
            left_to, right_to = gen_to
            mult_left_from, mult_left_to = \
                left_from.multiplicity, left_to.multiplicity
            diff = [a-b for a, b in zip(mult_left_from, mult_left_to)]
            if all([n == 0 for n in diff]):
                # Left multiplicities agree: the differential comes from the
                # differential of one of the two strand diagrams.
                if left_from == left_to and right_to in right_from.diff():
                    return True
                if right_from == right_to and left_from in left_to.diff():
                    return True
            elif all([n == 0 or n == 1 for n in diff]):
                # Left multiplicity drops by one over a (necessarily
                # contiguous) interval: the differential moves the strand
                # (start, end) from the left diagram to the right diagram.
                pos_one = [i for i in range(len(diff)) if diff[i] == 1]
                start, end = pos_one[0], pos_one[-1]+1
                if pos_one == list(range(start, end)):
                    st_move = Strands(self.pmc, [(start, end)])
                    if not st_move.rightCompatible(left_to.getLeftIdem()):
                        return False
                    left_move = StrandDiagram(
                        self.pmc_alg, None, st_move, left_to.getLeftIdem())
                    if not st_move.rightCompatible(right_from.getLeftIdem()):
                        return False
                    right_move = StrandDiagram(
                        self.pmc_alg, None, st_move, right_from.getLeftIdem())
                    return left_move * left_to == 1*left_from and \
                        right_move * right_from == 1*right_to
            else:
                return False

        # Compute differentials
        for total_mult, cx in list(self.partial_cxs.items()):
            gens = cx.getGenerators()
            for gen_from in gens:
                for gen_to in gens:
                    if hasDifferential(gen_from, gen_to):
                        cx.addDifferential(gen_from, gen_to, 1)
            cx.checkDifferential()

    def getHomotopyFrom(self, gen_from):
        """Returns the list of generators gen_from has homotopy to."""
        sd_left, sd_right = gen_from
        return [self.LargeComplexGenerator(gen_from.parent, left, right)
                for left, right in homotopyMap(sd_left, sd_right)]

    def _computeHomotopy(self):
        """Computes the homotopy map on the large complex."""
        self.partial_hts = dict()
        for total_mult, cx in list(self.partial_cxs.items()):
            if all([n == 0 for n in total_mult]):
                continue  # No homotopy needed
            cur_homotopy = SimpleChainMorphism(cx, cx)
            gens = cx.getGenerators()
            # Now find the homotopy map
            for gen_from in gens:
                homotopy_to = self.getHomotopyFrom(gen_from)
                for gen_to in homotopy_to:
                    cur_homotopy.addMorphism(gen_from, gen_to, 1)
            self.partial_hts[total_mult] = cur_homotopy

    def testHomotopy(self):
        """Test the identity for homotopy: dH + Hd = id on each partial
        complex of nonzero total multiplicity.
        """
        self._computeLargeChainComplex()
        self._computeHomotopy()
        for total_mult, cx in list(self.partial_cxs.items()):
            if all([n == 0 for n in total_mult]):
                continue  # No homotopy needed
            cur_homotopy = self.partial_hts[total_mult]
            for gen in cx.getGenerators():
                assert cur_homotopy.apply(gen.diff()) + \
                    cur_homotopy.apply(gen).diff() == 1 * gen
def _getIntervalOrdering(pmc):
"""Get the special ordering of intervals needed for computing the
homotopy (the order on traversing the glued circle).
"""
cur_pt = pmc.n - 1
result = []
while True:
cur_pt = pmc.otherp[cur_pt]
if cur_pt == 0:
assert len(result) == pmc.n-1
break
result.append(cur_pt - 1)
cur_pt -= 1
return result
def _moveLeft(sd_pair, a, b):
    """Transfer the strand (a, b) from the right side to the left side of
    sd_pair. The move must succeed.
    """
    sd_left, sd_right = sd_pair
    # Factor (a, b) off the right diagram ...
    new_right = _factor(sd_right, a, b)
    assert new_right is not None
    # ... and multiply it onto the left diagram.
    mult_sd = StrandDiagram(sd_left.parent, None, [(a, b)],
                            sd_left.getLeftIdem())
    product = mult_sd * sd_left
    assert product != E0
    return (product.getElt(), new_right)
def _moveRight(sd_pair, a, b):
    """Transfer the strand (a, b) from the left side to the right side of
    sd_pair. The move must succeed.
    """
    sd_left, sd_right = sd_pair
    # Factor (a, b) off the left diagram ...
    new_left = _factor(sd_left, a, b)
    assert new_left is not None
    # ... and multiply it onto the right diagram.
    mult_sd = StrandDiagram(sd_left.parent, None, [(a, b)],
                            sd_right.getLeftIdem())
    product = mult_sd * sd_right
    assert product != E0
    return (new_left, product.getElt())
def _uncross(sd, start1, end1, start2, end2):
    """Resolve the crossing of the strands (start1, end1) and (start2, end2)
    in sd, replacing them with (start1, end2) and (start2, end1).
    """
    new_strands = list(sd.strands)
    # A degenerate "strand" (p, p) represents a horizontal and is not stored.
    for s, e in ((start1, end1), (start2, end2)):
        if s != e:
            new_strands.remove((s, e))
    new_strands.extend([(start1, end2), (start2, end1)])
    result = StrandDiagram(sd.parent, sd.getLeftIdem(), new_strands)
    # Uncrossing is one term of the differential of sd.
    assert result in sd.diff()
    return result
def _cross(sd, start1, end1, start2, end2):
    """Introduce a crossing between the strands (start1, end1) and
    (start2, end2) of sd, replacing them with (start1, end2) and
    (start2, end1).
    """
    new_strands = list(sd.strands)
    new_strands.remove((start1, end1))
    new_strands.remove((start2, end2))
    # A degenerate "strand" (p, p) represents a horizontal and is not stored.
    for s, e in ((start1, end2), (start2, end1)):
        if s != e:
            new_strands.append((s, e))
    result = StrandDiagram(sd.parent, sd.getLeftIdem(), new_strands)
    # sd is recovered by uncrossing, i.e. it is a term of result's
    # differential.
    assert sd in result.diff()
    return result
def _factor(sd, a, b):
    """Try to factor off a strand (a, b) (from the left) from sd. Returns
    the new strand diagram if successful. Otherwise returns None.
    """
    # A candidate must start at a and extend at least to b.
    candidates = [(p, q) for p, q in sd.strands if p == a and q >= b]
    for p, q in candidates:
        remaining = list(sd.strands)
        remaining.remove((p, q))
        if q > b:
            # The part of the strand above b stays behind.
            remaining.append((b, q))
        result = StrandDiagram(sd.parent, None, remaining,
                               sd.getRightIdem())
        to_mult = StrandDiagram(sd.parent, None, [(a, b)],
                                result.getLeftIdem())
        # Verify the factorization: (a, b) * result must reproduce sd.
        if to_mult * result == 1 * sd:
            return result
    return None
def homotopyMap(sd_left, sd_right):
    """Implements the homotopy map. Input is a pair of strand diagrams from the
    same PMC. Outputs a list of (from zero to two) pairs that the homotopy maps
    the input pair to.
    """
    # Two helper functions
    def startType(sd, pair):
        """Returns -1 if sd has horizontal strands at pair, ``n`` if sd has
        strand starting at point ``n`` in the pair, -2 otherwise (left
        idempotent of sd is not occupied at pair).
        """
        for p, q in sd.strands:
            if sd.pmc.pairid[p] == pair:
                return p
        if pair in sd.left_idem:
            return -1
        return -2
    def getStrandsAtPoint(sd, point):
        """List of strands (recorded as pair (p,q)) covering the point
        key_pos+1, including any double horizontal at point key_pos+1
        (recorded as (key_pos+1, key_pos+1)).
        """
        result = [(p, q) for p, q in sd.strands if p <= point <= q]
        if sd.pmc.pairid[point] in sd.double_hor:
            result.append((point, point))
        return result
    # To start, we find key_pos and other important locations
    result = []
    pmc = sd_left.pmc
    assert pmc == sd_right.pmc
    # Interval-by-interval sum of multiplicities of the two diagrams.
    total_mult = [a + b for a, b in zip(sd_left.multiplicity,
                                        sd_right.multiplicity)]
    ordering = _getIntervalOrdering(pmc)
    # Index of first interval with multiplicity >= 2 (note multiplicity
    # cannot jump by more than 1.
    lowest_two = find(total_mult, 2)
    # Compute key_pos for two cases
    if lowest_two == -1:
        # Total multiplicity one case
        # NOTE: despite the name, unoccupied_id collects intervals that ARE
        # occupied (multiplicity != 0), in the order given by `ordering`.
        unoccupied_id = [i for i in range(len(ordering))
                         if total_mult[ordering[i]] != 0]
        assert len(unoccupied_id) > 0
        key_pos = ordering[unoccupied_id[0]]
        # Always use the lowest / highest possible interval, does not appear
        # helpful.
        # key_pos = -1
        # for i in range(len(ordering)):
        #     if total_mult[ordering[i]] != 0 and \
        #        (i == 0 or total_mult[ordering[i-1]] == 0):
        #         if key_pos == -1 or ordering[i] > key_pos:
        #             key_pos = ordering[i]
        total_mult_one = True
    else:
        # Total multiplicity >1 case
        key_pos = lowest_two - 1
        total_mult_one = False
    # Compute key_pair and forbid_pos
    key_pair = pmc.pairid[key_pos+1]
    forbid_pos = pmc.otherp[key_pos+1]
    assert total_mult[key_pos] != 0
    if total_mult_one:
        assert forbid_pos == pmc.n-1 or total_mult[forbid_pos] == 0
    # Now, some more specific information about this generator
    type_left, type_right = \
        startType(sd_left, key_pair), startType(sd_right, key_pair)
    assert type_left != forbid_pos and type_right != forbid_pos
    left_avail = getStrandsAtPoint(sd_left, key_pos+1)
    right_avail = getStrandsAtPoint(sd_right, key_pos+1)
    # Strands covering key_pos+1 on both sides, tagged by side and sorted;
    # the two lowest determine which case of the homotopy applies.
    total_avail = sorted([((p, q), _LEFT) for p, q in left_avail] +
                         [((p, q), _RIGHT) for p, q in right_avail])
    assert len(total_avail) >= 2
    (start1, end1), side1 = total_avail[0]
    (start2, end2), side2 = total_avail[1]
    # Homotopies for the three usual cases
    if side1 == _LEFT and side2 == _LEFT:
        # First case: both lowest available strands are on the left.
        # Un-cross two strands on the left.
        if end1 <= end2:
            return result
        result.append((_uncross(sd_left, start1, end1, start2, end2), sd_right))
    elif side1 == _RIGHT and side2 == _RIGHT:
        # Second case: both lowest available strands are on the right.
        # Cross two strands on the right.
        if end1 >= end2:
            return result
        result.append((sd_left, _cross(sd_right, start1, end1, start2, end2)))
    elif side1 == _RIGHT and side2 == _LEFT:
        # Third (and fourth case in paper). Lowest strand on the right
        # and the second lowest on the left. Move strand to the left.
        result.append(_moveLeft((sd_left, sd_right), start1, start2))
    else:
        return result
    if not total_mult_one:
        return result
    # Three special cases only for the multiplicity one case.
    # They apply only when some strand of sd_left ends at forbid_pos.
    forbid_start = [a for a, b in sd_left.strands if b == forbid_pos]
    if not forbid_start:
        return result
    forbid_start = forbid_start[0]
    gen_start = (sd_left, sd_right)
    if side1 == _RIGHT and side2 == _RIGHT:
        gen_step1 = (sd_left, _cross(sd_right, start1, end1, start2, end2))
        gen_step2 = _moveRight(gen_step1, forbid_start, forbid_pos)
        gen_step3 = _moveLeft(gen_step2, start1, end1)
    elif side1 == _RIGHT and side2 == _LEFT and forbid_start != key_pos+1:
        gen_step1 = _moveLeft(gen_start, start1, key_pos+1)
        gen_step2 = _moveRight(gen_step1, forbid_start, forbid_pos)
        gen_step3 = (_uncross(gen_step2[0], start1, end2, key_pos+1, key_pos+1)
                     ,gen_step2[1])
    elif side1 == _RIGHT and side2 == _LEFT and forbid_start == key_pos+1:
        gen_step1 = _moveLeft(gen_start, start1, key_pos+1)
        gen_step2 = _moveRight(gen_step1, start1, forbid_pos)
        gen_step3 = _moveLeft(gen_step2, start1, key_pos+1)
    else:
        return result
    result.append(gen_step3)
    return result
| 14,565
| 38.367568
| 80
|
py
|
bfh_python
|
bfh_python-master/hdiagramtest.py
|
"""Unit test for hdiagram.py"""
from hdiagram import *
from hdiagram import _Point, _Segment, _OrientedSegment, _Path, _Cell, \
_Domain, _OneChain
from arcslide import Arcslide
from cobordism import Cobordism
from cobordism import LEFT
from pmc import antipodalPMC, linearPMC, splitPMC
import unittest
class OrientedSegmentTest(unittest.TestCase):
    def testOrientedSegment(self):
        """Testing functions in OrientedSegment."""
        p1 = _Point(1)
        p2 = _Point(2)
        seg = _Segment(12, p1, p2)
        segp = seg.oseg()
        segn = segp.opp()
        # Orientation reversal swaps the endpoints...
        self.assertEqual(segp.start, p1)
        self.assertEqual(segp.end, p2)
        self.assertEqual(segn.start, p2)
        self.assertEqual(segn.end, p1)
        # ...and negates the coefficient in the induced one-chain.
        self.assertEqual(segp.toOneChain(), {seg : 1})
        self.assertEqual(segn.toOneChain(), {seg : -1})
class PathTest(unittest.TestCase):
    def setUp(self):
        """Build a cycle of n points and segments, plus a loop segment based
        at pts[0], and several paths used across the tests below.
        """
        self.n = 10
        self.pts = [_Point(i) for i in range(self.n)]
        self.segs = [_Segment(i, self.pts[i], self.pts[(i+1)%self.n])
                     for i in range(self.n)]
        self.osegs = [seg.oseg() for seg in self.segs]
        # Same segments with reversed orientation.
        self.osegsr = [seg.oseg().opp() for seg in self.segs]
        self.loopseg = _Segment("loop", self.pts[0], self.pts[0])
        self.oloopseg = self.loopseg.oseg()
        # These should construct valid paths
        self.path1 = _Path([], "empty")
        self.path2 = _Path([self.oloopseg], "small_loop", True)
        self.path3 = _Path(self.osegs[0:-1], "straight")
        self.path4 = _Path(self.osegs, "loop", True)
        self.path5 = _Path(self.osegs*2, "twoloops")
        self.path6 = _Path(reversed(self.osegsr), "opploop", True)
    def testPathValidity(self):
        """Testing the validity checks in Path."""
        # Some FAIL tests: disconnected, not a closed loop, wrong orientation.
        self.assertRaises(ValueError, _Path,
                          self.osegs[0:1] + self.osegs[2:])
        self.assertRaises(ValueError, _Path,
                          self.osegs[0:-1], "notloop", True)
        self.assertRaises(ValueError, _Path, self.osegsr)
    def testOpp(self):
        """Testing the opp function in path."""
        self.assertEqual(self.path1.opp("empty"), self.path1)
        self.assertNotEqual(self.path2.opp("small_loop"), self.path2)
        self.assertEqual(self.path4.opp("opploop"), self.path6)
    def testToOneChain(self):
        """Testing toOneChain function in Path."""
        # Coefficients: 0 for the empty path, 2 for a double loop, -1 for
        # the orientation-reversed loop.
        self.assertEqual(list(self.path1.toOneChain().values()), [])
        self.assertEqual(list(self.path5.toOneChain().values()), [2]*self.n)
        self.assertEqual(list(self.path6.toOneChain().values()), [-1]*self.n)
class CellTest(unittest.TestCase):
    def testCell(self):
        """Testing functions in Cell."""
        pts = [_Point(i) for i in range(3)]
        segs = [_Segment("0", pts[0], pts[1]),
                _Segment("1", pts[1], pts[0]),
                _Segment("2", pts[2], pts[2])]
        # Cell with a single boundary cycle.
        cell1 = _Cell("c1", _Path([segs[0].oseg(), segs[1].oseg()],
                                  iscycle = True))
        # Cell with two boundary cycles (an annulus-like cell).
        cell2 = _Cell("c2", [_Path([segs[0].oseg(), segs[1].oseg()],
                                   iscycle = True),
                             _Path([segs[2].oseg()], iscycle = True)])
        self.assertEqual(cell1.toDomain(), {cell1 : 1})
        self.assertEqual(cell1.bdOneChain(), {segs[0]:1, segs[1]:1})
        self.assertEqual(cell2.bdOneChain(), {segs[0]:1, segs[1]:1, segs[2]:1})
class DomainTest(unittest.TestCase):
    def testDiff(self):
        """Testing diff function in Domain."""
        pts = [_Point(i) for i in range(4)]
        segs = [_Segment("0", pts[0], pts[1]),
                _Segment("1", pts[1], pts[2]),
                _Segment("2", pts[2], pts[0]),
                _Segment("3", pts[1], pts[3]),
                _Segment("4", pts[3], pts[0])]
        # Two cells sharing the segment segs[0], with opposite orientations,
        # so segs[0] cancels in the boundary of their sum.
        cell1 = _Cell("c1", _Path([seg.oseg() for seg in segs[0:3]],
                                  iscycle = True))
        cell2 = _Cell("c2", _Path([seg.oseg() for seg in segs[0:1]+segs[3:]],
                                  iscycle = True).opp())
        domain1 = cell1.toDomain() + cell2.toDomain()
        self.assertEqual(domain1.diff(),
                         {segs[1]:1, segs[2]:1, segs[3]:-1, segs[4]:-1})
class DiagramBuildTest(unittest.TestCase):
    def testDiagramFromCycleInfo(self):
        """Testing the function diagramFromCycleInfo. Only checks that the
        constructions below complete without raising.
        """
        # A standard diagram for solid torus
        diagram1 = diagramFromCycleInfo("Solid torus",
            num_interior_point = 1, length_border = [4],
            alpha_arcs = [[(0,0),0,(0,2)], [(0,1),(0,3)]],
            beta_cycles = [[0]], crossing_orientation = [-1])
        # A standard diagram for the identity cobordism
        diagram2 = diagramFromCycleInfo("Identity cobordism",
            num_interior_point = 4, length_border = [4,4],
            alpha_arcs = [[(0,0),3,(0,2)], [(0,1),1,(0,3)],
                          [(1,0),0,(1,2)], [(1,1),2,(1,3)]],
            beta_cycles = [[0,1],[2,3]], crossing_orientation = [1,-1,1,-1])
        # A standard diagram for the anti-braid resolution
        diagram3 = diagramFromCycleInfo("Antibraid resolution",
            num_interior_point = 3, length_border = [4,4],
            alpha_arcs = [[(0,0),2,(0,2)], [(0,1),(0,3)],
                          [(1,0),(1,2)], [(1,1),0,1,(1,3)]],
            beta_cycles = [[0], [1,2]], crossing_orientation = [1,1,-1])
        # Uncomment to see full printout of diagrams
        # print repr(diagram1), repr(diagram2), repr(diagram3)
class CommonDiagramsTest(unittest.TestCase):
    def testIdentityDiagram(self):
        """Identity diagrams have the expected PMCs, number of generators,
        and number of periodic domains.
        """
        # NOTE(review): ``PMC`` is not in the explicit import list above;
        # presumably re-exported by ``from hdiagram import *`` — verify.
        pmc_to_test = [splitPMC(1), splitPMC(2), linearPMC(2), antipodalPMC(2)]
        pmc_to_test.append(PMC([(0,2),(1,6),(3,5),(4,7)]))
        pmc_to_test += [splitPMC(3), antipodalPMC(4)]
        # Expected number of HF generators, indexed by genus-1. The values
        # 2, 6, 20, 70 match the central binomial coefficients C(2g, g).
        genus_to_size = [2, 6, 20, 70]
        for pmc in pmc_to_test:
            diagram = getIdentityDiagram(pmc)
            self.assertEqual(diagram.getPMCs(), [pmc.opp(), pmc])
            expected_size = genus_to_size[pmc.genus-1]
            self.assertEqual(len(diagram.getHFGenerators()), expected_size)
            self.assertEqual(len(diagram.getPeriodicDomains()), pmc.genus*2)
    def testArcslideDiagram(self):
        """Arcslide diagrams have the expected PMCs, generators, and closed
        periodic-domain boundaries.
        """
        slide_to_test = [Arcslide(splitPMC(2), 4, 3),
                         Arcslide(splitPMC(2), 2, 3)]
        for slide in slide_to_test:
            diagram = getArcslideDiagram(slide)
            self.assertEqual(diagram.getPMCs(),
                             [slide.start_pmc.opp(), slide.end_pmc])
            self.assertEqual(len(diagram.getHFGenerators()), 8)
            periodic_domains = diagram.getPeriodicDomains()
            self.assertEqual(len(periodic_domains), 4)
            for domain in periodic_domains:
                # Alpha part of the boundary must itself be a cycle.
                alpha_bd = diagram.restrictOneChain(domain.diff(), ALPHA)
                self.assertEqual(diagram.restrictZeroChain(alpha_bd.diff()), 0)
    def testHandlebodyDiagram(self):
        """Basic checks on the infinity-framed handlebody diagram."""
        diagram = getInfFrameDiagram(2)
        self.assertEqual(diagram.getPMCs(), [splitPMC(2)])
        self.assertEqual(len(diagram.getHFGenerators()), 1)
        periodic_domains = diagram.getPeriodicDomains()
        self.assertEqual(len(periodic_domains), 2)
        for domain in periodic_domains:
            alpha_bd = diagram.restrictOneChain(domain.diff(), ALPHA)
            self.assertEqual(diagram.restrictZeroChain(alpha_bd.diff()), 0)
        diagram2 = getPlatDiagram(2)
        # Uncomment to see full printout of diagrams
        # print repr(diagram), repr(diagram2)
    def testAdmHandlebodyDiagram(self):
        """Smoke test: the admissible zero-framed diagram constructs."""
        diagram = getZeroFrameAdmDiagram(2)
        # Uncomment to see full printout of diagrams
        # print repr(diagram)
    def testSimpleCobordismDiagram(self):
        """Smoke test: the simple cobordism diagram constructs."""
        diagram = getSimpleCobordismDiagram(splitPMC(1), 1)
        # Uncomment to see full printout of diagrams
        # print repr(diagram)
    def testGetCobordismDiagramLeft(self):
        """Smoke test: left cobordism diagrams construct for several pairs."""
        for c_pair in [0, 2, 3]:
            diagram = getCobordismDiagramLeft(Cobordism(2, c_pair, LEFT))
            # Uncomment to see full printout of diagrams
            # print repr(diagram)
    def testConnectingDomain(self):
        """Connecting domains exist between all pairs of generators and have
        the expected boundary, Maslov index, and grading.
        """
        diagram = getIdentityDiagram(splitPMC(2))
        gens = diagram.getHFGenerators()
        for x in gens:
            for y in gens:
                domain = diagram.getConnectingDomain(x, y)
                self.assertTrue(domain is not None)
                # Alpha curves go from x to y
                alpha_bd = diagram.restrictOneChain(domain.diff(), ALPHA)
                self.assertEqual(diagram.restrictZeroChain(alpha_bd.diff()),
                                 y.toZeroChain() - x.toZeroChain())
                self.assertEqual(diagram.getMaslov(domain, x, y), 0)
                gr = diagram.getBigGrading(domain, x, y)
                self.assertEqual(gr[0] * gr[1].Ropp(), 0)
class ComputeGradingTest(unittest.TestCase):
    def testDDBigGrading(self):
        """Smoke test for DD grading computation."""
        # Just check it will compute grading without raising errors.
        # Correctness of grading is crossed checked with DD structures.
        diagram = getIdentityDiagram(splitPMC(2))
        base_gen = diagram.getHFGenerators()[0]
        gr_set, gr_vals = diagram.computeDDGrading(base_gen)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 9,333
| 44.091787
| 79
|
py
|
bfh_python
|
bfh_python-master/algebra.py
|
"""Definitions of core algebraic objects.
Some design decisions:
Each generator can belong to only one chain complex or module. This allows for
the possibility of ``a.diff()`` where ``a`` is a Generator.
"""
import heapq
from numbers import Number
from utility import NamedObject, SummableDict
from utility import fracToInt, memorize, memorizeHash, safeMultiply
from utility import F2
class FreeModule(object):
    """A free module over a fixed ground ring."""

    def __init__(self, ring):
        """Record the ground ring. The ring should be either a subclass of
        Ring or a python Number.
        """
        self.ring = ring

    def getGenerators(self):
        """Return a list of all generators. Subclasses without an explicit
        finite generating set may leave this unimplemented.
        """
        raise NotImplementedError("Get set of generators not implemented.")
class Generator(object):
    """Represents a generator of a free module. By default, generators are
    distinguished by (python) identity. Implement __eq__, __ne__, and __hash__
    for custom behavior.
    """
    def __init__(self, parent):
        """Only information that every generator needs is the parent module."""
        self.parent = parent
    # Hash-based ordering: arbitrary but usable for sorting / heapq. Not
    # stable across interpreter runs.
    def __lt__(self, other):
        return hash(self) < hash(other)
    def elt(self, coeff = 1):
        """Returns the element coeff * self. ELT_CLASS is assigned at module
        level (to Element) after Element is defined.
        """
        return self.ELT_CLASS({self : coeff})
    def diff(self):
        """Returns the differential of this generator. Make sense only if
        parent module implements diff().
        """
        return self.parent.diff(self)
    def antiDiff(self):
        """Computes the dual of differential. Make sense only if parent module
        implements antiDiff().
        """
        return self.parent.antiDiff(self)
    def factor(self):
        """Find all ways to factor this generator into a product of two
        generators. Make sense only if parent module implements factor().
        """
        return self.parent.factor(self)
    def delta(self):
        """Returns the delta of this generator (for type D and DD structures).
        Make sense only if parent module implements delta().
        """
        return self.parent.delta(self)
    def __mul__(self, other):
        """Multiplies this generator by ``other`` on the left. Can be either
        algebra multiplication or module action. Usually returns an element
        rather than a generator.
        """
        if isinstance(other, Number):
            return self.elt(other)
        elif hasattr(self.parent, "multiply"):
            return self.parent.multiply(self, other)
        else:
            # Let python try other.__rmul__ instead.
            return NotImplemented
    def __rmul__(self, other):
        """Multiplies this generator by ``other`` on the right. Usually
        represents a left module action (with ``other`` an algebra generator).
        """
        if isinstance(other, Number):
            return self.elt(other)
        elif hasattr(self.parent, "rmultiply"):
            return self.parent.rmultiply(self, other)
        else:
            return NotImplemented
    def toSimpleGenerator(self, name):
        """Convert to a SimpleGenerator with the given name. All fields are
        preserved, except ``name`` which is overwritten, and _hash_val which is
        removed, if present.
        """
        new_obj = SimpleGenerator(self.parent, name)
        new_obj.__dict__.update(self.__dict__)
        new_obj.name = name # to make sure original name is overwritten
        if hasattr(new_obj, '_hash_val'):
            del new_obj._hash_val # reset hash value
        return new_obj
class SimpleGenerator(Generator, NamedObject):
    """Generator that carries a name, and is distinguished by that name
    (via NamedObject) rather than by python identity.
    """
    def __init__(self, parent, name):
        """Specifies name and parent module."""
        Generator.__init__(self, parent)
        NamedObject.__init__(self, name)
class Element(SummableDict):
    """Represents an element of a free module, as a dictionary from generators
    to coefficients. For example: a+2b will be represented as {a:1,b:2}.
    """
    def __init__(self, data = None):
        """Corrects type of coefficients if necessary."""
        if data is None:
            data = {}
        SummableDict.__init__(self, data)
        if self:
            # Coerce all coefficients into the ground ring of the parent
            # module of (an arbitrary) generator.
            convert = self.getElt().parent.ring.convert
            for key, value in list(self.items()):
                self[key] = convert(value)
    def __str__(self):
        if self == 0:
            return "0"
        terms = []
        for gen, coeff in list(self.items()):
            if coeff == 1:
                terms.append(str(gen))
            else:
                terms.append(str(coeff)+"*"+str(gen))
        return "+".join(terms)
    def __repr__(self):
        return str(self)
    def __mul__(self, other):
        # First try multiplying each coefficient with other, using the function
        # in SummableDict.
        result = SummableDict.__mul__(self, other)
        if result != NotImplemented:
            return result
        # Now try to multiply each key by other on the left.
        result = E0
        for k, v in list(self.items()):
            prod = safeMultiply(k, other)
            if prod is NotImplemented:
                return NotImplemented
            result += [term * (v * coeff) for term, coeff in list(prod.items())]
        return result
    def __rmul__(self, other):
        # First try multiplying each coefficient with other, using the function
        # in SummableDict.
        result = SummableDict.__rmul__(self, other)
        if result != NotImplemented:
            return result
        # Now try to multiply key by other on the left.
        result = E0
        for k, v in list(self.items()):
            prod = safeMultiply(other, k)
            if prod is NotImplemented:
                return NotImplemented
            result += [term * (v * coeff) for term, coeff in list(prod.items())]
        return result
    def diff(self):
        """Returns the differential of this element."""
        return sum([coeff * gen.diff() for gen, coeff in list(self.items())], E0)
# Class used by Generator.elt() to wrap a generator into an element.
Generator.ELT_CLASS = Element
# Short-hand for the zero (empty) element.
E0 = Element()
class ChainComplex(FreeModule):
    """Represents a general chain complex."""
    def diff(self, gen):
        """Returns the differential of a generator. """
        raise NotImplementedError("Differential not implemented.")
    @memorize
    def _getAntiDiffMap(self):
        """Helper function generating tables of dual of differential, for calls
        to antiDiff. Computed once for the whole complex (memorized).
        """
        gen_list = self.getGenerators()
        antiDiffMap = {}
        for gen in gen_list:
            antiDiffMap[gen] = E0
        for gen in gen_list:
            # If dgen appears in d(gen) with coefficient c, then gen appears
            # in antiDiff(dgen) with coefficient c.
            for dgen, coeff in list(gen.diff().items()):
                antiDiffMap[dgen] += coeff * gen
        return antiDiffMap
    def antiDiff(self, gen):
        """Returns the dual of the differential of gen, as an element of this
        algebra. The element is a sum of all terms c*y, for which gen appears
        in the dy with coefficient c.
        By default, need getGenerators() to be implemented. antiDiff of all
        generators is computed at once.
        """
        return self._getAntiDiffMap()[gen]
class SimpleChainComplex(ChainComplex):
    """Represents a chain complex with a finite number of generators, with
    explicitly stored generating set and differential. The generating set is
    stored as a python set, and differential is stored as a dictionary mapping
    from generators to elements. Each generator must be a key in the dictionary
    (even if its differential is zero).
    """
    def __init__(self, ring):
        """Initialize an empty chain complex."""
        ChainComplex.__init__(self, ring)
        self.generators = set()
        self.differential = dict()
    def __str__(self):
        result = "Chain complex.\n"
        for k, v in list(self.differential.items()):
            result += "d(%s) = %s\n" % (k, v)
        return result
    def __repr__(self):
        return str(self)
    def __len__(self):
        # Number of generators.
        return len(self.generators)
    def diff(self, generator):
        return self.differential[generator]
    def diffElt(self,elt):
        "Return the differential of Element elt of this SimpleChainComplex"
        answer = E0
        for x in list(elt.keys()):
            answer += elt[x]*self.diff(x)
        return answer
    def isCycle(self, elt):
        "Return True if Element elt is a cycle in this SimpleChainComplex"
        return not self.diffElt(elt)
    def isBoundary(self, elt):
        "Return True if Element elt is a boundary in this SimpleChainComplex"
        if not self.isCycle(elt):
            return False
        # Compare ranks of the simplified complex with and without a new
        # generator eta with d(eta) = elt: elt is a boundary iff adding eta
        # increases the rank of homology... by one new free generator.
        cxcopy = self.copy(returnDicts=False)
        (cxcopy2,cxcopy2_to_cx,cx_to_cxcopy2) = self.copy(returnDicts=True)
        cxcopy.simplify()
        newgen = SimpleGenerator(cxcopy2, "eta")
        cxcopy2.addGenerator(newgen)
        for y in list(elt.keys()):
            cxcopy2.addDifferential(newgen, cx_to_cxcopy2[y], elt[y])
        cxcopy2.simplify()
        if len(cxcopy2) == len(cxcopy)-1:
            return False
        if len(cxcopy2) == len(cxcopy)+1:
            return True
        # NOTE(review): falls through returning None (falsy) when neither
        # length condition holds — presumably unreachable; verify.
    def getGenerators(self):
        return list(self.generators)
    def reindex(self):
        """Replace the generators by simple generators indexed by integers. The
        names of the new generators are 'g1', 'g2', etc.
        """
        gen_list = list(self.generators)
        new_gen_list = []
        # Dictionary mapping original generators to new ones
        translate_dict = dict()
        for i in range(len(gen_list)):
            new_gen = gen_list[i].toSimpleGenerator("g%d"%(i+1))
            new_gen_list.append(new_gen)
            translate_dict[gen_list[i]] = new_gen
        self.generators = set(new_gen_list)
        new_diff = dict()
        for gen, dgen in list(self.differential.items()):
            new_diff[translate_dict[gen]] = dgen.translateKey(translate_dict)
        self.differential = new_diff
        # Carry the grading over to the renamed generators, if present.
        if hasattr(self, "grading"):
            new_grading = dict()
            for gen, gr in list(self.grading.items()):
                if gen in translate_dict: # gen is still in chain complex
                    new_grading[translate_dict[gen]] = gr
            self.grading = new_grading
    def addGenerator(self, generator):
        """Add a generator. No effect if the generator already exists."""
        assert generator.parent == self
        self.generators.add(generator)
        if generator not in self.differential:
            self.differential[generator] = E0
    def addDifferential(self, gen_from, gen_to, coeff):
        """Add coeff * gen_to to the differential of gen_from. Both gen_from
        and gen_to should be generators of this complex.
        """
        assert gen_from.parent == self and gen_to.parent == self
        self.differential[gen_from] += coeff * gen_to
    def simplify(self, find_homology_basis = False,
                 cancellation_constraint = None):
        """Simplify a chain complex using cancellation lemma."""
        # Build dictionary of coefficients
        arrows = dict()
        for x in self.generators:
            arrows[x] = dict()
        for x in self.generators:
            for y, coeff in list(self.differential[x].items()):
                arrows[x][y] = coeff
        arrows = simplifyComplex(
            arrows, F2.zero, find_homology_basis,
            cancellation_constraint = cancellation_constraint)
        # Now rebuild the chain complex
        self.generators = set(arrows.keys())
        self.differential = dict()
        for x in arrows:
            self.differential[x] = E0
            for y, coeff in list(arrows[x].items()):
                self.differential[x] += coeff * y
    def checkDifferential(self):
        """Checks the relation d^2 for differentials."""
        for gen in self.generators:
            assert gen.diff().diff() == 0
    def checkGrading(self):
        """Check grading is consistent with differentials."""
        for x in self.generators:
            for y, coeff in list(x.diff().items()):
                # Differential must drop the grading by exactly one.
                assert self.grading[x] - 1 == self.grading[y]
    def getGradingInfo(self):
        """Shows the distribution of gradings in an easy-to-read format."""
        distr_by_spinc = dict()
        # Pick an arbitrary generator's grading as the reference point.
        for x in self.generators:
            ref_grading = self.grading[x]
            break
        for x in self.generators:
            maslov, mod, spinc = \
                self.gr_set.eltDiffShortForm(self.grading[x], ref_grading)
            spinc = tuple(spinc)
            if spinc not in distr_by_spinc:
                distr_by_spinc[spinc] = []
            distr_by_spinc[spinc].append(maslov)
        # Collapse each spin-c structure's Maslov distribution into a tuple of
        # counts, then count how many spin-c structures share each pattern.
        distr_count = dict()
        for spinc, distr in list(distr_by_spinc.items()):
            min_gr, max_gr = min(distr), max(distr)
            cur_count = [0] * fracToInt(max_gr - min_gr + 1)
            for gr in distr:
                cur_count[fracToInt(gr - min_gr)] += 1
            cur_count = tuple(cur_count)
            if cur_count not in distr_count:
                distr_count[cur_count] = 0
            distr_count[cur_count] += 1
        return distr_count
    def getAbsGradingInfo(self):
        """Returns the list of absolute gradings of generators."""
        return sorted([self.gr_set.eltAbsoluteGrading(self.grading[gen])
                       for gen in self.generators])
    def copy(self,returnDicts=False):
        """Return a copy of self. If returnDicts is true, also return two
        dictionaries, {new_gen:old_gen} and {old_gen:new_gen}.
        """
        new_to_old = dict()
        old_to_new = dict()
        answer = SimpleChainComplex(self.ring)
        genlist = list(self.generators)
        for i in range(len(self.generators)):
            newgen = SimpleGenerator(answer,repr(i))
            answer.addGenerator(newgen)
            new_to_old[newgen] = genlist[i]
            old_to_new[genlist[i]] = newgen
        for x in genlist:
            for y in list(self.differential[x].keys()):
                answer.addDifferential(old_to_new[x],old_to_new[y],self.differential[x][y])
        if returnDicts:
            return (answer, new_to_old, old_to_new)
        return answer
    def id(self):
        "Return the identity map of self."
        answer = SimpleChainMorphism(self,self)
        for x in self.getGenerators():
            answer.addMorphism(x,x,1)
        return answer
class SimpleChainMorphism(object):
    """Represents a morphism between two simple chain complexes (which may be
    the same). Need not be a chain map (so can be used to represent homotopy,
    for example). Represented explicitly.
    """
    def __init__(self, cx_from, cx_to):
        """Gives the source and target chain complexes of the morphism."""
        self.cx_from = cx_from
        self.cx_to = cx_to
        # Maps generators of cx_from to Elements of cx_to.
        self.morphism = dict()
    def addMorphism(self, gen_from, gen_to, coeff):
        """gen_from is a generator of cx_from, and gen_to is a generator of
        cx_to. Adds coeff * gen_to to f(gen_from).
        """
        assert gen_from.parent == self.cx_from and gen_to.parent == self.cx_to
        if gen_from not in self.morphism:
            self.morphism[gen_from] = E0
        self.morphism[gen_from] += coeff * gen_to
    def sum(self,g):
        "Return self+g, where g is a SimpleChainMorphism"
        assert isinstance(self, SimpleChainMorphism) and isinstance(g, SimpleChainMorphism)
        assert self.cx_from == g.cx_from and self.cx_to == g.cx_to
        answer = SimpleChainMorphism(self.cx_from, self.cx_to)
        # Accumulate terms from both morphisms; coefficients on shared
        # generators add up inside addMorphism.
        for gen_from in list(self.morphism.keys()):
            for (gen_to,coeff) in list(self.apply(gen_from).items()):
                answer.addMorphism(gen_from, gen_to, coeff)
        for gen_from in list(g.morphism.keys()):
            for (gen_to,coeff) in list(g.apply(gen_from).items()):
                answer.addMorphism(gen_from, gen_to, coeff)
        return answer
    def apply(self, x):
        """Computes f(x), where x is either a generator or an element of
        cx_from. The returned value is always an Element of cx_to.
        """
        if isinstance(x, Generator):
            assert x.parent == self.cx_from
            if x not in self.morphism:
                return E0
            return self.morphism[x]
        else:
            # Extend linearly over an Element.
            assert isinstance(x, Element)
            return sum([coeff * self.apply(gen)
                        for gen, coeff in list(x.items())], E0)
    def mappingConeCx(self):
        "Return the mapping cone of self."
        cx_from = self.cx_from
        cx_to = self.cx_to
        answer = SimpleChainComplex(F2)
        #A dictionary to keep track of identification between old generators and new ones
        from_to_new = dict()
        to_to_new = dict()
        #Add the generators (plain Generators, distinguished by identity)
        for x in cx_from.getGenerators():
            newx = Generator(answer)
            from_to_new[x] = newx
            answer.addGenerator(newx)
        for x in cx_to.getGenerators():
            newx = Generator(answer)
            to_to_new[x] = newx
            answer.addGenerator(newx)
        #add the differential on cx_from
        for x in cx_from.getGenerators():
            for y in list(cx_from.diff(x).keys()):
                answer.addDifferential(from_to_new[x],from_to_new[y],cx_from.diff(x)[y])
        #add the differential on cx_to
        for x in cx_to.getGenerators():
            for y in list(cx_to.diff(x).keys()):
                answer.addDifferential(to_to_new[x],to_to_new[y],cx_to.diff(x)[y])
        #add the differential coming from self
        for x in list(self.morphism.keys()):
            fx = self.apply(x)
            for y in list(fx.keys()):
                answer.addDifferential(from_to_new[x],to_to_new[y],fx[y])
        return answer
    def isQI(self):
        "Check if self is a quasi-isomorphism."
        # A chain map is a quasi-isomorphism iff its mapping cone is acyclic.
        cone = self.mappingConeCx()
        #Could check this is a chain map, perhaps using cone.checkDifferential()
        cone.simplify()
        if len(cone) == 0:
            return True
        return False
    def __str__(self):
        result = "Morphism between two chain complexes.\n"
        for k, v in list(self.morphism.items()):
            result += "f(%s) = %s\n" % (k, v)
        return result
    def __repr__(self):
        return str(self)
class DGAlgebra(ChainComplex):
    """Represents a general differential-graded algebra."""
    def multiply(self, gen1, gen2):
        """Returns the product of gen1 and gen2, as an algebra element."""
        raise NotImplementedError("Multiply not implemented.")
    @memorize
    def _getFactorMap(self):
        """Helper function generating tables for factoring generators, for
        calls to factor. Computed once for the whole algebra (memorized).
        """
        factorMap = {}
        gen_list = self.getGenerators()
        # Group generators by left idempotent so that only composable pairs
        # (gen1.right_idem == gen2.left_idem) are multiplied.
        gen_list_by_left_idem = dict()
        for gen in gen_list:
            if gen.left_idem not in gen_list_by_left_idem:
                gen_list_by_left_idem[gen.left_idem] = []
            gen_list_by_left_idem[gen.left_idem].append(gen)
        for gen in gen_list:
            factorMap[gen] = E0
        # Resulting element lies in the tensor product of A with itself
        parent = Tensor((self, self))
        for gen1 in gen_list:
            for gen2 in gen_list_by_left_idem[gen1.right_idem]:
                for prod, coeff in list((gen1*gen2).items()):
                    tensor_gen = TensorGenerator((gen1, gen2), parent)
                    factorMap[prod] += coeff * tensor_gen
        return factorMap
    def factor(self, gen):
        """Returns an element of (A tensor A), where A is the present algebra.
        The element is the sum of all terms c*(p tensor q), for which gen is a
        term in p*q with coefficient c.
        By default, need getGenerators() to be implemented. factor of all
        generators is computed at once.
        """
        return self._getFactorMap()[gen]
class Tensor(FreeModule, tuple):
    """Free module whose generating set is the product of the generating
    sets of its factor modules. Behaves as a tuple of the factors.
    """

    def __init__(self, data):
        """data is the tuple of factor modules; all must share one ring."""
        base_ring = data[0].ring
        for factor in data[1:]:
            assert factor.ring == base_ring
        # The tuple contents were already filled in by tuple.__new__; only
        # the FreeModule part needs explicit initialization.
        FreeModule.__init__(self, base_ring)
class TensorGenerator(Generator, tuple):
    """Represents a generator of a free module that is a tensor product of two
    or more free modules. Works as a tuple of the components.
    """
    def __new__(cls, data, parent = None):
        return tuple.__new__(cls, tuple(data))
    def __init__(self, data, parent = None):
        """If parent is None, a default is used."""
        if parent is None:
            parent = Tensor(tuple([comp.parent for comp in data]))
        # Note tuple initialization is automatic
        Generator.__init__(self, parent)
    def __eq__(self, other):
        # Equal only if both the component tuples and the parents agree.
        return tuple.__eq__(self, other) and self.parent == other.parent
    def __ne__(self, other):
        return not (self == other)
    @memorizeHash
    def __hash__(self):
        # Hash ignores parent; still consistent with __eq__ since equal
        # objects have equal component tuples.
        return hash((tuple(self), "Tensor"))
    def __str__(self):
        return "**".join(str(comp) for comp in self)
    def getLeftIdem(self):
        """Get the left idempotent. Only works if same function is implemented
        in each component.
        """
        return TensorIdempotent(tuple([comp.getLeftIdem() for comp in self]))
    def getRightIdem(self):
        """Get the right idempotent. Only works if same function is implemented
        in each part.
        """
        return TensorIdempotent(tuple([comp.getRightIdem() for comp in self]))
class TensorElement(Element):
    """Represents an element of the tensor product of two or more modules.
    TODO: Add support for quickly collecting terms by one of the components.
    """
    def __init__(self, data = None, parent = None):
        """If the keys are tuples, convert them to tensor generators."""
        if data is None:
            data = {}
        data_processed = {}
        for term, coeff in list(dict(data).items()):
            if isinstance(term, TensorGenerator):
                data_processed[term] = coeff
            else:
                data_processed[TensorGenerator(term, parent)] = coeff
        Element.__init__(self, data_processed)
    def fixLast(self, gen, parent = None):
        """Collect terms with ``gen`` as the last factor. Return the
        coefficient as either Element or TensorElement.
        """
        result = E0
        for term, coeff in list(self.items()):
            if term[-1] == gen:
                if len(term) > 2:
                    # Still a tensor after dropping the last factor.
                    result += coeff * TensorGenerator(term[:-1], parent)
                else: # len(term) == 2
                    result += coeff * term[0]
        return result
    def invertible(self):
        """Tests whether this element is invertible."""
        # Invertible iff every component of every term is invertible.
        for term, coeff in list(self.items()):
            for comp in term:
                if not (1*comp).invertible():
                    return False
        return True
    def inverse(self):
        """Returns the inverse of this element, if invertible. Undefined
        behavior if the element is not invertible.
        """
        # Invertible elements here are their own inverses.
        return self
# Elements whose generators are TensorGenerators are TensorElements.
TensorGenerator.ELT_CLASS = TensorElement
def expandTensor(prod, parent = None):
    """Produces the tensor element formed by the tensor product of either
    generators or elements.
    ``prod`` is a tuple of either Generator or Element, corresponding to the
    components of the tensor product.
    For example, ((1*A+1*B),C) expands into 1*(A,C)+1*(B,C), and
    ((1*A-1*B),(1*C-1*D)) expands into 1*(A,C)-1*(B,C)-1*(A,D)+1*(B,D).
    ``parent`` specifies the Tensor module. If it is set to None, the default
    (with no additional operations defined) will be used (during the
    initialization of TensorElement).
    """
    assert isinstance(prod, tuple)
    num_part = len(prod)
    # List of (term, coefficient) pairs; distribute one component at a time.
    expanded = [(prod, 1)]
    for i in range(num_part):
        if len(expanded) == 0:
            return E0
        # A Generator component is already a single term; nothing to expand.
        if isinstance(expanded[0][0][i], Generator):
            continue
        expanded2 = []
        for subterm, coeff in expanded:
            for gen, coeff2 in list(subterm[i].items()):
                expanded2.append(((subterm[0:i]+(gen,)+subterm[i+1:]),
                                  coeff*coeff2))
        expanded = expanded2
    if isinstance(parent, TensorStar):
        return TensorStarElement(dict(expanded), parent)
    else:
        return TensorElement(dict(expanded), parent)
class TensorDGAlgebra(Tensor, DGAlgebra):
    """Tensor product of DGAlgebras is a DGAlgebra."""
    def diff(self, gen):
        # Leibniz rule: sum over components, differentiating one at a time.
        return E0.accumulate([
            expandTensor(gen[:i]+(gen[i].diff(),)+gen[i+1:], self)
            for i in range(len(gen))])
    def multiply(self, gen1, gen2):
        if not isinstance(gen1, TensorGenerator) or gen1.parent != self:
            return NotImplemented
        if not isinstance(gen2, TensorGenerator) or gen2.parent != self:
            return NotImplemented
        # Componentwise product, then distribute into a TensorElement.
        return expandTensor(tuple([gen1[i]*gen2[i] for i in range(len(self))]),
                            self)
    def getGenerators(self):
        """Return the set of generators. Use product of sets of generators of
        the components. Currently only implemented for tensors of two algebras.
        """
        if len(self) != 2:
            return NotImplemented
        gens1 = self[0].getGenerators()
        gens2 = self[1].getGenerators()
        result = []
        for gen1 in gens1:
            for gen2 in gens2:
                result.append(TensorGenerator((gen1, gen2), self))
        return result
class TensorIdempotent(tuple):
    """An idempotent of a tensor product of algebras: a tuple whose i-th
    entry is an idempotent of the i-th factor.
    """
    def toAlgElt(self, parent):
        """Return the generator of ``parent`` (a TensorDGAlgebra) obtained by
        converting each component idempotent to an algebra element of the
        corresponding factor.
        """
        assert isinstance(parent, TensorDGAlgebra)
        pieces = tuple(idem.toAlgElt(alg) for idem, alg in zip(self, parent))
        return TensorGenerator(pieces, parent)
class TensorStarGenerator(Generator, tuple):
    """Represents a generator of the tensor star algebra - a tuple (possibly
    with zero components) of elements in the same algebra.
    """
    def __new__(cls, data, parent = None, idem = None):
        return tuple.__new__(cls, tuple(data))
    def __init__(self, data, parent = None, idem = None):
        """Specifies the tuple of generators, and the algebra.

        If ``parent`` is None it is deduced from the first factor, so
        ``data`` must be non-empty in that case. For a length-zero generator
        the common left/right idempotent must be supplied through ``idem``.
        """
        # Note tuple initialization is automatic
        # FIX: compare against None with ``is`` rather than ``==``; the
        # latter invokes the operand's __eq__, which need not handle None.
        if parent is None:
            assert len(data) > 0
            parent = TensorStar(data[0].parent)
        assert all([factor.parent == parent.baseModule for factor in data])
        Generator.__init__(self, parent)
        if len(data) > 0:
            self.left_idem = data[0].getLeftIdem()
            self.right_idem = data[-1].getRightIdem()
        else:
            # An empty word behaves like an idempotent: both sides agree.
            assert idem is not None
            self.left_idem = self.right_idem = idem
    def slice(self, start, end = None):
        """Returns the generator of TensorStar that contains factors in the
        range [start, end) (same convention as python slicing).
        If end is omitted, slice to the end of sequence.
        """
        if end is None:
            end = len(self)
        assert start <= end
        # For an empty slice the idempotent cannot be read off the factors,
        # so take it from this generator's ends or a neighboring factor.
        if start == end == 0:
            new_left_idem = new_right_idem = self.left_idem
        elif start == end == len(self):
            new_left_idem = new_right_idem = self[start-1].getRightIdem()
        else:
            new_left_idem = self[start].getLeftIdem()
            new_right_idem = self[end-1].getRightIdem()
        # The third argument is only consulted when the slice is empty.
        return TensorStarGenerator(self[start:end], self.parent, new_left_idem)
    def getLeftIdem(self):
        """Return the left idempotent (that of the first factor)."""
        return self.left_idem
    def getRightIdem(self):
        """Return the right idempotent (that of the last factor)."""
        return self.right_idem
class TensorStarElement(Element):
    """An element of the tensor star algebra."""
    def __init__(self, data = None, parent = None):
        """Normalize the keys of ``data``: plain tuples are wrapped into
        TensorStarGenerator's over ``parent``; existing generators are kept
        as-is.
        """
        source = {} if data is None else dict(data)
        converted = {}
        for key, coeff in list(source.items()):
            if not isinstance(key, TensorStarGenerator):
                key = TensorStarGenerator(key, parent)
            converted[key] = coeff
        Element.__init__(self, converted)
class TensorStar(FreeModule):
    """Represents a free module that is the direct sum of n'th tensor product,
    over n >= 0, of some free module A. So each generator is a (possibly empty)
    sequence of generators of A.
    """
    def __init__(self, baseModule):
        """Specifies the base module A. All generators are then tuples of
        generators in A.
        """
        # Keep a reference to A; the ground ring is inherited from A.
        self.baseModule = baseModule
        FreeModule.__init__(self, baseModule.ring)
class CobarAlgebra(TensorStar, DGAlgebra):
    """The tensor star module over a dg-algebra can be given a dg-algebra
    structure. Multiplication is by joining the two sequences, and differential
    is the dual of either taking differential of one term of the sequence, or
    multiplying together two adjacent terms of the sequence.
    """
    def __init__(self, baseAlgebra):
        """Specifies the base algebra A. """
        assert isinstance(baseAlgebra, DGAlgebra)
        self.baseAlgebra = baseAlgebra
        TensorStar.__init__(self, baseAlgebra)
        DGAlgebra.__init__(self, baseAlgebra.ring)
    def __eq__(self, other):
        if not isinstance(other, CobarAlgebra):
            return False
        return self.baseAlgebra == other.baseAlgebra
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # BUG FIX: __hash__ previously declared a spurious ``other``
        # parameter, so hash(instance) raised TypeError. The hash protocol
        # is unary.
        return hash(("CobarAlgebra", self.baseAlgebra))
    def _singleDiff(self, gen):
        """Compute the differential, in the cobar-algebra, of the generator
        ((gen)), where gen is a generator of the base algebra. Sum of the dual
        of differential and multiplication.
        antiDiff() and factor() must be implemented for generators of the base
        algebra.
        """
        assert gen.parent == self.baseModule
        result = E0
        # Dual of the differential: each antiDiff term gives a length-one
        # word.
        for term, coeff in list(gen.antiDiff().items()):
            result += coeff * TensorStarGenerator((term,), self)
        # Dual of multiplication: each nontrivial factorization gives a
        # length-two word; factorizations through idempotents are dropped.
        for ((a, b), coeff) in list(gen.factor().items()):
            if not (a.isIdempotent() or b.isIdempotent()):
                result += coeff * TensorStarGenerator((a, b), self)
        return result
    def diff(self, gen):
        """Compute the differential, in the cobar-algebra, of any generator.
        This is defined as either taking the anti-differential or factoring one
        term of the sequence.
        """
        return E0.accumulate(
            [gen.slice(0,i) * self._singleDiff(gen[i]) * gen.slice(i+1)
             for i in range(len(gen))])
    def multiply(self, gen1, gen2):
        """Multiplication is joining two sequences. """
        if not isinstance(gen1, TensorStarGenerator):
            return NotImplemented
        if not isinstance(gen2, TensorStarGenerator):
            return NotImplemented
        assert gen1.parent == self and gen2.parent == self, \
            "Algebra not compatible."
        # Pass gen2.right_idem in case both gen1 and gen2 have length zero.
        return 1*TensorStarGenerator(tuple(gen1)+tuple(gen2), self,
                                     gen2.right_idem)
def simplifyComplex(arrows, default_coeff = 0, find_homology_basis = False,
                    cancellation_constraint = None):
    """Simplify complex using the cancellation lemma.
    ``arrows`` specify the complex to be simplified. It is a dictionary whose
    keys are generators, and values are dictionaries mapping generators to
    coefficients. So ``arrows[x][y] = coeff`` means there is an arrow from x
    to y with coefficient ``coeff``.
    The simplification is done through the cancellation lemma: for any arrow
    from x to y, with invertible coefficient ``coeff``, the generators x and y
    can be cancelled as follows: remove x, y, and all arrows entering or
    leaving these two generators. For each arrow from a to y with coefficient
    c1, and each arrow from x to b with coefficient c2 in the previous complex,
    add an arrow from a to b with coefficient c_1*(coeff^-1)*c_2. The new
    coefficient is added onto the previous one if an arrow already exists from
    a to b, possibly cancelling the previous arrow.
    ``default_coeff`` specifies the value to be used for zero coefficients. It
    should be 0 if the coefficients are numbers, and E0 if they are of type
    Element.
    ``find_homology_basis`` specifies whether to keep track of the identity of
    the generators. Since very often we are only interested in finding a
    homotopy equivalent chain complex or module, this is not automatically
    done. However, if it is necessary to find the generators of the homology
    of a chain complex, in terms of the generators of the original complex,
    this parameter should be set to True. Then an attribute ``prev_meaning`` is
    set for each generator of ``arrows`` in the returned result, expressing
    each generator as a linear combination of generators of the original
    complex.
    ``cancellation_constraint`` is a function taking two generators as
    arguments, and returns a boolean stating whether this pair of generators may
    be cancelled. If None, then any pair of generators can be cancelled. This is
    usually used to specify some condition on filtrations of generators.
    This implementation uses two optimizations. First, a rev_arrows dictionary
    is generated and updated along with arrows, keeping for each generator y,
    the list of arrows going into y. This speeds up the query for the list of
    arrows going into y. Second, for each arrow from x to y, we compute its
    degree (|A|-1)(|B|-1), where |A| is the set of arrows going into y, and |B|
    is the set of arrows coming from x. This equals the number of arrows added
    in cancelling the arrow from x to y. We try to cancel arrows in increasing
    order of degree, using a priority queue (but not strictly so, as the degree
    of an arrow may have changed between when it is added to queue and when it
    is used for cancellation).
    """
    # Produce rev_arrows
    rev_arrows = dict()
    for x in arrows:
        rev_arrows[x] = dict()
    for x in arrows:
        for y in arrows[x]:
            # Copy Element coefficients so the forward and reverse tables can
            # be mutated (with +=) independently.
            if isinstance(arrows[x][y], Element):
                rev_arrows[y][x] = arrows[x][y].copy()
            else:
                rev_arrows[y][x] = arrows[x][y]
    # Priority queue of candidate cancellations, as (degree, x, y) triples.
    cancel_list = []
    def tryAddEdge(x, y):
        """If the arrow from x to y is cancellable, then add it to cancel_list
        (a heap), along with its degree.
        """
        if cancellation_constraint is not None:
            if not cancellation_constraint(x, y):
                return
        coeff = arrows[x][y]
        if coeff.invertible():
            cur_degree = (len(arrows[x])-1)*(len(rev_arrows[y])-1)
            heapq.heappush(cancel_list, (cur_degree, x, y))
    # Make a initial list of cancellable arrows
    for x in arrows:
        for y in arrows[x]:
            tryAddEdge(x, y)
    # Initialize prev_meaning attribute
    if find_homology_basis:
        for x in arrows:
            x.prev_meaning = 1*x
    def cancelEdge(x, y):
        """Cancel the edge from x to y."""
        coeff = arrows[x][y]
        assert coeff.invertible()
        inv_coeff = coeff.inverse()
        # List of edges going into y (other than that from x and y)
        alist = [(term, coeff) for term, coeff in list(rev_arrows[y].items())
                 if term not in (x, y)]
        # List of edges coming from x (other than that going to x and y)
        blist = [(term, coeff) for term, coeff in list(arrows[x].items())
                 if term not in (x, y)]
        # Remove all edges going into x or y
        for term in arrows[x]:
            if term not in (x, y):
                del rev_arrows[term][x]
        for term in arrows[y]:
            if term not in (x, y):
                del rev_arrows[term][y]
        for term in rev_arrows[x]:
            if term not in (x, y):
                del arrows[term][x]
        for term in rev_arrows[y]:
            if term not in (x, y):
                del arrows[term][y]
        # Remove x and y
        del arrows[x], arrows[y], rev_arrows[x], rev_arrows[y]
        # Add arrows from alist to blist (the zig-zag a -> y, x -> b becomes
        # a -> b with coefficient c1 * coeff^-1 * c2).
        for a, c1 in alist:
            c1_invc = c1 * inv_coeff
            for b, c2 in blist:
                new_coeff = c1_invc * c2
                if b not in arrows[a]:
                    arrows[a][b] = default_coeff
                    rev_arrows[b][a] = default_coeff
                arrows[a][b] += new_coeff
                rev_arrows[b][a] += new_coeff
                if arrows[a][b] == 0:
                    del arrows[a][b], rev_arrows[b][a]
                else:
                    tryAddEdge(a, b)
        # Update prev_meaning
        if find_homology_basis:
            for a, c1 in alist:
                a.prev_meaning += (c1 * inv_coeff) * x.prev_meaning
    # Main loop: try to cancel each edge in the queue. Entries may be stale
    # (their endpoints already cancelled, or their degree outdated), so each
    # is re-validated before use.
    while cancel_list:
        degree, x, y = heapq.heappop(cancel_list)
        if x in arrows and y in arrows[x] and arrows[x][y].invertible():
            new_degree = (len(arrows[x])-1)*(len(rev_arrows[y])-1)
            if new_degree > degree * 2: # add back
                tryAddEdge(x, y)
            else:
                cancelEdge(x, y)
    return arrows
def findRankOverF2(num_row, num_col, entries):
    """Find rank of a matrix over F2 with the given number of rows and columns.
    entries is a list of pairs (i, j) with 0 <= i < num_row and 0 <= j < num_col
    specifying where the matrix has 1's.
    """
    # Model the matrix as a chain complex: one generator per row and per
    # column, with an arrow R_i -> C_j for every 1-entry.
    arrows = {("R", r): dict() for r in range(num_row)}
    arrows.update({("C", c): dict() for c in range(num_col)})
    for r, c in entries:
        arrows[("R", r)][("C", c)] = F2.one
    # Each cancellation removes exactly one row and one column generator,
    # and the total number of cancellations equals the rank.
    remaining = simplifyComplex(arrows)
    cancelled = num_row + num_col - len(remaining)
    assert cancelled % 2 == 0
    return cancelled // 2
class _MatrixGenerator(SimpleGenerator):
    """A generator of a chain complex coming from a matrix over F2. """
    def __init__(self, parent, gen_type, index):
        # The generator's name is the pair (gen_type, index); gen_type is
        # "R" for a row or "C" for a column.
        SimpleGenerator.__init__(self, parent, (gen_type, index))
class _MatrixComplex(SimpleChainComplex):
    """Represents a chain complex coming from a matrix over F2. """
    def __init__(self):
        # Coefficients are always taken in F2.
        SimpleChainComplex.__init__(self, F2)
def solveOverF2(num_row, num_col, entries, vec):
    """Find a linear combination of rows that equals vec. entries has the same
    format as in findRankOverF2. vec is a list of numbers v, 0 <= v < num_col,
    that specifies where the combination need to have 1's.
    If there is no such combination, return None; otherwise return the sorted
    list of row indices whose sum over F2 equals vec.
    """
    arrows = dict()
    cx = _MatrixComplex()
    def to_gen(gen_type, index):
        # Generators must belong to a chain complex; index -1 is reserved
        # for the target vector ``vec``.
        return _MatrixGenerator(cx, gen_type, index)
    for i in range(num_row):
        arrows[to_gen("R", i)] = dict()
    vec_gen = to_gen("R", -1)
    arrows[vec_gen] = dict() # represents vec
    for i in range(num_col):
        arrows[to_gen("C", i)] = dict()
    for i, j in entries:
        arrows[to_gen("R", i)][to_gen("C", j)] = F2.one
    for j in vec:
        arrows[vec_gen][to_gen("C", j)] = F2.one
    arrows = simplifyComplex(arrows, find_homology_basis = True)
    # Surviving "R" generators span the kernel of the (extended) row space;
    # prev_meaning expresses each as a combination of original rows.
    kernel_gens = [gen.prev_meaning for gen in arrows if
                   gen.prev_meaning.getElt().name[0] == "R"]
    seed_index = -1
    for i in range(len(kernel_gens)):
        cur_gen = kernel_gens[i]
        assert all(term.name[0] == "R" for term in cur_gen)
        if vec_gen in cur_gen:
            seed_index = i
            break
    if seed_index == -1:
        print("System cannot be solved.")
        return None
    else:
        # Try to find a short vector to return
        # FIX: index with seed_index rather than the leaked loop variable
        # ``i`` (they are equal here, but relying on the leak is fragile).
        kernel_gens[0], kernel_gens[seed_index] = \
            kernel_gens[seed_index], kernel_gens[0]
        # Remove vec_gen from every other kernel element so only
        # kernel_gens[0] contains it.
        for i in range(1, len(kernel_gens)):
            if vec_gen in kernel_gens[i]:
                kernel_gens[i] += kernel_gens[0]
                assert vec_gen not in kernel_gens[i]
        # Greedily add kernel elements while they shorten the solution.
        shortened = True
        while shortened:
            shortened = False
            for i in range(1, len(kernel_gens)):
                if len(kernel_gens[0] + kernel_gens[i]) < len(kernel_gens[0]):
                    kernel_gens[0] += kernel_gens[i]
                    shortened = True
        return list(sorted([term.name[1] for term in kernel_gens[0]
                            if term.name[1] != -1]))
| 41,139
| 36.298277
| 126
|
py
|
bfh_python
|
bfh_python-master/pmc.py
|
"""Pointed matched circle and its algebras."""
import itertools
from algebra import E0
from fractions import Fraction
from algebra import DGAlgebra, Element, Generator, Tensor, TensorGenerator
from grading import BigGradingElement, BigGradingGroup, SmallGradingElement, \
SmallGradingGroup
from grading import DEFAULT_REFINEMENT
from utility import memorize, memorizeHash
from utility import BIG_GRADING, DEFAULT_GRADING, F2, MULT_ONE, ZZ
class PMC(object):
    """Represents a pointed matched circle."""
    def __init__(self, matching):
        """Creates a pointed matched circle from a list of matched pairs. The
        indices start at 0.
        """
        # n is the number of points; each matched pair contributes two.
        self.n = len(matching) * 2
        self.num_pair = self.n // 2
        self.genus = self.n // 4
        # otherp[i] is the point paired to i (0 <= i < n)
        self.otherp = [0] * self.n
        for p, q in matching:
            self.otherp[p] = q
            self.otherp[q] = p
        # pairid[i] is the ID of the pair containing i (0 <= pairid[i] < n/2)
        self.pairid = [-1] * self.n
        # pairs[i] is the pair of points with ID i (0 <= i < n/2)
        self.pairs = []
        pairCount = 0
        # Assign pair IDs in order of first occurrence along the circle.
        for pos in range(self.n):
            if self.pairid[pos] == -1:
                self.pairid[pos] = self.pairid[self.otherp[pos]] = pairCount
                self.pairs.append((pos, self.otherp[pos]))
                pairCount += 1
    def __eq__(self, other):
        # The matching alone determines the PMC.
        return self.otherp == other.otherp
    def __ne__(self, other):
        return not (self == other)
    @memorizeHash
    def __hash__(self):
        return hash(tuple(self.otherp))
    def __str__(self):
        return str(self.pairs)
    def __repr__(self):
        return "PMC(%s)" % str(self)
    @memorize
    def opp(self):
        """Returns a new PMC that represents the opposite PMC."""
        return PMC([(self.n-1-p, self.n-1-q) for p, q in self.pairs])
    def sd(self, data, mult_one = MULT_ONE):
        """Simple way to obtain a strand diagram for this PMC. Each element of
        data is either an integer or a pair. An integer specifies a double
        horizontal at this position (and its paired position). A pair (p, q)
        specifies a strand from p to q.
        """
        idem_size = len(data)
        parent = StrandAlgebra(F2, self, idem_size, mult_one)
        left_idem = []
        strands = []
        for d in data:
            if isinstance(d, int):
                left_idem.append(self.pairid[d])
            else:
                left_idem.append(self.pairid[d[0]])
                strands.append(d)
        return StrandDiagram(parent, left_idem, strands)
    def idem(self, data):
        """Simple way to obtain an idempotent for this PMC. Each element of
        data specifies a double horizontal at this position (and its paired
        position).
        """
        idem_data = [self.pairid[d] for d in data]
        return Idempotent(self, idem_data)
    def big_gr(self, maslov, spinc):
        """Simple way to obtain an element of the big grading group for this
        PMC.
        """
        grading_group = BigGradingGroup(self)
        return BigGradingElement(grading_group, maslov, spinc)
    def small_gr(self, maslov, spinc):
        """Simple way to obtain an element of the small grading group for this
        PMC.
        """
        grading_group = SmallGradingGroup(self)
        return SmallGradingElement(grading_group, maslov, spinc)
    def getAlgebra(self, ring = F2, idem_size = None, mult_one = MULT_ONE):
        """Returns the algebra with a given size of idempotent (the default
        value, with size half the number of pairs, is most used).
        """
        if idem_size == None: idem_size = self.genus
        return StrandAlgebra(ring, self, idem_size, mult_one)
    def getIdempotents(self, idem_size = None):
        """Get the list of all idempotents."""
        if idem_size == None: idem_size = self.genus
        return [Idempotent(self, data) for data in
                itertools.combinations(list(range(self.num_pair)), idem_size)]
    def getStrandDiagrams(self, algebra):
        """Get the list of generators of the strand algebra. algebra should be
        of type StrandAlgebra.
        """
        result = []
        idem_size = algebra.idem_size
        def helper(l_idem, r_idem, strands, pos):
            # Both l_idem and r_idem are lists of pair ID's. The first
            # 'pos' of them are already used to generate strands or double
            # horizontals. 'strands' keep track of strands generated.
            if pos == idem_size:
                result.append(StrandDiagram(algebra, l_idem, strands))
                return
            for i in range(pos, idem_size):
                # Bring r_idem[i] to position pos and try to match it with
                # l_idem[pos], either as a double horizontal or a strand.
                r_idem[i], r_idem[pos] = r_idem[pos], r_idem[i]
                if l_idem[pos] == r_idem[pos]:
                    helper(l_idem, r_idem, strands, pos+1)
                for p in self.pairs[l_idem[pos]]:
                    for q in self.pairs[r_idem[pos]]:
                        if p < q:
                            helper(l_idem, r_idem, strands + [(p, q)], pos+1)
                # Undo the swap before trying the next candidate.
                r_idem[i], r_idem[pos] = r_idem[pos], r_idem[i]
        idems = self.getIdempotents(idem_size)
        for l_idem in idems:
            for r_idem in idems:
                helper(list(l_idem), list(r_idem), [], 0)
        # If mult_one is True, filter the generators
        if algebra.mult_one is True:
            result = [sd for sd in result
                      if all([x <= 1 for x in sd.multiplicity])]
        return result
def splitPMC(genus):
    """Returns the split pmc with a given genus."""
    # Per handle i: pairs (4i, 4i+2) and (4i+1, 4i+3).
    matching = [(4*i + j, 4*i + j + 2) for i in range(genus) for j in (0, 1)]
    return PMC(matching)
def linearPMC(genus):
    """Returns the linear pmc with a given genus."""
    # Two boundary pairs plus a chain of overlapping interior pairs.
    interior = [(2*i - 1, 2*i + 2) for i in range(1, 2*genus - 1)]
    return PMC([(0, 2), (4*genus - 3, 4*genus - 1)] + interior)
def antipodalPMC(genus):
    """Returns the antipodal pmc with a given genus."""
    half = 2 * genus
    # Each point is matched with the point half-way around the circle.
    return PMC([(pt, pt + half) for pt in range(half)])
def connectSumPMC(pmc1, pmc2):
    """Return the connect sum of two PMC's."""
    # Shift the second PMC's points past the end of the first.
    offset = pmc1.n
    shifted = [(p + offset, q + offset) for p, q in pmc2.pairs]
    return PMC(pmc1.pairs + shifted)
@memorize
def unconnectSumPMC(pmc, genus1):
    """Returns a pair (pmc1, pmc2) such that pmc1 has genus1 and
    pmc1 # pmc2 = pmc.
    """
    cut = 4 * genus1
    # Every pair must lie entirely on one side of the cut point.
    for p, q in pmc.pairs:
        assert (p < cut) == (q < cut)
    left = [(p, q) for p, q in pmc.pairs if p < cut]
    right = [(p - cut, q - cut) for p, q in pmc.pairs if p >= cut]
    return (PMC(left), PMC(right))
class Idempotent(tuple):
    """Represents an idempotent in a certain PMC. Stored as a tuple of pairid
    of occupied pairs.
    """
    def __new__(cls, pmc, data):
        # Sort so equal sets of pairs compare and hash equal.
        return tuple.__new__(cls, tuple(sorted(data)))
    def __init__(self, pmc, data):
        # pmc: the pointed matched circle this idempotent belongs to.
        self.pmc = pmc
    def __eq__(self, other):
        if isinstance(other, Idempotent):
            return self.pmc == other.pmc and tuple.__eq__(self, other)
        else:
            return False
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # Tag with "Idempotent" to avoid colliding with Strands, which also
        # hashes a (pmc, tuple) pair.
        return hash((self.pmc, tuple(self), "Idempotent"))
    def __str__(self):
        return repr(self)
    def __repr__(self):
        return "(%s)" % ",".join(str(self.pmc.pairs[i]) for i in self)
    @memorize
    def opp(self):
        """Get the same idempotent in the opposite PMC."""
        pmc, pmcopp = self.pmc, self.pmc.opp()
        return Idempotent(pmcopp, [pmcopp.pairid[pmc.n-1-pmc.pairs[i][0]]
                                   for i in self])
    def comp(self):
        """Get the complementary idempotent in the same PMC."""
        return Idempotent(self.pmc,
                          set(range(self.pmc.num_pair))-set(self))
    def toAlgElt(self, parent):
        """Get the strand diagram corresponding to this idempotent, in the
        specified strand algebra.
        """
        return StrandDiagram(parent, self, [])
def unconnectSumIdem(idem, genus1):
    """Returns the pair of idempotents (idem1, idem2) in (pmc1, pmc2), where
    (pmc1, pmc2) is the pair returned by unconnectSumPMC(idem.pmc, genus1).
    """
    cut = 2 * genus1
    pmc1, pmc2 = unconnectSumPMC(idem.pmc, genus1)
    left = [pair for pair in idem if pair < cut]
    right = [pair - cut for pair in idem if pair >= cut]
    return (Idempotent(pmc1, left), Idempotent(pmc2, right))
class Strands(tuple):
    """Represents a (fixed) list of strands in a certain PMC. Stored as a tuple
    of pairs.
    """
    def __new__(cls, pmc, data):
        return tuple.__new__(cls, tuple(sorted(data)))
    def __init__(self, pmc, data):
        self.pmc = pmc
        # Compute multiplicity at each interval
        # multiplicity[i] counts strands covering the interval (i, i+1).
        self.multiplicity = [0] * (self.pmc.n - 1)
        for st in self:
            # Each strand is an increasing pair (start, end).
            assert len(st) == 2 and st[0] < st[1]
            for pos in range(st[0], st[1]):
                self.multiplicity[pos] += 1
    def __eq__(self, other):
        if isinstance(other, Strands):
            return self.pmc == other.pmc and tuple.__eq__(self, other)
        else:
            return False
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash((self.pmc, tuple(self), "Strands"))
    def __str__(self):
        return "(%s)" % ",".join("%d->%d" % (s, t) for s, t in self)
    def __repr__(self):
        return str(self)
    def opp(self):
        """Returns the same strands (with direction reversed) in the opposite
        PMC.
        """
        n = self.pmc.n
        return Strands(self.pmc.opp(), [(n-1-q, n-1-p) for p, q in self])
    def leftCompatible(self, idem):
        """Tests whether this set of strands is compatible with a given left
        idempotent.
        """
        return self.propagateRight(idem) is not None
    def rightCompatible(self, idem):
        """Tests whether this set of strands is compatible with a given right
        idempotent.
        """
        return self.propagateLeft(idem) is not None
    def idemCompatible(self, left_idem, right_idem):
        """Tests whether this set of strands is compatible with the given
        idempotents on the two sides. Note this is not the same as left and
        right compatible.
        """
        return self.leftCompatible(left_idem) and \
            self.propagateRight(left_idem) == right_idem
    def propagateRight(self, left_idem):
        """Find the right_idem given left_idem and strand info. If not
        compatible, return None.
        """
        # idemCount[pair] is 1 while the pair is occupied, 0 otherwise.
        idemCount = [0] * self.pmc.num_pair
        for pair in left_idem:
            idemCount[pair] += 1
        # Each strand must start at an occupied pair...
        for st in self:
            if idemCount[self.pmc.pairid[st[0]]] == 0: return None
            idemCount[self.pmc.pairid[st[0]]] -= 1
        # ...and must end at a pair that is not otherwise occupied.
        for st in self:
            if idemCount[self.pmc.pairid[st[1]]] == 1: return None
            idemCount[self.pmc.pairid[st[1]]] += 1
        right_idem = [i for i in range(self.pmc.n//2) if idemCount[i] == 1]
        return Idempotent(self.pmc, right_idem)
    def propagateLeft(self, right_idem):
        """Find the left_idem given right_idem and strand info. If not
        compatible, return None.
        """
        # Mirror image of propagateRight: walk the strands backwards.
        idemCount = [0] * self.pmc.num_pair
        for pair in right_idem:
            idemCount[pair] += 1
        for st in self:
            if idemCount[self.pmc.pairid[st[1]]] == 0: return None
            idemCount[self.pmc.pairid[st[1]]] -= 1
        for st in self:
            if idemCount[self.pmc.pairid[st[0]]] == 1: return None
            idemCount[self.pmc.pairid[st[0]]] += 1
        left_idem = [i for i in range(self.pmc.n//2) if idemCount[i] == 1]
        return Idempotent(self.pmc, left_idem)
    def isMultOne(self):
        """Tests whether this set of strands have total multiplicity <= 1
        everywhere.
        """
        return all([n <= 1 for n in self.multiplicity])
def unconnectSumStrands(strands, genus1):
    """Returns pairs of strands (strand1, strand2) in (pmc1, pmc2), where
    (pmc1, pmc2) is the pair returned by unconnectSumPMC(strands.pmc, genus1).
    """
    cut = 4 * genus1
    pmc1, pmc2 = unconnectSumPMC(strands.pmc, genus1)
    # No strand may cross the cut point.
    for p, q in strands:
        assert q < cut or p >= cut
    left = [(p, q) for p, q in strands if q < cut]
    right = [(p - cut, q - cut) for p, q in strands if p >= cut]
    return (Strands(pmc1, left), Strands(pmc2, right))
class StrandDiagram(Generator):
    """Represents a strand diagram, or a generator of the strand algebra."""
    def __init__(self, parent, left_idem, strands, right_idem = None):
        """Specifies PMC, left idempotent and right idempotent as list of pair
        ID's, and strands as a list of pairs (start, end).
        For example, in the split PMC of genus 2, the strand diagram with
        double horizontal at (1,3) and strand from 2 to 5 would be encoded as:
        left_idem = [1,2], right_idem = [1,3], strands = [(2,5)], since pair
        (1,3) has index 1, pair (2,4) has index 2, and pair (5,7) has index 3.
        """
        Generator.__init__(self, parent)
        self.pmc = parent.pmc
        self.mult_one = parent.mult_one
        self.strands = strands
        if not isinstance(self.strands, Strands):
            self.strands = Strands(self.pmc, self.strands)
        # Calculate left idempotent if necessary
        if left_idem is None:
            assert right_idem is not None
            left_idem = self.strands.propagateLeft(right_idem)
        self.left_idem = left_idem
        if not isinstance(self.left_idem, Idempotent):
            self.left_idem = Idempotent(self.pmc, self.left_idem)
        # Calculate right idempotent if necessary
        if right_idem is None:
            right_idem = self.strands.propagateRight(self.left_idem)
            assert right_idem is not None, \
                "Invalid init data for strand diagram: cannot propagate to right."
        self.right_idem = right_idem
        if not isinstance(self.right_idem, Idempotent):
            self.right_idem = Idempotent(self.pmc, self.right_idem)
        # Enumerate double horizontals: occupied pairs of the left idempotent
        # that are not the start of any strand.
        self.double_hor = list(self.left_idem)
        for st in self.strands:
            self.double_hor.remove(self.pmc.pairid[st[0]])
        self.double_hor = tuple(self.double_hor)
        # Get multiplicity from strands
        self.multiplicity = self.strands.multiplicity
    @memorize
    def getBigGrading(self):
        return self.pmc.big_gr(self.maslov(), self.multiplicity)
    @memorize
    def getSmallGrading(self, refinement = DEFAULT_REFINEMENT):
        # Conjugate the big grading by the refinement data for the two
        # idempotents to land in the small grading group.
        refine_data = refinement(self.pmc, len(self.left_idem))
        p_l, p_r = [refine_data[i] for i in (self.left_idem, self.right_idem)]
        return (p_l * self.getBigGrading() * p_r.inverse()).toSmallGrading()
    def getGrading(self):
        if DEFAULT_GRADING == BIG_GRADING:
            return self.getBigGrading()
        else: # DEFAULT_GRADING == SMALL_GRADING
            return self.getSmallGrading()
    def __eq__(self, other):
        # right_idem is determined by left_idem and strands, so it is not
        # compared.
        return self.parent == other.parent \
            and self.left_idem == other.left_idem \
            and self.strands == other.strands
    def __ne__(self, other):
        return not (self == other)
    @memorizeHash
    def __hash__(self):
        return hash((self.parent, self.left_idem, self.strands))
    def __str__(self):
        return "[%s]" % \
            ",".join([str(self.pmc.pairs[i]) for i in self.double_hor] +
                     ["%s->%s" % (p, q) for (p, q) in self.strands])
    def __repr__(self):
        return str(self)
    def isIdempotent(self):
        """Tests whether this generator is an idempotent."""
        return len(self.strands) == 0
    @memorize
    def opp(self):
        """Returns the opposite strand diagram in the opposite strand
        algebra.
        """
        return StrandDiagram(self.parent.opp(), self.right_idem.opp(),
                             self.strands.opp(), self.left_idem.opp())
    def numCrossing(self):
        """Returns the number of crossings between moving strands."""
        return sum(1 for (s1, t1) in self.strands for (s2, t2) in self.strands
                   if s1 < s2 and t1 > t2)
    def maslov(self):
        """Returns the Maslov index, defined as i(a) = inv(a) - m([a],S)."""
        # m([a],S): each strand start contributes half the multiplicity of
        # the two intervals adjacent to it.
        maslov = Fraction()
        for s, t in self.strands:
            maslov -= Fraction(self.multiplicity[s], 2)
            if s != 0:
                maslov -= Fraction(self.multiplicity[s-1], 2)
        maslov += self.numCrossing()
        return maslov
    def getLeftIdem(self):
        """Return the left idempotent."""
        return self.left_idem
    def getRightIdem(self):
        """Return the right idempotent."""
        return self.right_idem
def unconnectSumStrandDiagram(sd, genus1):
    """Returns a pair of strand diagrams (sd1, sd2) in the algebra of
    (pmc1, pmc2), where (pmc1, pmc2) is the pair returned by
    unconnectSumPMC(sd.pmc, genus1).
    """
    pmc1, pmc2 = unconnectSumPMC(sd.pmc, genus1)
    idem1, idem2 = unconnectSumIdem(sd.left_idem, genus1)
    str1, str2 = unconnectSumStrands(sd.strands, genus1)
    # Each half lives in the strand algebra of its own PMC, with the same
    # multiplicity-one setting as the original.
    sd1 = StrandDiagram(
        StrandAlgebra(F2, pmc1, len(idem1), sd.mult_one), idem1, str1)
    sd2 = StrandDiagram(
        StrandAlgebra(F2, pmc2, len(idem2), sd.mult_one), idem2, str2)
    return (sd1, sd2)
class StrandAlgebra(DGAlgebra):
    """Represents the strand algebra of a PMC."""
    def __init__(self, ring, pmc, idem_size, mult_one = MULT_ONE):
        """Specifies the PMC, size of idempotent, and whether this is a
        multiplicity one algebra.
        """
        DGAlgebra.__init__(self, ring)
        self.pmc = pmc
        self.idem_size = idem_size
        self.mult_one = mult_one
    def __str__(self):
        return "Strand algebra over %s with idem_size = %d and mult_one = %r" \
            % (str(self.pmc), self.idem_size, self.mult_one)
    def __eq__(self, other):
        if not isinstance(other, StrandAlgebra):
            return False
        return self.pmc == other.pmc and self.idem_size == other.idem_size \
            and self.mult_one == other.mult_one
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        return hash((self.pmc, self.idem_size, self.mult_one))
    @memorize
    def getStrandDiagram(self, left_idem, strands):
        """Memorized version of creating new strand diagrams."""
        return StrandDiagram(self, left_idem, strands)
    def opp(self):
        """Returns the opposite algebra, as the strand algebra associated to
        the opposite PMC.
        """
        return StrandAlgebra(self.ring, self.pmc.opp(), self.idem_size,
                             self.mult_one)
    @memorize
    def diffRaw(self, gen):
        """Returns a list of elements of the form ((s1, s2), diff_term), where
        s1 < s2 are starting points of strands in gen that crosses, and
        diff_term is a generator in gen.diff() obtained by uncrossing these two
        strands. Together they specify all terms in gen.diff().
        """
        # In the multiplicity-one algebra every uncrossing counts; otherwise
        # only those dropping the Maslov index by exactly one survive.
        target_maslov = gen.maslov() - 1
        cur_strands = gen.strands
        result = []
        def appendCandidate(new_strands, s1, s2):
            # Same info except strands, then check grading
            assert s1 < s2
            diff_term = self.getStrandDiagram(
                tuple(gen.left_idem), new_strands)
            if self.mult_one or diff_term.maslov() == target_maslov:
                result.append(((s1, s2), diff_term))
        # Uncross two moving strands
        for s1, t1 in cur_strands:
            for s2, t2 in cur_strands:
                if s1 < s2 and t1 > t2:
                    new_strands = list(cur_strands)
                    new_strands.remove((s1, t1))
                    new_strands.remove((s2, t2))
                    new_strands.extend([(s1, t2), (s2, t1)])
                    appendCandidate(tuple(sorted(new_strands)), s1, s2)
        # Uncross a moving strand with a double horizontal
        for st_id in range(len(cur_strands)):
            s, t = cur_strands[st_id]
            for i in gen.double_hor:
                for p in gen.pmc.pairs[i]:
                    if s <= p and p <= t:
                        # Automatically sorted.
                        new_strands = cur_strands[:st_id] + \
                            ((s, p), (p, t)) + cur_strands[st_id+1:]
                        appendCandidate(new_strands, s, p)
        return result
    @memorize
    def diff(self, gen):
        # Sum the diffRaw terms into an element; only F2 coefficients are
        # supported.
        result = E0
        if self.ring is F2:
            for (s1, s2), dgen_term in self.diffRaw(gen):
                result += dgen_term.elt()
        else:
            return NotImplemented
        return result
    @memorize
    def getGenerators(self):
        return self.pmc.getStrandDiagrams(self)
    @memorize
    def getGeneratorsForIdem(self, left_idem = None, right_idem = None):
        """Returns the list of generators with the specified left and right
        idempotents. Giving None as input means no constraints there.
        """
        return [gen for gen in self.getGenerators() if
                (left_idem is None or gen.left_idem == left_idem) and
                (right_idem is None or gen.right_idem == right_idem)]
    @memorize
    def getIdempotents(self):
        """Returns the set of idempotents. Use corresponding function in PMC.
        """
        return self.pmc.getIdempotents()
    def _multiplyRaw(self, gen1, gen2):
        """If gen1 and gen2 can be multiplied, return the generator that is
        their product. Otherwise, return None.
        """
        pmc = gen1.pmc
        new_strands = []
        # Keep track of which strands at right are not yet used.
        strands_right = list(gen2.strands)
        for sd in gen1.strands:
            mid_idem = pmc.pairid[sd[1]]
            possible_match = [sd2 for sd2 in strands_right
                              if pmc.pairid[sd2[0]] == mid_idem]
            if len(possible_match) == 0:
                # No continuation on the right: the strand passes through.
                new_strands.append(sd)
            else: # len(possible_match) == 1
                sd2 = possible_match[0]
                if sd2[0] != sd[1]:
                    # Strands meet the same pair at different points; the
                    # product vanishes.
                    return None
                else:
                    # Concatenate the two strands.
                    new_strands.append((sd[0], sd2[1]))
                    strands_right.remove(sd2)
        new_strands.extend(strands_right)
        new_strands = sorted(new_strands)
        mult_term = self.getStrandDiagram(
            tuple(gen1.left_idem), tuple(new_strands))
        # In the general algebra the big gradings must multiply; otherwise a
        # double crossing was smoothed and the product is zero.
        if self.mult_one or mult_term.getBigGrading() == \
            gen1.getBigGrading() * gen2.getBigGrading():
            return mult_term
        else:
            return None
    def multiply(self, gen1, gen2):
        if not isinstance(gen1, StrandDiagram):
            return NotImplemented
        if not isinstance(gen2, StrandDiagram):
            return NotImplemented
        assert gen1.parent == self and gen2.parent == self, \
            "Algebra not compatible."
        if gen1.right_idem != gen2.left_idem:
            return E0
        if self.mult_one:
            # Enforce the multiplicity one condition
            if not all(x <= 1 for x in [
                m1 + m2 for m1, m2 in zip(gen1.multiplicity,
                                          gen2.multiplicity)]):
                return E0
        prod_raw = self._multiplyRaw(gen1, gen2)
        if prod_raw is None:
            return E0
        if self.ring is F2:
            return prod_raw.elt()
        else:
            return NotImplemented
class StrandAlgebraElement(Element):
"""An element of strand algebra."""
def isIdempotent(self):
"""Tests whether this element is an idempotent."""
for sd, coeff in list(self.items()):
if not sd.isIdempotent():
return False
return True
def invertible(self):
"""Tests whether this element is invertible."""
return self != 0 and self.isIdempotent()
def inverse(self):
"""Returns the inverse of this element, if invertible. Undefined
behavior if the element is not invertible.
"""
return self
StrandDiagram.ELT_CLASS = StrandAlgebraElement
| 24,613
| 34.518038
| 80
|
py
|
bfh_python
|
bfh_python-master/dehntwisttest.py
|
"""Unit test for dehntwist.py"""
from dehntwist import *
import unittest
class DehnTwistTest(unittest.TestCase):
def testDehnTwist(self):
twist = DehnTwist(3, 1, POS)
twist_dd = twist.getDDStructure()
class AntiBraidTest(unittest.TestCase):
def testAntiBraid(self):
for genus, c_pair in [(3, 1), (3, 0), (3, 5)]:
antibraid = AntiBraid(genus, c_pair)
antibraid_dd = antibraid.getDDStructure()
def testAdmissibleAntiBraid(self):
for genus, c_pair in [(3, 1), (3, 2), (3, 0), (3, 5)]:
antibraid = AntiBraid(genus, c_pair)
antibraid_dd = antibraid.getAdmissibleDDStructure()
antibraid_dd.simplify()
self.assertTrue(
antibraid_dd.compareDDStructures(antibraid.getDDStructure()))
class DehnSurgeryTest(unittest.TestCase):
def testDehnSurgery(self):
for genus, c_pair, orientation in [(3, 1, NEG), (3, 2, POS),
(3, 0, NEG), (3, 5, POS)]:
surgery = DehnSurgery(genus, c_pair, orientation)
morphism = surgery.getMorphism()
self.assertEqual(morphism.diff(), 0)
def testDehnSurgeryMappingCone(self):
surgery = DehnSurgery(2, 1, NEG)
morphism = surgery.getMorphism()
morphism_cx = morphism.getElt().parent
mapping_cone = morphism_cx.getMappingCone(morphism)
# Six generators in identity, four in anti-braid
self.assertEqual(len(mapping_cone), 10)
self.assertTrue(mapping_cone.testDelta())
def testAdmissibleDehnSurgery(self):
for genus, c_pair, orientation in [(3, 1, NEG), (3, 2, POS),
(3, 0, NEG), (3, 5, POS)]:
surgery = DehnSurgery(genus, c_pair, orientation)
morphism = surgery.getMorphism(is_admissible = True)
self.assertEqual(morphism.diff(), 0)
# Form mapping cone
morphism_cx = morphism.getElt().parent
mapping_cone = morphism_cx.getMappingCone(morphism)
self.assertTrue(mapping_cone.testDelta())
# Test that the simplified mapping cone agrees with the one from the
# non-admissible case.
mapping_cone.simplify()
ori_morphism = surgery.getMorphism()
ori_morphism_cx = ori_morphism.getElt().parent
ori_mapping_cone = ori_morphism_cx.getMappingCone(ori_morphism)
self.assertTrue(
mapping_cone.compareDDStructures(ori_mapping_cone))
if __name__ == "__main__":
unittest.main()
| 2,609
| 37.955224
| 80
|
py
|
bfh_python
|
bfh_python-master/dehntwistdatest.py
|
"""Unit test for dehntwistda.py"""
from ddstructure import identityDD
from dehntwist import AntiBraid, DehnSurgery
from dehntwistda import *
import unittest
class AntiBraidDATest(unittest.TestCase):
def testLocalDA(self):
da = AntiBraidDA(2, 1)
self.assertTrue(da.local_da.testDelta())
def testAntiBraidDA(self):
for genus, c_pair in [(2, 1), (2, 2)]:
da = AntiBraidDA(genus, c_pair)
self.assertTrue(da.toSimpleDAStructure().testDelta())
def testAntiBraidAgreesWithDD(self):
for genus, c_pair in [(2, 1), (2, 2), (3, 2)]:
dastr = AntiBraidDA(genus, c_pair)
ddstr = dastr.tensorDD(identityDD(dastr.pmc))
ddstr.simplify()
ori_ddstr = AntiBraid(genus, c_pair).getDDStructure()
self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
def testLocalDAShort(self):
for genus, c_pair in [(2, 0), (2, 3)]:
da = AntiBraidDA(genus, c_pair)
self.assertTrue(da.local_da.testDelta())
def testAntiBraidDAShort(self):
for genus, c_pair in [(2, 0), (2, 3)]:
da = AntiBraidDA(genus, c_pair)
self.assertTrue(da.toSimpleDAStructure().testDelta())
def testAntiBraidAgreesWithDDShort(self):
for genus, c_pair in [(2, 0), (3, 0), (2, 3)]:
dastr = AntiBraidDA(genus, c_pair)
ddstr = dastr.tensorDD(identityDD(dastr.pmc))
ddstr.simplify()
ori_ddstr = AntiBraid(genus, c_pair).getDDStructure()
self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
class DehnSurgeryDATest(unittest.TestCase):
def testLocalDA(self):
for genus, c_pair, orientation in [(2, 1, NEG), (2, 1, POS)]:
ds = DehnSurgeryDA(genus, c_pair, orientation)
morphism = ds.getLocalMorphism()
self.assertEqual(ds.getLocalMorphism().diff(), 0)
self.assertTrue(ds.getLocalMappingCone().testDelta())
def testDehnSurgeryDA(self):
for genus, c_pair, orientation in [(2, 1, NEG), (2, 1, POS)]:
ds = DehnSurgeryDA(genus, c_pair, orientation)
self.assertTrue(
ds.getMappingCone().toSimpleDAStructure().testDelta())
def testDehnSurgeryAgreesWithDD(self):
for genus, c_pair, orientation in [(2, 1, NEG), (2, 1, POS)]:
ds = DehnSurgeryDA(genus, c_pair, orientation)
dastr = ds.getMappingCone()
ddstr = dastr.tensorDD(identityDD(dastr.algebra1.pmc))
ddstr.simplify(
cancellation_constraint = lambda x, y: (
x.filtration == y.filtration))
ori_mor = DehnSurgery(genus, c_pair, orientation).getMorphism()
ori_mor_cx = ori_mor.getElt().parent
ori_ddstr = ori_mor_cx.getMappingCone(ori_mor)
self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
def testLocalDAShort(self):
for genus, c_pair, orientation in [
(2, 0, NEG), (2, 0, POS), (2, 3, POS)]:
ds = DehnSurgeryDA(genus, c_pair, orientation)
morphism = ds.getLocalMorphism()
self.assertEqual(ds.getLocalMorphism().diff(), 0)
self.assertTrue(ds.getLocalMappingCone().testDelta())
def testDehnSurgeryDAShort(self):
for genus, c_pair, orientation in [
(2, 0, NEG), (2, 0, POS), (2, 3, POS)]:
ds = DehnSurgeryDA(genus, c_pair, orientation)
self.assertTrue(
ds.getMappingCone().toSimpleDAStructure().testDelta())
def testDehnSurgeryAgreesWithDDShort(self):
for genus, c_pair, orientation in [
(2, 0, NEG), (2, 0, POS), (2, 3, POS)]:
ds = DehnSurgeryDA(genus, c_pair, orientation)
dastr = ds.getMappingCone()
ddstr = dastr.tensorDD(identityDD(dastr.algebra1.pmc))
ddstr.simplify(
cancellation_constraint = lambda x, y: (
x.filtration == y.filtration))
ori_mor = DehnSurgery(genus, c_pair, orientation).getMorphism()
ori_mor_cx = ori_mor.getElt().parent
ori_ddstr = ori_mor_cx.getMappingCone(ori_mor)
self.assertTrue(ddstr.compareDDStructures(ori_ddstr))
if __name__ == "__main__":
unittest.main()
| 4,316
| 40.509615
| 75
|
py
|
bfh_python
|
bfh_python-master/pmctest.py
|
"""Unit test for pmc.py"""
from grading import averageRefinement
from pmc import *
import unittest
class PMCTest(unittest.TestCase):
def testPMC(self):
pmc1 = PMC([(0,2),(1,3)])
for p, q in [(0,2),(2,0),(1,3),(3,1)]:
self.assertEqual(pmc1.otherp[p], q)
for p, i in [(0,0),(2,0),(1,1),(3,1)]:
self.assertEqual(pmc1.pairid[p], i)
for i, (p,q) in [(0, (0,2)), (1, (1,3))]:
self.assertEqual(pmc1.pairs[i], (p,q))
def testPMCEqual(self):
pmc1 = PMC([(0,2),(1,3)])
pmc2 = PMC([(1,3),(0,2)])
pmc3 = PMC([(2,0),(3,1)])
self.assertEqual(pmc1, pmc2)
self.assertEqual(pmc1, pmc3)
self.assertEqual(hash(pmc1), hash(pmc2))
self.assertEqual(hash(pmc1), hash(pmc3))
def testPMCOpp(self):
pmc1 = PMC([(0,2),(1,3)])
self.assertEqual(pmc1.opp(), pmc1)
pmc2 = PMC([(0,2),(1,6),(3,5),(4,7)])
pmc3 = PMC([(0,3),(1,6),(2,4),(5,7)])
self.assertEqual(pmc2.opp(), pmc3)
self.assertEqual(pmc3.opp(), pmc2)
self.assertTrue(pmc2 != pmc3)
def testSplitPMC(self):
self.assertEqual(splitPMC(1), PMC([(0,2),(1,3)]))
self.assertEqual(splitPMC(2), PMC([(0,2),(1,3),(4,6),(5,7)]))
def testLinearPMC(self):
self.assertEqual(linearPMC(1), PMC([(0,2),(1,3)]))
self.assertEqual(linearPMC(2), PMC([(0,2),(1,4),(3,6),(5,7)]))
def testAntipodalPMC(self):
self.assertEqual(antipodalPMC(1), PMC([(0,2),(1,3)]))
self.assertEqual(antipodalPMC(2), PMC([(0,4),(1,5),(2,6),(3,7)]))
def testConnectSumPMC(self):
self.assertEqual(connectSumPMC(linearPMC(2), splitPMC(1)),
PMC([(0,2),(1,4),(3,6),(5,7),(8,10),(9,11)]))
def testUnconnectSumPMC(self):
self.assertEqual(unconnectSumPMC(splitPMC(2), 1),
(splitPMC(1), splitPMC(1)))
self.assertEqual(
unconnectSumPMC(connectSumPMC(linearPMC(2), splitPMC(1)), 2),
(linearPMC(2), splitPMC(1)))
def testGetIdempotents(self):
pmc = splitPMC(2)
idems = pmc.getIdempotents()
self.assertEqual(len(idems), 6)
for idem in idems:
self.assertTrue(isinstance(idem, Idempotent))
self.assertEqual(idem.pmc, pmc)
def testGetStrandDiagrams(self):
pmc = splitPMC(1)
alg = pmc.getAlgebra(mult_one = False)
self.assertEqual(len(alg.getGenerators()), 8)
pmc2 = splitPMC(2)
alg2 = pmc2.getAlgebra(mult_one = False)
for sd2 in alg2.getGenerators():
self.assertTrue(isinstance(sd2, StrandDiagram))
self.assertEqual(sd2.pmc, pmc2)
self.assertEqual(sd2.mult_one, False)
def testGetMultOneStrandDiagrams(self):
pmc = splitPMC(1)
alg = pmc.getAlgebra(idem_size = 2, mult_one = True)
for sd in alg.getGenerators():
self.assertTrue(isinstance(sd, StrandDiagram))
self.assertEqual(sd.pmc, pmc)
self.assertEqual(sd.mult_one, True)
self.assertTrue(all([x <= 1 for x in sd.multiplicity]))
self.assertEqual(len(alg.getGenerators()), 5)
class IdempotentTest(unittest.TestCase):
def testIdempotent(self):
pmc = antipodalPMC(2)
idem1 = Idempotent(pmc, [0,1])
self.assertEqual(idem1.opp(), Idempotent(pmc, [2,3]))
self.assertEqual(idem1.comp(), Idempotent(pmc, [2,3]))
idem2 = Idempotent(pmc, [0,3])
self.assertEqual(idem2.opp(), Idempotent(pmc, [0,3]))
self.assertEqual(idem2.comp(), Idempotent(pmc, [1,2]))
pmc2 = PMC([(0,2),(1,6),(3,5),(4,7)])
# Note pmc2.opp() = PMC([(0,3),(1,6),(2,4),(5,7)])
idem3 = Idempotent(pmc2, [0,1])
self.assertEqual(idem3.opp(), Idempotent(pmc2.opp(), [1,3]))
self.assertEqual(idem3.comp(), Idempotent(pmc2, [2,3]))
def testUnconnectSumIdem(self):
pmc1, pmc2, pmc3 = splitPMC(1), splitPMC(2), splitPMC(3)
idem1 = Idempotent(pmc3, [0, 2, 4])
self.assertEqual(unconnectSumIdem(idem1, 1),
(Idempotent(pmc1, [0]), Idempotent(pmc2, [0, 2])))
self.assertEqual(unconnectSumIdem(idem1, 2),
(Idempotent(pmc2, [0, 2]), Idempotent(pmc1, [0])))
idem2 = Idempotent(pmc3, [0, 1, 2])
self.assertEqual(unconnectSumIdem(idem2, 1),
(Idempotent(pmc1, [0, 1]), Idempotent(pmc2, [0])))
self.assertEqual(unconnectSumIdem(idem2, 2),
(Idempotent(pmc2, [0, 1, 2]), Idempotent(pmc1, [])))
class StrandsTest(unittest.TestCase):
def testStrands(self):
pmc = PMC([(0,2),(1,6),(3,5),(4,7)])
sd1 = Strands(pmc, [(0,1)])
self.assertEqual(sd1.opp(), Strands(pmc.opp(), [(6,7)]))
def testIdemCompatible(self):
pmc = splitPMC(2)
sd1 = Strands(pmc, [(0,1)])
self.assertTrue(sd1.leftCompatible(pmc.idem([0,4])))
self.assertFalse(sd1.leftCompatible(pmc.idem([0,1])))
self.assertFalse(sd1.leftCompatible(pmc.idem([1,4])))
self.assertFalse(sd1.leftCompatible(pmc.idem([])))
self.assertTrue(sd1.rightCompatible(pmc.idem([3])))
self.assertFalse(sd1.rightCompatible(pmc.idem([2])))
def testUnconnectSumStrands(self):
pmc1, pmc2 = splitPMC(1), splitPMC(2)
sd1 = Strands(pmc2, [(0,3),(4,5)])
self.assertEqual(unconnectSumStrands(sd1, 1),
(Strands(pmc1, [(0,3)]), Strands(pmc1, [(0,1)])))
sd2 = Strands(pmc2, [(5,7)])
self.assertEqual(unconnectSumStrands(sd2, 1),
(Strands(pmc1, []), Strands(pmc1, [(1,3)])))
class StrandDiagramTest(unittest.TestCase):
def setUp(self):
self.pmc = splitPMC(2)
self.sd1 = self.pmc.sd([(0,3),(1,4)], mult_one = False)
self.sd2 = self.pmc.sd([1,(0,4)], mult_one = True)
self.sd3 = self.pmc.sd([(0,1),(1,2)], mult_one = False)
self.sd4 = self.pmc.sd([(0,4),(1,3)], mult_one = False)
def testStrandDiagramInit(self):
self.assertEqual(self.sd1.multiplicity, [1,2,2,1,0,0,0])
self.assertEqual(self.sd1.parent,
self.pmc.getAlgebra(mult_one = False))
self.assertEqual(self.sd1.double_hor, ())
self.assertEqual(self.sd2.multiplicity, [1,1,1,1,0,0,0])
self.assertEqual(self.sd2.parent, self.pmc.getAlgebra(mult_one = True))
self.assertEqual(self.sd2.double_hor, (1,))
def testStrandDiagramOpp(self):
self.assertEqual(self.sd1.opp(),
self.pmc.opp().sd([(4,7),(3,6)], mult_one = False))
self.assertEqual(self.sd2.opp(),
self.pmc.opp().sd([6,(3,7)], mult_one = True))
def testStrandDiagramEqual(self):
pmc2 = splitPMC(2)
pmc2alg = pmc2.getAlgebra(mult_one = False)
sd10 = StrandDiagram(pmc2alg, left_idem = [1,0],
strands = [(1,4),(0,3)])
self.assertEqual(self.sd1, sd10)
self.assertTrue(self.sd1 != self.sd4)
self.assertEqual(hash(self.sd1), hash(sd10))
def testPropagateRight(self):
self.assertEqual(self.sd1.right_idem, self.pmc.idem([1,4]))
self.assertEqual(self.sd2.right_idem, self.pmc.idem([1,4]))
self.assertEqual(self.sd3.right_idem, self.pmc.idem([0,1]))
def testNumCrossing(self):
self.assertEqual(self.sd1.numCrossing(), 0)
self.assertEqual(self.sd3.numCrossing(), 0)
self.assertEqual(self.sd4.numCrossing(), 1)
def testMaslov(self):
self.assertEqual(self.sd1.maslov(), Fraction(-2))
self.assertEqual(self.sd2.maslov(), Fraction(-1,2))
self.assertEqual(self.sd3.maslov(), Fraction(-3,2))
self.assertEqual(self.sd4.maslov(), Fraction(-1))
def testGrading(self):
self.assertEqual(self.sd1.getBigGrading(),
self.pmc.big_gr(-2, [1,2,2,1,0,0,0]))
def testDiff(self):
self.assertEqual(self.sd1.diff(), 0)
self.assertEqual(self.sd4.diff(), 1*self.sd1)
sd2d1 = self.pmc.sd([(0,1),(1,4)], True)
sd2d2 = self.pmc.sd([(0,3),(3,4)], True)
self.assertEqual(self.sd2.diff(), 1*sd2d1 + 1*sd2d2)
# Test for double crossing
pmc2 = antipodalPMC(2)
sd10 = pmc2.sd([(0,7),(1,6),(2,5)], False)
sd10d1 = pmc2.sd([(0,6),(1,7),(2,5)], False)
sd10d2 = pmc2.sd([(0,7),(1,5),(2,6)], False)
self.assertEqual(sd10.diff(), 1*sd10d1 + 1*sd10d2)
def testMultiply(self):
# TODO: improve structure of this test
sd1 = self.pmc.sd([4,(0,1)], False)
sd2 = self.pmc.sd([4,(1,2)], False)
sd12 = self.pmc.sd([4,(0,2)], False)
self.assertEqual(sd1 * sd2, 1*sd12)
self.assertEqual(sd2 * sd1, 0)
sd3 = self.pmc.sd([1,(4,5)], False)
sd4 = self.pmc.sd([5,(3,4)], False)
sd34 = self.pmc.sd([(3,4),(4,5)], False)
self.assertEqual(sd3 * sd4, 1*sd34)
self.assertEqual(sd4 * sd3, 0)
# Test for double crossing
sd5 = self.pmc.sd([0,(1,3)], False)
sd6 = self.pmc.sd([1,(2,4)], False)
self.assertEqual(sd5 * sd6, 0)
sd7 = self.pmc.sd([0,(1,3)], False)
sd8 = self.pmc.sd([3,(0,2)], False)
sd9 = self.pmc.sd([(0,2),(1,3)], False)
self.assertEqual(sd7 * sd8, 1*sd9)
self.assertEqual(sd8 * sd7, 0)
sd10 = self.pmc.sd([0,(1,3)], True)
sd11 = self.pmc.sd([3,(0,2)], True)
self.assertEqual(sd10 * sd11, 0)
def testAntiDiff(self):
self.assertEqual(self.sd1.antiDiff(), 1*self.sd4)
self.assertEqual(self.sd2.antiDiff(), 0)
sd3_ans = self.pmc.sd([1,(0,2)], False)
self.assertEqual(self.sd3.antiDiff(), 1*sd3_ans)
self.assertEqual(self.sd4.antiDiff(), 0)
def testFactor(self):
# Number after each strand diagram is the number of factorizations
tests = [(self.sd1, 8), (self.sd2, 3), (self.sd3, 2), (self.sd4, 9)]
for sd, n in tests:
self.assertEqual(len(sd.factor()), n)
def testStrandDiagramOppGrading(self):
# Verify the relations gr'(a).opp() = gr'(a.opp()) and
# gr(a).opp() = gr(a.opp()).
sd_to_test = [self.sd1, self.sd2, self.sd3, self.sd4]
for sd in sd_to_test:
self.assertEqual(sd.getBigGrading().opp(),
sd.opp().getBigGrading())
# This only works if DEFAULT_REFINEMENT is set to averageRefinement.
self.assertEqual(
sd.getSmallGrading(refinement = averageRefinement).opp(),
sd.opp().getSmallGrading(refinement = averageRefinement))
def testUnconnectSumStrandDiagram(self):
sd1 = self.pmc.sd([4,(0,3)], False)
pmc1 = splitPMC(1)
self.assertEqual(unconnectSumStrandDiagram(sd1, 1),
(pmc1.sd([(0,3)], False), pmc1.sd([0], False)))
sd2 = self.pmc.sd([(1,2),(6,7)], False)
self.assertEqual(unconnectSumStrandDiagram(sd2, 1),
(pmc1.sd([(1,2)], False), pmc1.sd([(2,3)], False)))
if __name__ == "__main__":
unittest.main()
| 11,205
| 40.350554
| 80
|
py
|
CppDNN-develop
|
CppDNN-develop/example/keras_simple/simple.py
|
from keras import models
from keras import layers
from numpy import array
mnistfile = open('mnist_example', 'r')
mnistdata = mnistfile.read()
mnistdata = mnistdata.splitlines()[0].split(' ')
mnistdataf = []
for m in mnistdata:
mnistdataf.append(float(m))
mnistdata = array(mnistdataf)
mnistdata = mnistdata.reshape((1, 784))
mnistdata = [1, 2, 1, 2, 1]
mnistdata = array(mnistdata)
mnistdata = mnistdata.reshape((1, 5))
network = models.Sequential()
network.add(layers.Dense(8, input_shape=(5,)))
network.add(layers.Dense(5, activation='relu'))
network.add(layers.Dense(2, activation='softmax'))
network.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
print(network.predict(mnistdata))
network.save('simple.h5')
| 788
| 27.178571
| 50
|
py
|
CppDNN-develop
|
CppDNN-develop/script/DecodeKerasModel.py
|
import sys
from keras import models
if len(sys.argv) < 3:
print('usage: python DecodeKerasModel.py input output')
exit(1)
input = sys.argv[1]
output = sys.argv[2]
print(input)
outputFile = open(output, 'w')
model = models.load_model(input)
weights_list = model.get_weights()
print("#################################################################")
print("# Layer Numbers: " + str(len(weights_list)) + '\n')
outputFile.write("# Layer Numbers: " + str(int(len(weights_list)/2)) + '\n')
for l in range(int(len(weights_list)/2)):
w = weights_list[l * 2]
b = weights_list[l * 2 + 1]
outputFile.write("# Layer Number: {}".format(l) + '\n')
print("# Layer Number: {}".format(l) + '\n')
outputFile.write(model.layers[l].activation.__str__().split(' ')[1] + '\n')
outputFile.write(str(len(b)) + ' ' + str(len(w)) + '\n')
print(str(len(b)) + ' ' + str(len(w)) + '\n')
outputFile.write("# W" + '\n')
print(w.shape)
for x in w:
for y in x:
outputFile.write(str(y) + '\n')
outputFile.write("# B" + '\n')
print(b.shape)
for x in b:
outputFile.write(str(x) + '\n')
outputFile.close()
| 1,171
| 30.675676
| 79
|
py
|
CppDNN-develop
|
CppDNN-develop/script/NeuralNetworkSaver.py
|
def NeuralNetworkSaver(ns, layers: list, save_in: str):
file = open(save_in, "w")
file.write("# Layer Numbers: " + str(len(layers)))
file.write('\n')
for i, layer in enumerate(layers):
file.write("# Layer Number: " + str(i) + "\n")
file.write(layer[0] + "\n")
file.write(str(ns[i+1]) + " " + str(ns[i]) + '\n')
file.write("# W\n")
file.write(tensor_to_str(layer[1]))
file.write("# B\n")
file.write(tensor_to_str(layer[2]))
def tensor_to_str(tensor):
out = ""
for i in range(len(tensor)):
for j in range(len(tensor[i])):
out += str(tensor[i][j]) + "\n"
return out
| 674
| 24.961538
| 58
|
py
|
CppDNN-develop
|
CppDNN-develop/script/DecodeTensorFlowModel.py
|
import tensorflow as tf
import NeuralNetworkSaver as nns
nx = 94
n1 = 256
n2 = 64
# n3 = 32
n4 = 11
with tf.variable_scope("Layer1"):
w1 = tf.Variable(tf.random_normal([nx, n1]), name="weight_1")
b1 = tf.Variable(tf.random_normal([1, n1]), name="bias_1")
# o1 = tf.nn.relu(tf.add(tf.matmul(x, w1), b1, name="o1"))
with tf.variable_scope("Layer2"):
w2 = tf.Variable(tf.random_normal([n1, n2]), name="weight_2")
b2 = tf.Variable(tf.random_normal([1, n2]), name="bias_2")
# o2 = tf.nn.relu(tf.add(tf.matmul(o1, w2), b2, name="o2"))
with tf.variable_scope("Out"):
w4 = tf.Variable(tf.random_normal([n2, n4]), name="weight_out")
b4 = tf.Variable(tf.random_normal([1, n4]), name="bias_out")
# o4 = (tf.add(tf.matmul(o2, w4), b4, name="o4"))
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
saver.restore(sess, './logs/SaveforLoad/SL.ckpt')
_w1, _w2, _w4, _b1, _b2, _b4 = sess.run([w1, w2, w4, b1, b2, b4])
nns.NeuralNetworkSaver([nx, n1, n2, n4], [["relu", _w1, _b1], ["relu", _w2, _b2], ["linear", _w4, _b4]], "test")
| 1,131
| 32.294118
| 116
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/main_arxiv_node_classification.py
|
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch_geometric.data import DataLoader as DataLoaderpyg
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch_geometric.transforms as T
from ogb.nodeproppred import Evaluator
class DotDict(dict):
def __init__(self, **kwds):
self.update(kwds)
self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.ogb_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
if torch.cuda.is_available() and use_gpu:
print('cuda available with GPU:',torch.cuda.get_device_name(gpu_id))
device = torch.device("cuda:"+ str(gpu_id))
else:
print('cuda not available')
device = torch.device("cpu")
return device
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
model = gnn_model(MODEL_NAME, net_params)
total_param = 0
print("MODEL DETAILS:\n")
print(model)
for param in model.parameters():
# print(param.data.size())
total_param += np.prod(list(param.data.size()))
print('MODEL/Total parameters:', MODEL_NAME, total_param)
return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
start0 = time.time()
per_epoch_time = []
DATASET_NAME = dataset.name
if MODEL_NAME in ['GCN', 'GAT']:
if net_params['self_loop']:
print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
dataset._add_self_loops()
if not net_params['edge_feat']:
edge_feat_dim = 1
dataset.dataset.data.edge_attr = torch.ones(dataset.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
if net_params['pos_enc']:
print("[!] Adding graph positional encoding.")
dataset._add_positional_encodings(net_params['pos_enc_dim'],DATASET_NAME)
print('Time PE:',time.time()-start0)
device = net_params['device']
if DATASET_NAME == 'ogbn-mag':
dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train']['paper'].to(device),\
dataset.split_idx['valid']['paper'].to(device), \
dataset.split_idx['test']['paper'].to(device)
else:
dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train'].to(device), \
dataset.split_idx['valid'].to(device), \
dataset.split_idx['test'].to(device)
# transform = T.ToSparseTensor() To do to save memory
# self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework='pyg') for _, g in enumerate(dataset.train)]
root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
# Write network and optimization hyper-parameters in folder config/
with open(write_config_file + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
writer = SummaryWriter(log_dir=log_dir)
# setting seeds
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
print("Training Graphs: ", dataset.split_idx['train'].size(0))
print("Validation Graphs: ", dataset.split_idx['valid'].size(0))
print("Test Graphs: ", dataset.split_idx['test'].size(0))
print("Number of Classes: ", net_params['n_classes'])
model = gnn_model(MODEL_NAME, net_params)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
factor=params['lr_reduce_factor'],
patience=params['lr_schedule_patience'],
verbose=True)
evaluator = Evaluator(name = DATASET_NAME)
epoch_train_losses, epoch_val_losses = [], []
epoch_train_accs, epoch_val_accs = [], []
# import train functions for all other GCNs
if DATASET_NAME == 'ogbn-arxiv': # , 'ogbn-proteins''
from train.train_ogb_node_classification import train_epoch_arxiv as train_epoch, evaluate_network_arxiv as evaluate_network
elif DATASET_NAME == 'ogbn-proteins':
from train.train_ogb_node_classification import train_epoch_proteins as train_epoch, evaluate_network_proteins as evaluate_network
# elif DATASET_NAME == 'ogbn-mag':
# At any point you can hit Ctrl + C to break out of training early.
try:
with tqdm(range(params['epochs']),ncols= 0) as t:
for epoch in t:
t.set_description('Epoch %d' % epoch)
start = time.time()
# for all other models common train function
epoch_train_loss = train_epoch(model, optimizer, device, dataset.dataset[0], dataset.split_idx['train'])
epoch_train_acc, epoch_val_acc, epoch_test_acc, epoch_val_loss = evaluate_network(model, device, dataset, evaluator)
# _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
epoch_train_accs.append(epoch_train_acc)
epoch_val_accs.append(epoch_val_acc)
writer.add_scalar('train/_loss', epoch_train_loss, epoch)
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_acc', epoch_train_acc, epoch)
writer.add_scalar('val/_acc', epoch_val_acc, epoch)
writer.add_scalar('test/_acc', epoch_test_acc, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
train_loss=epoch_train_loss, val_loss=epoch_val_loss,
train_acc=epoch_train_acc, val_acc=epoch_val_acc,
test_acc=epoch_test_acc)
per_epoch_time.append(time.time()-start)
# Saving checkpoint
ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
# the function to save the checkpoint
# torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
files = glob.glob(ckpt_dir + '/*.pkl')
for file in files:
epoch_nb = file.split('_')[-1]
epoch_nb = int(epoch_nb.split('.')[0])
if epoch_nb < epoch-1:
os.remove(file)
scheduler.step(epoch_val_loss)
# it used to test the scripts
# if epoch == 1:
# break
if optimizer.param_groups[0]['lr'] < params['min_lr']:
print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
break
# Stop training after params['max_time'] hours
if time.time()-start0 > params['max_time']*3600:
print('-' * 89)
print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
break
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early because of KeyboardInterrupt')
train_acc, val_acc, test_acc, _ = evaluate_network(model, device, dataset, evaluator)
train_acc, val_acc, test_acc = 100 * train_acc, 100 * val_acc, 100 * test_acc
print("Test Accuracy: {:.4f}".format(test_acc))
print("Val Accuracy: {:.4f}".format(val_acc))
print("Train Accuracy: {:.4f}".format(train_acc))
print("Convergence Time (Epochs): {:.4f}".format(epoch))
print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
writer.close()
"""
Write the results in out_dir/results folder
"""
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST ACCURACY: {:.4f}\nval ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
test_acc, val_acc,train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
def _str2bool(text):
    """Decode the CLI's 'True'/'False' string convention into a bool."""
    return text == 'True'


def _override(target, args, casts):
    """Copy each CLI flag named in *casts* into the *target* dict, cast.

    Flags that were not supplied on the command line (attribute value
    ``None``) are skipped, so the JSON-config value survives.
    """
    for name, cast in casts.items():
        value = getattr(args, name)
        if value is not None:
            target[name] = cast(value)


def _run_name(prefix, MODEL_NAME, DATASET_NAME, gpu_id):
    """Build one timestamped output path: <prefix><model>_<dataset>_GPU<id>_<time>."""
    return prefix + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(gpu_id) \
        + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')


def _parse_args():
    """Declare the CLI.  Most flags are plain string overrides for config keys."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default=None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    # Boolean switches: present -> True, absent -> False (never None).
    parser.add_argument('--pos_enc', action='store_true', default=False, help="Please give a value for pos_enc")
    parser.add_argument('--use_node_embedding', action='store_true', default=False)
    # Every remaining flag follows the same "--name overrides config['name']" pattern.
    for name in ('model', 'dataset', 'out_dir', 'seed', 'epochs', 'batch_size',
                 'init_lr', 'lr_reduce_factor', 'lr_schedule_patience', 'min_lr',
                 'weight_decay', 'print_epoch_interval', 'L', 'hidden_dim',
                 'out_dim', 'residual', 'edge_feat', 'readout', 'kernel',
                 'n_heads', 'gated', 'in_feat_dropout', 'dropout', 'layer_norm',
                 'batch_norm', 'sage_aggregator', 'data_mode', 'num_pool',
                 'gnn_per_block', 'embedding_dim', 'pool_ratio', 'linkpred',
                 'cat', 'self_loop', 'max_time', 'pos_enc_dim'):
        parser.add_argument('--' + name, help="Please give a value for " + name)
    return parser.parse_args()


def main():
    """Entry point: merge CLI flags over the JSON config, load the dataset,
    and launch ``train_val_pipeline``.

    Any flag left unset on the command line keeps the value from the
    ``--config`` JSON file.
    """
    args = _parse_args()
    with open(args.config) as f:
        config = json.load(f)

    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])

    # model, dataset, out_dir: CLI wins over config
    MODEL_NAME = args.model if args.model is not None else config['model']
    DATASET_NAME = args.dataset if args.dataset is not None else config['dataset']

    # optimization parameters
    params = config['params']
    # Models registered with a name ending in 'pyg' run on PyTorch Geometric,
    # everything else on DGL; --framework overrides the inferred choice.
    params['framework'] = 'pyg' if MODEL_NAME[-3:] == 'pyg' else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    # --use_node_embedding is a store_true switch, so (as in the original code,
    # whose `is not None` guard could never fail) the CLI value always applies.
    params['use_node_embedding'] = bool(args.use_node_embedding)
    dataset = LoadData(DATASET_NAME = DATASET_NAME, use_node_embedding = params['use_node_embedding'])
    out_dir = args.out_dir if args.out_dir is not None else config['out_dir']
    _override(params, args, {
        'seed': int, 'epochs': int, 'batch_size': int,
        'init_lr': float, 'lr_reduce_factor': float, 'lr_schedule_patience': int,
        'min_lr': float, 'weight_decay': float,
        'print_epoch_interval': int, 'max_time': float,
    })

    # network (architecture) parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    # net_params['batch_size'] = params['batch_size']
    _override(net_params, args, {
        'L': int, 'hidden_dim': int, 'out_dim': int, 'kernel': int,
        'n_heads': int, 'num_pool': int, 'gnn_per_block': int,
        'embedding_dim': int, 'pos_enc_dim': int,
        'in_feat_dropout': float, 'dropout': float, 'pool_ratio': float,
        'residual': _str2bool, 'edge_feat': _str2bool, 'gated': _str2bool,
        'layer_norm': _str2bool, 'batch_norm': _str2bool, 'linkpred': _str2bool,
        'cat': _str2bool, 'self_loop': _str2bool,
        'readout': str, 'sage_aggregator': str, 'data_mode': str,
    })
    # --pos_enc is also store_true: the CLI always decides this flag.
    net_params['pos_enc'] = bool(args.pos_enc)

    # arxiv 'ogbn-mag'
    net_params['in_dim'] = dataset.dataset[0].x.size(1)
    # ogbn-proteins is multi-label (one output column per task); every other
    # dataset is single-label, so count the distinct label rows instead.
    net_params['n_classes'] = dataset.dataset[0].y.size(1) if DATASET_NAME == 'ogbn-proteins' else torch.unique(dataset.dataset[0].y, dim=0).size(0)

    gpu_id = config['gpu']['id']
    root_log_dir = _run_name(out_dir + 'logs/', MODEL_NAME, DATASET_NAME, gpu_id)
    root_ckpt_dir = _run_name(out_dir + 'checkpoints/', MODEL_NAME, DATASET_NAME, gpu_id)
    write_file_name = _run_name(out_dir + 'results/result_', MODEL_NAME, DATASET_NAME, gpu_id)
    write_config_file = _run_name(out_dir + 'configs/config_', MODEL_NAME, DATASET_NAME, gpu_id)
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file

    # exist_ok avoids the check-then-create race of the original code
    os.makedirs(out_dir + 'results', exist_ok=True)
    os.makedirs(out_dir + 'configs', exist_ok=True)

    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)


main()
| 19,431
| 41.060606
| 202
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/test.py
|
# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
import csv
import dgl
from scipy import sparse as sp
import numpy as np
# # coding=gbk
# from tqdm import trange
# from random import random,randint
# import time
#
# with trange(100) as t:
# for i in t:
# #t.set_description("GEN111 %i" % i)
# t.set_postfix(loss=8,gen=randint(1,999),str="h",lst=[1,2],lst11=[1,2],loss11=8)
# time.sleep(0.1)
# t.close()
# import dgl
# import torch as th
# # 4 nodes, 3 edges
# # g1 = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 3])))
# def positional_encoding(g, pos_enc_dim):
# """
# Graph positional encoding v/ Laplacian eigenvectors
# """
#
# # Laplacian
# A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
# N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
# L = sp.eye(g.number_of_nodes()) - N * A * N
#
# # Eigenvectors with numpy
# EigVal, EigVec = np.linalg.eig(L.toarray())
# idx = EigVal.argsort() # increasing order from min to max order index
# EigVal, EigVec = EigVal[idx], np.real(EigVec[:, idx])
# g.ndata['pos_enc'] = torch.from_numpy(EigVec[:, 1:pos_enc_dim + 1]).float()
#
# # # Eigenvectors with scipy
# # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
# # EigVec = EigVec[:, EigVal.argsort()] # increasing order
# # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
#
# return g
#
#
# def message_func(edges):
# Bh_j = edges.src['Bh']
# e_ij = edges.data['Ce'] + edges.src['Dh'] + edges.dst['Eh'] # e_ij = Ce_ij + Dhi + Ehj
# edges.data['e'] = e_ij
# return {'Bh_j': Bh_j, 'e_ij': e_ij}
#
#
# def reduce_func(nodes):
# Ah_i = nodes.data['Ah']#这个对只有出去,没有进来的点没有。这个时候只能是0,还不如加个自循环,用messange来做
# Bh_j = nodes.mailbox['Bh_j']
# e = nodes.mailbox['e_ij']
# sigma_ij = torch.sigmoid(e.float()) # sigma_ij = sigmoid(e_ij)
# # h = Ah_i + torch.mean( sigma_ij * Bh_j, dim=1 ) # hi = Ahi + mean_j alpha_ij * Bhj
# h = Ah_i + torch.sum(sigma_ij * Bh_j, dim=1) / (torch.sum(sigma_ij,
# dim=1) + 1e-6) # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
# return {'h': h}
# g1 = dgl.DGLGraph()
# g1.add_nodes(4)
# g1.add_edges([0, 1, 2], [1, 2, 3])
# # g1.ndata['h'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Ah'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Bh'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Dh'] = th.randn((4, 3),dtype=th.float32)
# # g1.ndata['Eh'] = th.randn((4, 3),dtype=th.float32)
# # g1.edata['e']=th.randn((3, 3),dtype=th.float32)
# # g1.edata['Ce'] = th.randn((3, 3),dtype=th.float32)
# g1.ndata['h'] = th.reshape(th.arange(1, 13), (4, 3))
# g1.ndata['Ah'] = th.reshape(th.arange(13, 25), (4, 3))
# g1.ndata['Bh'] = th.reshape(th.arange(26, 38), (4, 3))
# g1.ndata['Dh'] = th.reshape(th.arange(39, 51), (4, 3))
# g1.ndata['Eh'] = th.reshape(th.arange(52, 64), (4, 3))
# g1.edata['e'] = th.reshape(th.arange(65, 74), (3, 3))
# g1.edata['Ce'] = th.reshape(th.arange(75, 84), (3, 3))
# positional_encoding(g1, 3)
# # 3 nodes, 4 edges
# g2 = dgl.DGLGraph()
# g2.add_nodes(3)
# g2.add_edges([0, 0, 0, 1], [0, 1, 2, 0])
# g2.ndata['h'] = th.reshape(th.arange(101, 110), (3, 3))
# g2.ndata['Ah'] = th.reshape(th.arange(113, 122), (3, 3))
# g2.ndata['Bh'] = th.reshape(th.arange(126, 135), (3, 3))
# g2.ndata['Dh'] = th.reshape(th.arange(139, 148), (3, 3))
# g2.ndata['Eh'] = th.reshape(th.arange(152, 161), (3, 3))
# g2.edata['e'] = th.reshape(th.arange(165, 177), (4, 3))
# g2.edata['Ce'] = th.reshape(th.arange(175, 187), (4, 3))
# bg = dgl.batch([g1, g2])
# bg.update_all(message_func, reduce_func)
# bg.ndata['h']
# a = 1+1
# # g3 = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 3])))
# # g4 = dgl.graph((th.tensor([0, 0, 0, 1]), th.tensor([0, 1, 2, 0])))
# # bg = dgl.batch([g3, g4], edge_attrs=None)
#
# # import dgl
# # import torch as th
# # g1 = dgl.DGLGraph()
# # g1.add_nodes(2) # Add 2 nodes
# # g1.add_edge(0, 1) # Add edge 0 -> 1
# # g1.ndata['hv'] = th.tensor([[0.], [1.]]) # Initialize node features
# # g1.edata['he'] = th.tensor([[0.]]) # Initialize edge features
# # g2 = dgl.DGLGraph()
# # g2.add_nodes(3) # Add 3 nodes
# # g2.add_edges([0, 2], [1, 1]) # Add edges 0 -> 1, 2 -> 1
# # g2.ndata['hv'] = th.tensor([[2.], [3.], [4.]]) # Initialize node features
# # g2.edata['he'] = th.tensor([[1.], [2.]]) # Initialize edge features
# # bg = dgl.batch([g1, g2], edge_attrs=None)
# import time
# try:
# while True:
# print("你好")
# time.sleep(1)
# except KeyboardInterrupt:
# print('aa')
#
# print("好!")
# import numpy as np
# import torch
# import pickle
# import time
# import os
# import matplotlib.pyplot as plt
# if not os.path.isfile('molecules.zip'):
# print('downloading..')
# !curl https://www.dropbox.com/s/feo9qle74kg48gy/molecules.zip?dl=1 -o molecules.zip -J -L -k
# !unzip molecules.zip -d ../
# # !tar -xvf molecules.zip -C ../
# else:
# print('File already downloaded')
from tqdm import tqdm
import time

# Smoke test for tqdm: render a progress bar over ~10 seconds of tiny sleeps.
for _ in tqdm(range(10000)):
    time.sleep(0.001)
| 5,463
| 34.712418
| 145
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/main_Planetoid_node_classification.py
|
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """A dict whose entries can also be read and written as attributes."""

    def __init__(self, **kwds):
        super().__init__()
        self.update(kwds)
        # Point the instance __dict__ at the mapping itself so attribute
        # access and item access share the same storage.
        self.__dict__ = self
# from configs.base import Grid, Config
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.Planetoid_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin CUDA device visibility to *gpu_id* and return the torch device.

    Falls back to the CPU when CUDA is unavailable or *use_gpu* is false.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(gpu_id))
        return torch.device("cuda:" + str(gpu_id))
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model once and report its total parameter count."""
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    #print(model)
    # np.prod of an empty size() is 1, so scalar parameters count correctly.
    total_param = sum(np.prod(list(p.data.size())) for p in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Run the full experiment: 10 dataset splits x 3 runs each, averaging
    train/val/test accuracies, and write logs, checkpoints and result files.

    Args:
        MODEL_NAME: registered GNN name understood by ``gnn_model``.
        dataset: loaded dataset exposing ``train_idx``/``val_idx``/``test_idx``
            per split (presumably 10 splits — TODO confirm against LoadData).
        params: optimization hyper-parameters (seed, lr, epochs, max_time, ...).
        net_params: architecture hyper-parameters; also carries the device.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    # Per-split averages accumulated over the 10 splits.
    avg_test_acc = []
    avg_train_acc = []
    avg_val_acc = []
    avg_convergence_epochs = []
    t0 = time.time()
    per_epoch_time = []
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']

    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(dataset.name, MODEL_NAME, params, net_params, net_params['total_param']))

    # At any point you can hit Ctrl + C to break out of training early.
    try:
        for split_number in range(10):
            training_scores, val_scores, test_scores, epochs = [], [], [], []
            # Re-seed per split so every split starts from the same RNG state.
            random.seed(params['seed'])
            np.random.seed(params['seed'])
            torch.manual_seed(params['seed'])
            if device.type == 'cuda':
                torch.cuda.manual_seed(params['seed'])
            # Mitigate bad random initializations
            train_idx, val_idx, test_idx = dataset.train_idx[split_number], dataset.val_idx[split_number], \
                                           dataset.test_idx[split_number]
            print("Training Nodes: ", len(train_idx))
            print("Validation Nodes: ", len(val_idx))
            print("Test Nodes: ", len(test_idx))
            print("Number of Classes: ", net_params['n_classes'])

            # Three independent runs per split; their scores are averaged below.
            for run in range(3):
                t0_split = time.time()
                print("RUN NUMBER:", split_number, run)
                log_dir = os.path.join(root_log_dir, "RUN_" + str(split_number))
                writer = SummaryWriter(log_dir=log_dir)
                model = gnn_model(MODEL_NAME, net_params)
                model = model.to(device)
                optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
                # LR decays on a validation-loss plateau; training stops once it
                # drops below params['min_lr'] (checked at the end of each epoch).
                scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                                 factor=params['lr_reduce_factor'],
                                                                 patience=params['lr_schedule_patience'],
                                                                 verbose=True)
                epoch_train_losses, epoch_val_losses = [], []
                epoch_train_accs, epoch_val_accs = [], []

                # import train functions for all other GCNs
                from train.train_Planetoid_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network

                with tqdm(range(params['epochs']), ncols= 0) as t:
                    for epoch in t:
                        t.set_description('Epoch %d' % epoch)
                        start = time.time()
                        # for all other models common train function
                        epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, dataset, train_idx)
                        epoch_val_loss, epoch_val_acc = evaluate_network(model, device, dataset, val_idx)
                        _, epoch_test_acc = evaluate_network(model, device, dataset, test_idx)
                        epoch_train_losses.append(epoch_train_loss)
                        epoch_val_losses.append(epoch_val_loss)
                        epoch_train_accs.append(epoch_train_acc)
                        epoch_val_accs.append(epoch_val_acc)
                        # TensorBoard scalars for this epoch.
                        writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                        writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                        writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                        writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                        writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                        writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                        t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                                      train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                                      train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                                      test_acc=epoch_test_acc)
                        per_epoch_time.append(time.time()-start)

                        # Saving checkpoint
                        ckpt_dir = os.path.join(root_ckpt_dir, "RUN_" + str(split_number))
                        if not os.path.exists(ckpt_dir):
                            os.makedirs(ckpt_dir)
                        # Uncomment to actually save a per-epoch checkpoint:
                        # torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                        # Prune old checkpoints: keep only files from the
                        # previous epoch onwards.
                        files = glob.glob(ckpt_dir + '/*.pkl')
                        for file in files:
                            epoch_nb = file.split('_')[-1]
                            epoch_nb = int(epoch_nb.split('.')[0])
                            if epoch_nb < epoch-1:
                                os.remove(file)

                        scheduler.step(epoch_val_loss)

                        # Uncomment to smoke-test the script (stop after one epoch):
                        # if epoch == 1:
                        #     break

                        if optimizer.param_groups[0]['lr'] < params['min_lr']:
                            print("\n!! LR EQUAL TO MIN LR SET.")
                            break

                        # Stop training after params['max_time'] hours
                        if time.time()-t0_split > params['max_time']*3600/10:  # Dividing max_time by 10, since there are 10 runs in TUs
                            print('-' * 89)
                            print("Max_time for one train-val-test split experiment elapsed {:.3f} hours, so stopping".format(params['max_time']/10))
                            break

                # Final (last-epoch) accuracies for this run.
                _, test_acc = evaluate_network(model, device, dataset, test_idx)
                _, val_acc = evaluate_network(model, device, dataset, val_idx)
                _, train_acc = evaluate_network(model, device, dataset, train_idx)
                training_scores.append(train_acc)
                val_scores.append(val_acc)
                test_scores.append(test_acc)
                epochs.append(epoch)

            # Average the three runs of this split.
            training_score = sum(training_scores) / 3
            val_score = sum(val_scores) / 3
            test_score = sum(test_scores) / 3
            epoch_score = sum(epochs) / 3
            avg_val_acc.append(val_score)
            avg_test_acc.append(test_score)
            avg_train_acc.append(training_score)
            avg_convergence_epochs.append(epoch_score)
            print("Test Accuracy [LAST EPOCH]: {:.4f}".format(test_score))
            print("Val Accuracy: {:.4f}".format(val_score))
            print("Train Accuracy [LAST EPOCH]: {:.4f}".format(training_score))
            print("Convergence Time (Epochs): {:.4f}".format(epoch_score))

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')

    print("TOTAL TIME TAKEN: {:.4f}hrs".format((time.time()-t0)/3600))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    print("AVG CONVERGENCE Time (Epochs): {:.4f}".format(np.mean(np.array(avg_convergence_epochs))))
    # Final test accuracy value averaged over 10-fold
    print("""\n\n\nFINAL RESULTS\n\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}""" .format(np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100))
    print("\nAll splits Test Accuracies:\n", avg_test_acc)
    print("""\n\n\nFINAL RESULTS\n\nVAL ACCURACY averaged: {:.4f} with s.d. {:.4f}""".format(
        np.mean(np.array(avg_val_acc)) * 100, np.std(avg_val_acc) * 100))
    print("\nAll splits Val Accuracies:\n", avg_val_acc)
    print("""\n\n\nFINAL RESULTS\n\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}""" .format(np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100))
    print("\nAll splits Train Accuracies:\n", avg_train_acc)
    writer.close()

    """
    Write the results in out/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\nval ACCURACY averaged: {:.4f} with s.d. {:.4f}\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\n\n
    Average Convergence Time (Epochs): {:.4f} with s.d. {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\nAll Splits Test Accuracies: {}"""\
          .format(dataset.name, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,
                  np.mean(np.array(avg_val_acc))*100, np.std(avg_val_acc)*100,
                  np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100,
                  np.mean(avg_convergence_epochs), np.std(avg_convergence_epochs),
                  (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc))
def main():
    """
    USER CONTROLS

    Parse CLI flags, overlay them on the JSON config, load the Planetoid
    dataset and launch ``train_val_pipeline``.  Any flag left unset keeps
    the value from the ``--config`` JSON file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default= None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)

    # TODO: separate hyper-parameter sweeps via a config grid, e.g.:
    # model_configurations = Grid(config_file, dataset_name)
    # model_configuration = Config(**model_configurations[0])
    #
    # exp_path = os.path.join(result_folder, f'{model_configuration.exp_name}_assessment')

    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    # parameters
    params = config['params']
    # Models whose registered name ends in 'pyg' run on PyTorch Geometric,
    # everything else on DGL; --framework overrides the inferred choice.
    params['framework'] = 'pyg' if MODEL_NAME[-3:] == 'pyg' else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    # NOTE(review): --use_node_embedding is a store_true flag, so it is never
    # None; this branch always runs (False when the flag is absent).
    if args.use_node_embedding is not None:
        params['use_node_embedding'] = bool(args.use_node_embedding)
    dataset = LoadData(DATASET_NAME, use_node_embedding = params['use_node_embedding'],framework = params['framework'])
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    # Boolean net params use the string convention 'True'/'False' on the CLI.
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc=='True' else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)

    # Planetoid: input dim from node features, classes from distinct label rows.
    net_params['in_dim'] = dataset.dataset[0].x.size(1)
    net_params['n_classes'] = torch.unique(dataset.dataset[0].y,dim=0).size(0)

    if MODEL_NAME == 'DiffPool':
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
        max_num_node = max(num_nodes)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']

    if MODEL_NAME == 'RingGNN':
        # RingGNN needs the average node count over all graphs.
        num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))

    # Timestamped output locations for logs / checkpoints / results / configs.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)


main()
| 21,251
| 43
| 188
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/main_ogb_node_classification.py
|
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch_geometric.data import DataLoader as DataLoaderpyg
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch_geometric.transforms as T
from ogb.nodeproppred import Evaluator
from torch_geometric.data import RandomNodeSampler
class DotDict(dict):
    """Dictionary whose entries are also reachable as attributes.

    ``d = DotDict(a=1)`` allows both ``d['a']`` and ``d.a``; the two views
    are always in sync because the attribute store *is* the dict itself.
    """

    def __init__(self, **entries):
        super().__init__(entries)
        # Alias the instance __dict__ to the mapping so attribute access
        # and item access share the same storage.
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.ogb_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Return the torch device to train on, pinning CUDA to ``gpu_id``.

    Sets CUDA_DEVICE_ORDER/CUDA_VISIBLE_DEVICES before probing CUDA, then
    falls back to CPU when CUDA is unavailable or ``use_gpu`` is False.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # Guard clause: anything short of an explicitly requested, available GPU
    # means CPU. (is_available() is queried first, as in the original.)
    if not (torch.cuda.is_available() and use_gpu):
        print('cuda not available')
        return torch.device("cpu")
    print('cuda available with GPU:', torch.cuda.get_device_name(gpu_id))
    return torch.device("cuda:" + str(gpu_id))
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model once, print it, and return its parameter count.

    Args:
        MODEL_NAME: model key understood by ``gnn_model`` (from load_net).
        net_params: network hyper-parameter dict forwarded to the model.

    Returns:
        int: total number of parameter elements across ``model.parameters()``.
    """
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    print(model)
    # numel() counts elements directly; the previous
    # np.prod(list(param.data.size())) accumulated a numpy float and
    # yielded 1.0 (not 0-safe semantics aside) for zero-dim parameters.
    total_param = sum(param.numel() for param in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train MODEL_NAME on an OGB node-classification dataset; log and save results.

    Args:
        MODEL_NAME: model key for ``gnn_model``.
        dataset: LoadData wrapper exposing ``.name``, ``.dataset`` and ``.split_idx``.
        params: optimisation hyper-parameters (seed, lr, epochs, max_time, ...).
        net_params: network hyper-parameters; ``net_params['device']`` picks the device.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    start0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            # Self-loops let each node aggregate its own features.
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    if not net_params['edge_feat']:
        # No real edge features: substitute constant 1-dim features of ones.
        edge_feat_dim = 1
        if DATASET_NAME == 'ogbn-mag':
            dataset.dataset.edge_attr = torch.ones(dataset.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
        else:
            dataset.dataset.data.edge_attr = torch.ones(dataset.dataset[0].num_edges, edge_feat_dim).type(torch.float32)
    if net_params['pos_enc']:
        print("[!] Adding graph positional encoding.")
        dataset._add_positional_encodings(net_params['pos_enc_dim'], DATASET_NAME)
        print('Time PE:', time.time()-start0)
    device = net_params['device']
    if DATASET_NAME == 'ogbn-mag':
        # ogbn-mag is heterogeneous; keep only the 'paper' node-type split.
        dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train']['paper'],\
            dataset.split_idx['valid']['paper'], \
            dataset.split_idx['test']['paper']
    # else:
    #     dataset.split_idx['train'], dataset.split_idx['valid'], dataset.split_idx['test'] = dataset.split_idx['train'].to(device), \
    #         dataset.split_idx['valid'].to(device), \
    #         dataset.split_idx['test'].to(device)
    # transform = T.ToSparseTensor() To do to save memory
    # self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework='pyg') for _, g in enumerate(dataset.train)]
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    # Write network and optimization hyper-parameters in folder configs/.
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # Seed python/numpy/torch so runs are reproducible for a given params['seed'].
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", dataset.split_idx['train'].size(0))
    print("Validation Graphs: ", dataset.split_idx['valid'].size(0))
    print("Test Graphs: ", dataset.split_idx['test'].size(0))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    # Halve-style LR decay driven by validation loss (see scheduler.step below).
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    evaluator = Evaluator(name = DATASET_NAME)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    # Pick the dataset-specific train/eval functions (proteins uses multi-label AUC path).
    if DATASET_NAME == 'ogbn-mag' or DATASET_NAME == 'ogbn-products':
        from train.train_ogb_node_classification import train_epoch as train_epoch, evaluate_network as evaluate_network
    elif DATASET_NAME == 'ogbn-proteins':
        from train.train_ogb_node_classification import train_epoch_proteins as train_epoch, evaluate_network_proteins as evaluate_network
    data = dataset.dataset[0]
    # Convert split index tensors into boolean node masks on the graph object.
    for split in ['train', 'valid', 'test']:
        mask = torch.zeros(data.num_nodes, dtype=torch.bool)
        mask[dataset.split_idx[split]] = True
        data[f'{split}_mask'] = mask
    # Fewer, larger partitions for ogbn-mag; 40 parts otherwise.
    num_parts = 5 if DATASET_NAME == 'ogbn-mag' else 40
    train_loader = RandomNodeSampler(data, num_parts=num_parts, shuffle=True,
                                     num_workers=0)
    test_loader = RandomNodeSampler(data, num_parts=5, num_workers=0)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs']), ncols= 0) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                epoch_train_loss = train_epoch(model, optimizer, device, train_loader, epoch)
                # One pass over test_loader yields train/val/test metrics via the masks.
                epoch_train_acc, epoch_val_acc, epoch_test_acc, epoch_val_loss = evaluate_network(model, device, test_loader, evaluator, epoch)
                # _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Checkpoint housekeeping: keep only the most recent epochs' files.
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                # the function to save the checkpoint (disabled):
                # torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                # used to smoke-test the scripts:
                # if epoch == 1:
                #     break
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
                    break
                # Stop training after params['max_time'] hours.
                if time.time()-start0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation with the last model state; scale accuracies to percent.
    train_acc, val_acc, test_acc, _ = evaluate_network(model, device, test_loader, evaluator, epoch)
    train_acc, val_acc, test_acc = 100 * train_acc, 100 * val_acc, 100 * test_acc
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nval ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, val_acc, train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
def main():
    """Entry point: merge CLI overrides into the JSON config and launch training.

    Every CLI flag, when provided, overrides the corresponding entry in the
    config file (device, model, dataset, params and net_params).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default= None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', action='store_true', default=False, help="Please give a value for pos_enc")
    parser.add_argument('--use_node_embedding', action='store_true', default=False)
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    # parameters
    params = config['params']
    # Models whose name ends in 'pyg' run on torch-geometric, others on DGL.
    params['framework'] = 'pyg' if MODEL_NAME[-3:] == 'pyg' else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    # NOTE(review): --use_node_embedding is a store_true flag so it is never
    # None; this branch always runs and the expression reduces to the flag value.
    if args.use_node_embedding is not None:
        params['use_node_embedding'] = True if args.use_node_embedding == True else False
    dataset = LoadData(DATASET_NAME = DATASET_NAME, use_node_embedding = params['use_node_embedding'])
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    # net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    # NOTE(review): --pos_enc is a store_true flag, so this branch always runs
    # and simply copies the boolean flag into net_params.
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc==True else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # arxiv / 'ogbn-mag': infer input dim and class count from the graph itself.
    net_params['in_dim'] = dataset.dataset[0].x.size(1)
    # dataset.dataset[0]
    # net_params['n_classes'] = torch.unique(dataset.dataset[0].y,dim=0).size(0)
    # ogbn-proteins is multi-label (one column per task); others are single-label.
    net_params['n_classes'] = dataset.dataset[0].y.size(1) if DATASET_NAME == 'ogbn-proteins' else torch.unique(dataset.dataset[0].y, dim=0).size(0)
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# Guard the script entry point so importing this module does not start training.
if __name__ == '__main__':
    main()
| 20,051
| 41.303797
| 202
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/main_SBMs_node_classification.py
|
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch_geometric.data import DataLoader as DataLoaderpyg
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch_geometric.transforms as T
class DotDict(dict):
    """A dict that exposes its keys as attributes (``d.key`` == ``d['key']``)."""

    def __init__(self, **items):
        super().__init__(items)
        # The dict doubles as its own attribute namespace, keeping item
        # access and attribute access permanently in sync.
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.SBMs_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pick the training device: ``cuda:gpu_id`` when requested and available, else CPU.

    Also exports CUDA_DEVICE_ORDER / CUDA_VISIBLE_DEVICES so CUDA sees the
    requested physical GPU.
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # Probe availability first (same order as the original), then honour use_gpu.
    cuda_ok = torch.cuda.is_available() and use_gpu
    if cuda_ok:
        print('cuda available with GPU:', torch.cuda.get_device_name(gpu_id))
        return torch.device("cuda:" + str(gpu_id))
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model once and return its total parameter count.

    The model itself is deliberately not printed here (unlike the OGB script).

    Args:
        MODEL_NAME: model key understood by ``gnn_model`` (from load_net).
        net_params: network hyper-parameter dict forwarded to the model.

    Returns:
        int: total number of parameter elements across ``model.parameters()``.
    """
    model = gnn_model(MODEL_NAME, net_params)
    print("MODEL DETAILS:\n")
    # numel() gives an exact integer element count; the previous
    # np.prod(list(param.data.size())) accumulated a numpy float and
    # returned 1.0 for zero-dim parameters.
    total_param = sum(param.numel() for param in model.parameters())
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train MODEL_NAME on an SBM node-classification dataset; log and save results.

    Args:
        MODEL_NAME: model key for ``gnn_model``; RingGNN/3WLGNN take the dense
            (WL-GNN) training path, all other models the sparse one.
        dataset: LoadData wrapper exposing ``.name``, ``.train/.val/.test`` and
            the collate functions used by the DGL loaders.
        params: optimisation hyper-parameters (seed, lr, epochs, batch_size, ...).
        net_params: network hyper-parameters; ``net_params['device']`` picks the device.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    start0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            # Self-loops let each node aggregate its own features.
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    if MODEL_NAME in ['GatedGCN_pyg', 'ResGatedGCN_pyg']:
        if net_params['pos_enc']:
            print("[!] Adding graph positional encoding.")
            dataset._add_positional_encodings(net_params['pos_enc_dim'])
            print('Time PE:', time.time()-start0)
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    # transform = T.ToSparseTensor() To do to save memory
    # self.train.graph_lists = [positional_encoding(g, pos_enc_dim, framework='pyg') for _, g in enumerate(dataset.train)]
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write network and optimization hyper-parameters in folder configs/.
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # Seed python/numpy/torch so runs are reproducible for a given params['seed'].
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    # LR decay driven by validation loss (see scheduler.step below).
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    if MODEL_NAME in ['RingGNN', '3WLGNN']:
        # Train functions specific to dense WL-GNNs (batch size 1 via collate_dense_gnn).
        from train.train_SBMs_node_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
        train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
        val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
        test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
    else:
        # Train functions for all other (sparse message-passing) GCNs; loader
        # type depends on whether the model runs on torch-geometric or DGL.
        from train.train_SBMs_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        # train_loader = DataLoaderpyg(trainset, batch_size=2, shuffle=False)
        train_loader = DataLoaderpyg(trainset, batch_size=params['batch_size'], shuffle=True) if params['framework'] == 'pyg' else DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
        val_loader = DataLoaderpyg(valset, batch_size=params['batch_size'], shuffle=False) if params['framework'] == 'pyg' else DataLoader(valset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
        test_loader = DataLoaderpyg(testset, batch_size=params['batch_size'], shuffle=False) if params['framework'] == 'pyg' else DataLoader(testset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs']), ncols= 0) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                if MODEL_NAME in ['RingGNN', '3WLGNN']: # different batch training function for dense GNNs
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
                else: # for all other models common train function
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['framework'])
                epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch, params['framework'])
                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch, params['framework'])
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Checkpoint housekeeping: keep only the most recent epochs' files.
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                # the function to save the checkpoint (disabled):
                # torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                # used to smoke-test the scripts:
                # if epoch == 1:
                #     break
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
                    break
                # Stop training after params['max_time'] hours.
                if time.time()-start0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # Final evaluation with the last model state on all three splits.
    _, test_acc = evaluate_network(model, device, test_loader, epoch, params['framework'])
    _, val_acc = evaluate_network(model, device, val_loader, epoch, params['framework'])
    _, train_acc = evaluate_network(model, device, train_loader, epoch, params['framework'])
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Val Accuracy: {:.4f}".format(val_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nval ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  test_acc, val_acc, train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
def main():
    """Entry point: merge CLI overrides into the JSON config and launch training.

    Every CLI flag, when provided, overrides the corresponding entry in the
    config file (device, model, dataset, params and net_params).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
    parser.add_argument('--framework', type=str, default= None, help="Please give a framework to use")
    parser.add_argument('--gpu_id', help="Please give a value for gpu id")
    parser.add_argument('--model', help="Please give a value for model name")
    parser.add_argument('--dataset', help="Please give a value for dataset name")
    parser.add_argument('--out_dir', help="Please give a value for out_dir")
    parser.add_argument('--seed', help="Please give a value for seed")
    parser.add_argument('--epochs', help="Please give a value for epochs")
    parser.add_argument('--batch_size', help="Please give a value for batch_size")
    parser.add_argument('--init_lr', help="Please give a value for init_lr")
    parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
    parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
    parser.add_argument('--min_lr', help="Please give a value for min_lr")
    parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
    parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
    parser.add_argument('--L', help="Please give a value for L")
    parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
    parser.add_argument('--out_dim', help="Please give a value for out_dim")
    parser.add_argument('--residual', help="Please give a value for residual")
    parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
    parser.add_argument('--readout', help="Please give a value for readout")
    parser.add_argument('--kernel', help="Please give a value for kernel")
    parser.add_argument('--n_heads', help="Please give a value for n_heads")
    parser.add_argument('--gated', help="Please give a value for gated")
    parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
    parser.add_argument('--dropout', help="Please give a value for dropout")
    parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
    parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
    parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
    parser.add_argument('--data_mode', help="Please give a value for data_mode")
    parser.add_argument('--num_pool', help="Please give a value for num_pool")
    parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
    parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
    parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
    parser.add_argument('--linkpred', help="Please give a value for linkpred")
    parser.add_argument('--cat', help="Please give a value for cat")
    parser.add_argument('--self_loop', help="Please give a value for self_loop")
    parser.add_argument('--max_time', help="Please give a value for max_time")
    parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
    parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # device
    if args.gpu_id is not None:
        config['gpu']['id'] = int(args.gpu_id)
        config['gpu']['use'] = True
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    # model, dataset, out_dir
    if args.model is not None:
        MODEL_NAME = args.model
    else:
        MODEL_NAME = config['model']
    if args.dataset is not None:
        DATASET_NAME = args.dataset
    else:
        DATASET_NAME = config['dataset']
    # parameters
    params = config['params']
    # Models whose name ends in 'pyg' run on torch-geometric, others on DGL.
    params['framework'] = 'pyg' if MODEL_NAME[-3:] == 'pyg' else 'dgl'
    if args.framework is not None:
        params['framework'] = str(args.framework)
    dataset = LoadData(DATASET_NAME, framework = params['framework'])
    if args.out_dir is not None:
        out_dir = args.out_dir
    else:
        out_dir = config['out_dir']
    if args.seed is not None:
        params['seed'] = int(args.seed)
    if args.epochs is not None:
        params['epochs'] = int(args.epochs)
    if args.batch_size is not None:
        params['batch_size'] = int(args.batch_size)
    if args.init_lr is not None:
        params['init_lr'] = float(args.init_lr)
    if args.lr_reduce_factor is not None:
        params['lr_reduce_factor'] = float(args.lr_reduce_factor)
    if args.lr_schedule_patience is not None:
        params['lr_schedule_patience'] = int(args.lr_schedule_patience)
    if args.min_lr is not None:
        params['min_lr'] = float(args.min_lr)
    if args.weight_decay is not None:
        params['weight_decay'] = float(args.weight_decay)
    if args.print_epoch_interval is not None:
        params['print_epoch_interval'] = int(args.print_epoch_interval)
    if args.max_time is not None:
        params['max_time'] = float(args.max_time)
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    if args.L is not None:
        net_params['L'] = int(args.L)
    if args.hidden_dim is not None:
        net_params['hidden_dim'] = int(args.hidden_dim)
    if args.out_dim is not None:
        net_params['out_dim'] = int(args.out_dim)
    if args.residual is not None:
        net_params['residual'] = True if args.residual=='True' else False
    if args.edge_feat is not None:
        net_params['edge_feat'] = True if args.edge_feat=='True' else False
    if args.readout is not None:
        net_params['readout'] = args.readout
    if args.kernel is not None:
        net_params['kernel'] = int(args.kernel)
    if args.n_heads is not None:
        net_params['n_heads'] = int(args.n_heads)
    if args.gated is not None:
        net_params['gated'] = True if args.gated=='True' else False
    if args.in_feat_dropout is not None:
        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
    if args.dropout is not None:
        net_params['dropout'] = float(args.dropout)
    if args.layer_norm is not None:
        net_params['layer_norm'] = True if args.layer_norm=='True' else False
    if args.batch_norm is not None:
        net_params['batch_norm'] = True if args.batch_norm=='True' else False
    if args.sage_aggregator is not None:
        net_params['sage_aggregator'] = args.sage_aggregator
    if args.data_mode is not None:
        net_params['data_mode'] = args.data_mode
    if args.num_pool is not None:
        net_params['num_pool'] = int(args.num_pool)
    if args.gnn_per_block is not None:
        net_params['gnn_per_block'] = int(args.gnn_per_block)
    if args.embedding_dim is not None:
        net_params['embedding_dim'] = int(args.embedding_dim)
    if args.pool_ratio is not None:
        net_params['pool_ratio'] = float(args.pool_ratio)
    if args.linkpred is not None:
        net_params['linkpred'] = True if args.linkpred=='True' else False
    if args.cat is not None:
        net_params['cat'] = True if args.cat=='True' else False
    if args.self_loop is not None:
        net_params['self_loop'] = True if args.self_loop=='True' else False
    if args.pos_enc is not None:
        net_params['pos_enc'] = True if args.pos_enc=='True' else False
    if args.pos_enc_dim is not None:
        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
    # SBM: infer input dim (number of distinct node-feature values) and class
    # count from the first training sample; access pattern differs per framework.
    # net_params['in_dim'] = torch.unique(dataset.train[0][0].ndata['feat'],dim=0).size(0) # node_dim (feat is an integer)
    # net_params['n_classes'] = torch.unique(dataset.train[0][1],dim=0).size(0)
    net_params['in_dim'] = torch.unique(dataset.train[0].x, dim=0).size(0) if 'pyg' == params['framework'] else torch.unique(dataset.train[0][0].ndata['feat'], dim=0).size(0)
    net_params['n_classes'] = torch.unique(dataset.train[0].y, dim=0).size(0) if 'pyg' == params['framework'] else torch.unique(dataset.train[0][1], dim=0).size(0)
    if MODEL_NAME == 'RingGNN':
        # RingGNN needs the average node count to size its dense tensors.
        num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# Guard the script entry point so importing this module does not start training.
if __name__ == '__main__':
    main()
| 19,999
| 41.643923
| 226
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/gat_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from torch_geometric.typing import OptPairTensor
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GATConv
class GATNet_pyg(nn.Module):
    """GAT (Veličković et al., ICLR 2018) for OGB node classification,
    built on PyG's GATConv.

    Hidden layers use `n_heads` concatenated heads of size `hidden_dim`
    (so their width is hidden_dim * n_heads); a single-head layer maps to
    `out_dim`, followed by a linear readout to `n_classes`. Optional
    positional encodings are added to the input embedding.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimensionality
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim * num_heads)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # Hidden layers: heads are concatenated, so width stays hidden_dim*num_heads.
        self.layers = nn.ModuleList([GATConv(hidden_dim * num_heads, hidden_dim, num_heads,
                                             dropout=dropout) for _ in range(self.n_layers - 1)])
        # BUG FIX: `dropout` was previously passed positionally into GATConv's
        # `concat` parameter, so the final layer silently ran with dropout=0.
        # Pass it by keyword (as the hidden layers above already do).
        self.layers.append(GATConv(hidden_dim * num_heads, out_dim, heads=1, dropout=dropout))
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim * num_heads) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = nn.Linear(out_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return node logits of shape (num_nodes, n_classes); `e` is unused."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        for i in range(self.n_layers):
            h_in = h
            # Pass a (src, dst) feature pair so GATConv concatenates rather
            # than re-using a single tensor (see original note).
            h: OptPairTensor = (h, h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            h = F.elu(h)
            if self.residual:
                h = h_in + h  # residual connection
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss

    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
| 3,492
| 38.247191
| 120
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/load_net.py
|
"""
Utility file to select GraphNN model as
selected by the user
"""
from nets.ogb_node_classification.gated_gcn_net import GatedGCNNet, GatedGCNNet_pyg, ResGatedGCNNet_pyg
from nets.ogb_node_classification.gcn_net import GCNNet_pyg
from nets.ogb_node_classification.gat_net import GATNet_pyg
from nets.ogb_node_classification.graphsage_net import GraphSageNet, GraphSageNet_pyg
from nets.ogb_node_classification.mlp_net import MLPNet, MLPNet_pyg
from nets.ogb_node_classification.gin_net import GINNet, GINNet_pyg
from nets.ogb_node_classification.mo_net import MoNet as MoNet_, MoNetNet_pyg
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
def GatedGCN(net_params):
    """DGL Gated GCN."""
    return GatedGCNNet(net_params)


def GraphSage(net_params):
    """DGL GraphSAGE."""
    return GraphSageNet(net_params)


def MLP(net_params):
    """DGL graph-agnostic MLP baseline."""
    return MLPNet(net_params)


def GIN(net_params):
    """DGL GIN."""
    return GINNet(net_params)


def MoNet(net_params):
    """DGL MoNet / GMM network."""
    return MoNet_(net_params)


def GIN_pyg(net_params):
    """PyG GIN; mirrors the requested neighbour aggregation onto `model.aggr`."""
    model = GINNet_pyg(net_params)
    requested = net_params['neighbor_aggr_GIN']
    if requested in ('mean', 'max'):
        model.aggr = requested
    return model


def MLP_pyg(net_params):
    """PyG graph-agnostic MLP baseline."""
    return MLPNet_pyg(net_params)


def GCN_pyg(net_params):
    """PyG GCN."""
    return GCNNet_pyg(net_params)


def GatedGCN_pyg(net_params):
    """PyG gated graph sequence network."""
    return GatedGCNNet_pyg(net_params)


def ResGatedGCN_pyg(net_params):
    """PyG residual gated GCN."""
    return ResGatedGCNNet_pyg(net_params)


def GAT_pyg(net_params):
    """PyG GAT."""
    return GATNet_pyg(net_params)


def GraphSage_pyg(net_params):
    """PyG GraphSAGE."""
    return GraphSageNet_pyg(net_params)


def MoNet_pyg(net_params):
    """PyG MoNet / GMM network."""
    return MoNetNet_pyg(net_params)
def gnn_model(MODEL_NAME, net_params):
    """Look up the factory registered under `MODEL_NAME` and build the network.

    Raises KeyError for an unknown model name.
    """
    factories = {
        'GatedGCN': GatedGCN,
        'GraphSage': GraphSage,
        'MLP': MLP,
        'GIN': GIN,
        'MoNet': MoNet,
        'MLP_pyg': MLP_pyg,
        'GIN_pyg': GIN_pyg,
        'GCN_pyg': GCN_pyg,
        'GatedGCN_pyg': GatedGCN_pyg,
        'GAT_pyg': GAT_pyg,
        'GraphSage_pyg': GraphSage_pyg,
        'MoNet_pyg': MoNet_pyg,
        'ResGatedGCN_pyg': ResGatedGCN_pyg,
    }
    factory = factories[MODEL_NAME]
    return factory(net_params)
| 2,369
| 27.554217
| 103
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/graphsage_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import SAGEConv
class GraphSageNet(nn.Module):
    """GraphSAGE network (DGL variant) with multiple GraphSageLayer layers.

    Embeds integer node features, applies L GraphSageLayer convolutions
    (the last mapping hidden_dim -> out_dim) and an MLP readout to n_classes.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers-1 hidden convolutions plus one mapping to out_dim
        self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                                    dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
        self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
    def forward(self, g, h, e):
        """Return node logits for graph `g`; edge features `e` are unused."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # graphsage convolutions
        for conv in self.layers:
            h = conv(g, h)
        # output readout
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
# the DGL and PyG implementations differ
"""
class GraphSageNet_pyg(nn.Module):
    """GraphSAGE (Hamilton et al., NeurIPS 2017) built on PyG's SAGEConv.

    n_layers-1 hidden SAGEConv layers (hidden_dim -> hidden_dim), one
    SAGEConv to out_dim, then a linear readout to n_classes. Dropout is
    applied before each conv; batch-norm and residual connections optional.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        self.n_layers = net_params['L']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = nn.Dropout(p=dropout)
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.layers = nn.ModuleList([SAGEConv(hidden_dim, hidden_dim) for _ in
                                     range(self.n_layers - 1)])
        self.layers.append(
            SAGEConv(hidden_dim, out_dim))
        self.MLP_layer = nn.Linear(out_dim, n_classes, bias=True)
        # NOTE(review): `self.aggr` records the requested aggregator but is
        # never passed to the SAGEConv layers above, so they keep PyG's
        # default aggregation — confirm whether this is intentional.
        if aggregator_type == 'maxpool':
            self.aggr = 'max'
        elif aggregator_type == 'mean':
            self.aggr = 'mean'
    # to do
    def forward(self, h, edge_index, e):
        """Return node logits of shape (num_nodes, n_classes); `e` is unused."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # graphsage convolutions
        for i in range(self.n_layers):
            h_in = h
            h = self.dropout(h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            # NOTE(review): the ReLU below is deliberately commented out, so
            # each layer is affine apart from SAGEConv internals — confirm.
            #h = F.relu(h)
            if self.residual:
                h = h_in + h # residual connection
        # output readout
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
| 5,368
| 35.52381
| 122
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/gin_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
# import torch_geometric as tg
from torch_geometric.nn import GINConv
class GINNet(nn.Module):
    """GIN (Xu et al., ICLR 2019), DGL variant, for node classification.

    Node logits are the sum of per-layer linear predictions over the hidden
    representation at every layer (including the input embedding).
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN'] # GIN
        learn_eps = net_params['learn_eps_GIN'] # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
        readout = net_params['readout'] # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        # GIN layers, each wrapping an MLP update function
        self.ginlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
                                           dropout, batch_norm, residual, 0, learn_eps))
        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers+1):
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))
    def forward(self, g, h, e):
        """Return summed per-layer logits for the nodes of graph `g`; `e` unused."""
        h = self.embedding_h(h)
        # list of hidden representation at each layer (including input)
        hidden_rep = [h]
        for i in range(self.n_layers):
            h = self.ginlayers[i](g, h)
            hidden_rep.append(h)
        # sum per-layer predictions (jumping-knowledge-style readout)
        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            score_over_layer += self.linears_prediction[i](h)
        return score_over_layer
    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
class GINNet_pyg(nn.Module):
    """GIN (Xu et al., ICLR 2019) built on PyG's GINConv.

    Node logits are the sum of per-layer linear predictions over every hidden
    representation (including the input embedding).
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN'] # GIN
        learn_eps = net_params['learn_eps_GIN'] # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
        readout = net_params['readout'] # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        # GINConv layers and their batch-norms
        self.ginlayers = torch.nn.ModuleList()
        self.normlayers = torch.nn.ModuleList()
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            # NOTE(review): neighbor_aggr_type is NOT forwarded here, so the
            # conv keeps its default aggregation; the GIN_pyg factory in
            # load_net.py sets `.aggr` on the built network afterwards —
            # confirm that this actually reaches the conv layers.
            self.ginlayers.append(GINConv(ApplyNodeFunc(mlp), 0, learn_eps))
            if batch_norm:
                self.normlayers.append(nn.BatchNorm1d(hidden_dim))
        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers + 1):
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))
    def forward(self, h, edge_index, e):
        """Return summed per-layer node logits; `e` is unused."""
        h = self.embedding_h(h)
        # list of hidden representation at each layer (including input)
        hidden_rep = [h]
        for i in range(self.n_layers):
            h_in = h
            h = self.ginlayers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h) # batch normalization
            h = F.relu(h) # non-linear activation
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
            hidden_rep.append(h)
        # sum per-layer predictions
        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            score_over_layer += self.linears_prediction[i](h)
        return score_over_layer
    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
| 5,771
| 35.531646
| 113
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/gcn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
class GCNNet_pyg(nn.Module):
    """GCN (Kipf & Welling, ICLR 2017) for OGB node classification, built on
    PyG's GCNConv.

    All L convolutions map hidden_dim -> hidden_dim; a linear readout maps to
    n_classes. Optional positional encodings are added to the input embedding.
    (Cleanup: removed a dead commented-out duplicate of the conv loop and the
    unused local `out_dim`.)
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimensionality
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = dropout
        self.layers = nn.ModuleList([GCNConv(hidden_dim, hidden_dim, improved=False)
                                     for _ in range(self.n_layers)])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)
                                             for _ in range(self.n_layers)])
        self.MLP_layer = nn.Linear(hidden_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return node logits of shape (num_nodes, n_classes); `e` is unused."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)  # non-linear activation
            if self.residual:
                h = h_in + h  # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss

    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
| 3,608
| 33.701923
| 110
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/gated_gcn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer, ResGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GatedGraphConv
class GatedGCNNet(nn.Module):
    """Gated GCN (Bresson & Laurent, 2018), DGL variant: embeds integer node
    ids and float edge features, runs L GatedGCNLayer convolutions that update
    both streams, then an MLP readout to n_classes."""
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                    self.batch_norm, self.residual) for _ in range(n_layers) ])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, g, h, e, h_pos_enc=None):
        """Return node logits for graph `g`; node and edge features are updated jointly."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets
        for conv in self.layers:
            h, e = conv(g, h, e)
        # output readout
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class ResGatedGCNNet_pyg(nn.Module):
    """Residual Gated Graph ConvNet (Bresson & Laurent, 2018) using the
    project's ResGatedGCNLayer.

    Node and edge features are embedded to `hidden_dim`; each layer updates
    both streams, with optional batch-norm, residual connections and dropout
    applied here (outside the layer).
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feature dimensionality
        in_dim_edge = 1  # edge_dim (feat is a float)
        num_bond_type = 3
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.edge_feat = net_params['edge_feat']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)
        # BUG FIX: embedding_e used to be unconditionally re-assigned to an
        # nn.Linear right after this conditional, which silently disabled the
        # `edge_feat` Embedding branch. Keep only the conditional (the Linear
        # branch is unchanged since in_dim_edge == 1).
        if self.edge_feat:
            self.embedding_e = nn.Embedding(num_bond_type, hidden_dim)
        else:
            self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList([ResGatedGCNLayer(hidden_dim, hidden_dim, self.dropout,
                                                      self.batch_norm, self.residual) for _ in range(self.n_layers)])
        if self.batch_norm:
            self.normlayers_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
            self.normlayers_e = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
        self.MLP_layer = nn.Linear(hidden_dim, n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return node logits. `e` holds edge features (floats, or bond-type
        indices when edge_feat is True — confirm against the dataloader)."""
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        for i in range(self.n_layers):
            h_in = h
            e_in = e
            h, e = self.layers[i](h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers_h[i](h)
                e = self.normlayers_e[i](e)  # batch normalization on both streams
            if self.residual:
                h = h_in + h  # residual connections on both streams
                e = e_in + e
            h = F.dropout(h, self.dropout, training=self.training)
            e = F.dropout(e, self.dropout, training=self.training)
        h_out = self.MLP_layer(h)
        return h_out

    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss

    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
"""
Gated Graph Sequence Neural Networks
An Experimental Study of Neural Networks for Variable Graphs
Li Y, Tarlow D, Brockschmidt M, et al. Gated graph sequence neural networks[J]. arXiv preprint arXiv:1511.05493, 2015.
https://arxiv.org/abs/1511.05493
Note that the PyG and DGL implementations of GatedGCN are different models.
"""
class GatedGCNNet_pyg(nn.Module):
    """Gated Graph Sequence NN (Li et al., 2015) via PyG's GatedGraphConv.

    NOTE: this is a different model from the DGL GatedGCNNet above — a single
    GatedGraphConv module runs `L` GRU propagation steps internally.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
        # One GatedGraphConv performing n_layers propagation steps internally.
        self.layers = nn.ModuleList([GatedGraphConv(hidden_dim, n_layers, aggr = 'add')])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)])
        self.MLP_layer = nn.Linear(hidden_dim, n_classes, bias=True)
    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return node logits. `e` is passed through to GatedGraphConv as its
        third positional argument (presumably edge weights — confirm against
        the installed PyG signature)."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        # single gated propagation module (loop has one element)
        for conv in self.layers:
            h_in = h
            h = conv(h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers[0](h)
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        # output readout
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Multi-class cross-entropy loss (unweighted)."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
| 8,653
| 37.807175
| 122
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/mlp_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from layers.mlp_readout_layer import MLPReadout
class MLPNet(nn.Module):
    """Graph-agnostic MLP baseline (DGL variant): embeds integer node features
    and applies a per-node MLP with optional sigmoid gating, then an MLP
    readout to n_classes."""

    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        self.gated = net_params['gated']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        self.embedding_h = nn.Embedding(net_params['in_dim'], hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        # L blocks of Linear -> ReLU -> Dropout, all hidden_dim -> hidden_dim
        blocks = []
        for _ in range(net_params['L']):
            blocks += [nn.Linear(hidden_dim, hidden_dim, bias=True),
                       nn.ReLU(),
                       nn.Dropout(dropout)]
        self.feat_mlp = nn.Sequential(*blocks)
        if self.gated:
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, self.n_classes)

    def forward(self, g, h, e):
        """Per-node logits; the graph `g` and edge features `e` are ignored."""
        h = self.in_feat_dropout(self.embedding_h(h))
        h = self.feat_mlp(h)
        if self.gated:
            h = h * torch.sigmoid(self.gates(h))
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        return nn.CrossEntropyLoss()(pred, label)
class MLPNet_pyg(nn.Module):
    """Graph-agnostic MLP baseline (PyG variant): per-node MLP over linear
    node embeddings, with optional sigmoid gating and optional positional
    encodings added to the input embedding."""

    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        self.gated = net_params['gated']
        self.n_classes = net_params['n_classes']
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            # Project positional encodings into the hidden space.
            self.embedding_pos_enc = nn.Linear(net_params['pos_enc_dim'], hidden_dim)
        self.embedding_h = nn.Linear(net_params['in_dim'], hidden_dim)
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        # L blocks of Linear -> ReLU -> Dropout, all hidden_dim -> hidden_dim
        blocks = []
        for _ in range(net_params['L']):
            blocks += [nn.Linear(hidden_dim, hidden_dim, bias=True),
                       nn.ReLU(),
                       nn.Dropout(dropout)]
        self.feat_mlp = nn.Sequential(*blocks)
        if self.gated:
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = nn.Linear(hidden_dim, self.n_classes, bias=True)

    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Per-node logits; `edge_index` and `e` are ignored."""
        h = self.in_feat_dropout(self.embedding_h(h))
        if self.pos_enc:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        h = self.feat_mlp(h)
        if self.gated:
            h = h * torch.sigmoid(self.gates(h))
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        return nn.CrossEntropyLoss()(pred, label)

    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        return nn.BCEWithLogitsLoss()(pred, label.to(torch.float))
| 4,176
| 29.268116
| 93
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/ogb_node_classification/mo_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_add
import dgl
import numpy as np
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
from layers.gmm_layer import GMMLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GMMConv
class MoNet(nn.Module):
    """MoNet / GMM convolution network (Monti et al., CVPR 2017), DGL variant.

    Per-edge pseudo-coordinates are derived from node in-degrees and projected
    through a per-layer Linear+Tanh before each GMMLayer.
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel'] # for MoNet
        dim = net_params['pseudo_dim_MoNet'] # for MoNet
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        aggr_type = "sum" # default for MoNet
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        # Hidden layers
        for _ in range(n_layers-1):
            self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                                        dropout, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
                                    dropout, batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
    def forward(self, g, h, e):
        """Return node logits for graph `g`; edge features `e` are unused."""
        h = self.embedding_h(h)
        # computing the 'pseudo' named tensor which depends on node degrees
        g.ndata['deg'] = g.in_degrees()
        g.apply_edges(self.compute_pseudo)
        pseudo = g.edata['pseudo'].to(self.device).float()
        for i in range(len(self.layers)):
            h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
        return self.MLP_layer(h)
    def compute_pseudo(self, edges):
        """Per-edge pseudo-coordinates (1/sqrt(deg_src+1), 1/sqrt(deg_dst+1))."""
        # compute pseudo edge features for MoNet
        # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
        srcs = 1/np.sqrt(edges.src['deg']+1)
        dsts = 1/np.sqrt(edges.dst['deg']+1)
        pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
        return {'pseudo': pseudo}
    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class MoNetNet_pyg(nn.Module):
    """MoNet / GMM convolution network (Monti et al., CVPR 2017) on PyG's GMMConv.

    Pseudo-coordinates are computed from inverse-sqrt node degrees and
    projected through a per-layer Linear+Tanh before each GMMConv.
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel'] # for MoNet
        dim = net_params['pseudo_dim_MoNet'] # for MoNet
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        self.dim = dim
        # aggr_type = "sum" # default for MoNet
        aggr_type = "mean"
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node feat is an integer
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        self.batchnorm_h = nn.ModuleList()
        # Hidden layers
        for _ in range(self.n_layers - 1):
            self.layers.append(GMMConv(hidden_dim, hidden_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                       root_weight = True, bias = True))
            if self.batch_norm:
                self.batchnorm_h.append(nn.BatchNorm1d(hidden_dim))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMConv(hidden_dim, out_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                   root_weight = True, bias = True))
        if self.batch_norm:
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = nn.Linear(out_dim, n_classes, bias=True)
    # to do
    def forward(self, h, edge_index, e):
        """Return node logits; `e` is unused (pseudo-coords come from degrees)."""
        h = self.embedding_h(h)
        edge_weight = torch.ones((edge_index.size(1),),
                                 device = edge_index.device)
        row, col = edge_index[0], edge_index[1]
        # unweighted degree of each source node
        deg = scatter_add(edge_weight, row, dim=0, dim_size=h.size(0))
        # NOTE: pow_ is in-place, so `deg` itself now also holds deg^{-1/2}
        deg_inv_sqrt = deg.pow_(-0.5)
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        pseudo = torch.cat((deg_inv_sqrt[row].unsqueeze(-1), deg_inv_sqrt[col].unsqueeze(-1)), dim=1)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index, self.pseudo_proj[i](pseudo))
            if self.batch_norm:
                h = self.batchnorm_h[i](h) # batch normalization
            h = F.relu(h) # non-linear activation
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)
    def loss(self, pred, label):
        """Multi-class cross-entropy loss."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
    def loss_proteins(self, pred, label):
        """Multi-label BCE-with-logits loss (ogbn-proteins)."""
        criterion = nn.BCEWithLogitsLoss()
        loss = criterion(pred, label.to(torch.float))
        return loss
| 6,647
| 39.785276
| 121
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gat_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from torch_geometric.typing import OptPairTensor
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GATConv
class GATNet(nn.Module):
    """GAT node classifier (DGL backend): stacked GATLayers + MLP readout."""

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']   # node feat is an integer (vocab size)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers - 1 multi-head layers, then one single-head output layer.
        self.layers = nn.ModuleList(
            [GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
                      dropout, self.batch_norm, self.residual)
             for _ in range(n_layers - 1)])
        self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1,
                                    dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Embed node ids, run the GAT stack on graph `g`, read out per-node logits."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for attn_layer in self.layers:
            h = attn_layer(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V so that rare
        classes count more; classes absent from the batch get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class GATNet_pyg(nn.Module):
    """GAT node classifier (PyG backend).

    Mirrors GATNet (DGL) but uses torch_geometric's GATConv; batch norm,
    ELU activation and residual connections are applied here in forward()
    rather than inside the conv layer.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']   # node feat is an integer (vocab size)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers - 1 multi-head layers (heads concatenated -> hidden_dim * num_heads).
        self.layers = nn.ModuleList(
            [GATConv(hidden_dim * num_heads, hidden_dim, num_heads,
                     dropout=dropout) for _ in range(self.n_layers - 1)])
        # BUG FIX: `dropout` was previously passed positionally, landing in
        # GATConv's `concat` parameter; pass it by keyword so it really is
        # the attention dropout of the single-head output layer.
        self.layers.append(GATConv(hidden_dim * num_heads, out_dim,
                                   heads=1, dropout=dropout))
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList(
                [nn.BatchNorm1d(hidden_dim * num_heads)
                 for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Embed node ids, run GATConv stack, read out per-node logits.

        `e` is accepted for interface compatibility but unused here.
        """
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            # Pass a (src, dst) pair so GATConv concatenates rather than
            # implicitly sharing features (kept from original implementation).
            h = self.layers[i]((h, h), edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            h = F.elu(h)
            if self.residual:
                h = h_in + h  # residual connection
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 5,639
| 35.862745
| 120
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/ring_gnn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import time
"""
Ring-GNN
On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
https://arxiv.org/pdf/1905.12560v1.pdf
"""
from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
from layers.mlp_readout_layer import MLPReadout
class RingGNNNet(nn.Module):
    """Ring-GNN node classifier built from equivariant layers.

    Operates on a dense [1 x d x n x n] tensor that packs the adjacency
    together with node features ("x_with_node_feat").
    """

    def __init__(self, net_params):
        super().__init__()
        self.num_node_type = net_params['in_dim']  # 'nodeclasses' in the Ring-GNN repo
        avg_node_num = net_params['avg_node_num']
        radius = net_params['radius']
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.n_classes = net_params['n_classes']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']

        # Channel widths per layer: input is adjacency + one-hot node type.
        self.depth = ([torch.LongTensor([1 + self.num_node_type])]
                      + [torch.LongTensor([hidden_dim])] * n_layers)
        self.equi_modulelist = nn.ModuleList([
            RingGNNEquivLayer(self.device, m, n,
                              layer_norm=self.layer_norm,
                              residual=self.residual,
                              dropout=dropout,
                              radius=radius,
                              k2_init=0.5 / avg_node_num)
            for m, n in zip(self.depth[:-1], self.depth[1:])])
        # Readout concatenates every layer's (summed) output, input included.
        self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(),
                                     self.n_classes)

    def forward(self, x_with_node_feat):
        """
        CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
        : preparing input to the model in form new_adj
        """
        x = x_with_node_feat  # [1 x d x n x n]: adjacency + node features
        per_layer = [x]
        for equiv_layer in self.equi_modulelist:
            x = equiv_layer(x)
            per_layer.append(x)
        # Jumping-knowledge style readout over all layers.
        pooled = torch.cat([torch.sum(t, dim=2) for t in per_layer], dim=1)
        node_feats = pooled.squeeze().permute(1, 0)  # -> [n x d_out]
        return self.prediction(node_feats)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 3,202
| 38.060976
| 141
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/load_net.py
|
"""
Utility file to select GraphNN model as
selected by the user
"""
from nets.SBMs_node_classification.gated_gcn_net import GatedGCNNet, GatedGCNNet_pyg, ResGatedGCNNet_pyg
from nets.SBMs_node_classification.gcn_net import GCNNet, GCNNet_pyg
from nets.SBMs_node_classification.gat_net import GATNet, GATNet_pyg
from nets.SBMs_node_classification.graphsage_net import GraphSageNet, GraphSageNet_pyg
from nets.SBMs_node_classification.mlp_net import MLPNet, MLPNet_pyg
from nets.SBMs_node_classification.gin_net import GINNet, GINNet_pyg
from nets.SBMs_node_classification.mo_net import MoNet as MoNet_, MoNetNet_pyg
from nets.SBMs_node_classification.ring_gnn_net import RingGNNNet
from nets.SBMs_node_classification.three_wl_gnn_net import ThreeWLGNNNet
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
def GatedGCN(net_params):
    """Factory: GatedGCN net (DGL backend)."""
    return GatedGCNNet(net_params)

def GCN(net_params):
    """Factory: GCN net (DGL backend)."""
    return GCNNet(net_params)

def GAT(net_params):
    """Factory: GAT net (DGL backend)."""
    return GATNet(net_params)

def GraphSage(net_params):
    """Factory: GraphSAGE net (DGL backend)."""
    return GraphSageNet(net_params)

def MLP(net_params):
    """Factory: structure-agnostic MLP baseline."""
    return MLPNet(net_params)

def GIN(net_params):
    """Factory: GIN net (DGL backend)."""
    return GINNet(net_params)

def MoNet(net_params):
    """Factory: MoNet (GMM convolution) net."""
    return MoNet_(net_params)

def RingGNN(net_params):
    """Factory: Ring-GNN net."""
    return RingGNNNet(net_params)

def ThreeWLGNN(net_params):
    """Factory: 3WL-GNN (Provably Powerful Graph Networks) net."""
    return ThreeWLGNNNet(net_params)
def GIN_pyg(net_params):
    """Factory: GIN net (PyG backend).

    Overrides the model's neighbour aggregation when the config asks for
    'mean' or 'max'; any other value leaves the model's default in place.
    """
    model = GINNet_pyg(net_params)
    requested = net_params['neighbor_aggr_GIN']
    if requested == 'mean':
        model.aggr = 'mean'
    elif requested == 'max':
        model.aggr = 'max'
    return model
def MLP_pyg(net_params):
    """Factory: MLP baseline (PyG backend)."""
    return MLPNet_pyg(net_params)

def GCN_pyg(net_params):
    """Factory: GCN net (PyG backend)."""
    return GCNNet_pyg(net_params)

def GatedGCN_pyg(net_params):
    """Factory: GatedGCN net (PyG backend)."""
    return GatedGCNNet_pyg(net_params)

def ResGatedGCN_pyg(net_params):
    """Factory: residual GatedGCN net (PyG backend)."""
    return ResGatedGCNNet_pyg(net_params)

def GAT_pyg(net_params):
    """Factory: GAT net (PyG backend)."""
    return GATNet_pyg(net_params)

def GraphSage_pyg(net_params):
    """Factory: GraphSAGE net (PyG backend)."""
    return GraphSageNet_pyg(net_params)

def MoNet_pyg(net_params):
    """Factory: MoNet (GMM convolution) net (PyG backend)."""
    return MoNetNet_pyg(net_params)
def gnn_model(MODEL_NAME, net_params):
    """Build and return the net selected by MODEL_NAME.

    Raises KeyError for an unknown model name.
    """
    factories = {
        'GatedGCN': GatedGCN,
        'GCN': GCN,
        'GAT': GAT,
        'GraphSage': GraphSage,
        'MLP': MLP,
        'GIN': GIN,
        'MoNet': MoNet,
        'MLP_pyg': MLP_pyg,
        'GIN_pyg': GIN_pyg,
        'GCN_pyg': GCN_pyg,
        'GatedGCN_pyg': GatedGCN_pyg,
        'ResGatedGCN_pyg': ResGatedGCN_pyg,
        'GAT_pyg': GAT_pyg,
        'GraphSage_pyg': GraphSage_pyg,
        'MoNet_pyg': MoNet_pyg,
    }
    build = factories[MODEL_NAME]
    return build(net_params)
| 2,797
| 28.452632
| 104
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/three_wl_gnn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import time
"""
3WLGNN / ThreeWLGNN
Provably Powerful Graph Networks (Maron et al., 2019)
https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
"""
from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
from layers.mlp_readout_layer import MLPReadout
class ThreeWLGNNNet(nn.Module):
    """3WL-GNN node classifier (Provably Powerful Graph Networks).

    Works on a dense [1 x d x n x n] tensor packing adjacency + node
    features. With `gin_like_readout` (always True here) each block's
    output is pooled over one node axis and scored by its own fully
    connected layer; the per-block scores are summed.
    """

    def __init__(self, net_params):
        super().__init__()
        self.num_node_type = net_params['in_dim']
        depth_of_mlp = net_params['depth_of_mlp']
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']  # read for config validation; unused below
        n_layers = net_params['L']
        self.n_classes = net_params['n_classes']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        # GIN-like readout, but without diag pooling, since this is a node task.
        self.gin_like_readout = True

        block_features = [hidden_dim] * n_layers  # 'L' is the number of blocks
        original_features_num = self.num_node_type + 1  # input channels (adj + one-hot)

        # Chain of regular (MLP) blocks.
        self.reg_blocks = nn.ModuleList()
        prev_width = original_features_num
        for width in block_features:
            self.reg_blocks.append(
                RegularBlock(depth_of_mlp, prev_width, width, self.residual))
            prev_width = width

        if self.gin_like_readout:
            # One score head per block; scores are summed in forward().
            self.fc_layers = nn.ModuleList(
                [FullyConnected(width, self.n_classes, activation_fn=None)
                 for width in block_features])
        else:
            self.mlp_prediction = MLPReadout(
                sum(block_features) + original_features_num, self.n_classes)

    def forward(self, x_with_node_feat):
        """Run the block chain and return per-node class scores."""
        x = x_with_node_feat  # [1 x d x n x n]: adjacency + node features
        if self.gin_like_readout:
            scores = torch.tensor(0, device=self.device, dtype=x.dtype)
        else:
            x_list = [x]
        for i, block in enumerate(self.reg_blocks):
            x = block(x)
            if self.gin_like_readout:
                pooled = torch.sum(x, dim=2)           # [1 x d_out x n x n] -> [1 x d_out x n]
                pooled = pooled.squeeze().permute(1, 0)  # -> [n x d_out]
                scores = self.fc_layers[i](pooled) + scores
            else:
                x_list.append(x)
        if self.gin_like_readout:
            return scores
        # Concatenate all layers' pooled outputs, then a single readout MLP.
        x_list = [torch.sum(t, dim=2) for t in x_list]
        x_list = torch.cat(x_list, dim=1)
        node_feats = x_list.squeeze().permute(1, 0)  # -> [n x d_out]
        return self.mlp_prediction(node_feats)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 4,050
| 36.859813
| 118
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/graphsage_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
from layers.graphsage_layer import GraphSageLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import SAGEConv
class GraphSageNet(nn.Module):
    """GraphSAGE node classifier (DGL backend): stacked GraphSageLayers + MLP readout."""

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']   # node feat is an integer (vocab size)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers - 1 hidden layers, then one hidden -> out_dim layer.
        self.layers = nn.ModuleList(
            [GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout,
                            aggregator_type, batch_norm, residual)
             for _ in range(n_layers - 1)])
        self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout,
                                          aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Embed node ids, run the GraphSAGE stack on graph `g`, read out logits."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for sage_layer in self.layers:
            h = sage_layer(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
# the implementation of dgl and pyg are different
"""
class GraphSageNet_pyg(nn.Module):
    """GraphSAGE node classifier (PyG backend).

    Uses torch_geometric's SAGEConv; dropout, batch norm and residual
    connections are applied in forward() (the DGL and PyG layer
    implementations differ, hence the separate class).
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']   # node feat is an integer (vocab size)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        self.n_layers = net_params['L']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = nn.Dropout(p=dropout)
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList(
                [nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        # n_layers - 1 hidden layers, then one hidden -> out_dim layer.
        self.layers = nn.ModuleList(
            [SAGEConv(hidden_dim, hidden_dim) for _ in range(self.n_layers - 1)])
        self.layers.append(SAGEConv(hidden_dim, out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
        # NOTE(review): self.aggr is set from the config here but is not
        # visibly passed into SAGEConv in this class — confirm it is picked
        # up elsewhere before relying on non-default aggregation.
        if aggregator_type == 'maxpool':
            self.aggr = 'max'
        elif aggregator_type == 'mean':
            self.aggr = 'mean'

    def forward(self, h, edge_index, e):
        """Embed node ids, run SAGEConv stack, read out per-node logits.

        `e` is accepted for interface compatibility but unused here.
        """
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_prev = h
            h = self.dropout(h)
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            # No explicit non-linearity here (ReLU intentionally disabled).
            if self.residual:
                h = h_prev + h  # residual connection
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 6,149
| 36.048193
| 122
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gin_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
from gcn_lib.sparse import MultiSeq, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph
from gcn_lib.sparse import MLP as MLPpyg
from gcn_lib.sparse import GraphConv as GraphConvNet
# import torch_geometric as tg
from torch_geometric.nn import GINConv
class GINNet(nn.Module):
    """GIN node classifier (DGL backend).

    Keeps the hidden representation of every layer (input included) and
    sums a per-layer linear prediction over them (jumping-knowledge style).
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']              # GIN
        learn_eps = net_params['learn_eps_GIN']             # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # graph_pooling_type (unused for node task)
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        # One GIN layer (with its own MLP) per depth level.
        self.ginlayers = torch.nn.ModuleList()
        for _ in range(self.n_layers):
            layer_mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(layer_mlp), neighbor_aggr_type,
                         dropout, batch_norm, residual, 0, learn_eps))
        # One linear score head per hidden representation (input + each layer).
        self.linears_prediction = torch.nn.ModuleList(
            [nn.Linear(hidden_dim, n_classes) for _ in range(self.n_layers + 1)])

    def forward(self, g, h, e):
        """Embed node ids, collect per-layer representations, sum their scores."""
        h = self.embedding_h(h)
        hidden_rep = [h]  # representations at every layer, input included
        for gin_layer in self.ginlayers:
            h = gin_layer(g, h)
            hidden_rep.append(h)
        score_over_layer = 0
        for score_head, rep in zip(self.linears_prediction, hidden_rep):
            score_over_layer += score_head(rep)
        return score_over_layer

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class GINNet_pyg(nn.Module):
    """GIN node classifier (PyG backend).

    Same jumping-knowledge style readout as GINNet (DGL), but batch norm,
    ReLU, residual connection and dropout are applied in forward() because
    torch_geometric's GINConv does not handle them internally.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']              # GIN
        learn_eps = net_params['learn_eps_GIN']             # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN (see note below)
        readout = net_params['readout']  # graph_pooling_type (unused for node task)
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.ginlayers = torch.nn.ModuleList()
        self.normlayers = torch.nn.ModuleList()
        for _ in range(self.n_layers):
            layer_mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            # NOTE(review): neighbor_aggr_type is not passed to GINConv here;
            # the aggregation override happens via the GIN_pyg factory.
            self.ginlayers.append(GINConv(ApplyNodeFunc(layer_mlp), 0, learn_eps))
            if batch_norm:
                self.normlayers.append(nn.BatchNorm1d(hidden_dim))
        # One linear score head per hidden representation (input + each layer).
        self.linears_prediction = torch.nn.ModuleList(
            [nn.Linear(hidden_dim, n_classes) for _ in range(self.n_layers + 1)])

    def forward(self, h, edge_index, e):
        """Embed node ids, collect per-layer representations, sum their scores.

        `e` is accepted for interface compatibility but unused here.
        """
        h = self.embedding_h(h)
        hidden_rep = [h]  # representations at every layer, input included
        for i in range(self.n_layers):
            h_prev = h
            h = self.ginlayers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)                  # non-linear activation
            if self.residual:
                h = h_prev + h             # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
            hidden_rep.append(h)
        score_over_layer = 0
        for score_head, rep in zip(self.linears_prediction, hidden_rep):
            score_over_layer += score_head(rep)
        return score_over_layer

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 6,582
| 36.19209
| 113
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gcn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import dgl
import numpy as np
"""
GCN: Graph Convolutional Networks
Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout
class GCNNet(nn.Module):
    """GCN node classifier (DGL backend): stacked GCNLayers + MLP readout."""

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']   # node feat is an integer (vocab size)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # NOTE: this GCNLayer differs slightly from the built-in: it averages
        # received messages by reduce rather than using the c_ij normalization
        # from the paper.
        self.layers = nn.ModuleList(
            [GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                      self.batch_norm, self.residual) for _ in range(n_layers - 1)])
        self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout,
                                    self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, g, h, e):
        """Embed node ids, run the GCN stack on graph `g`, read out logits."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for gcn_layer in self.layers:
            h = gcn_layer(g, h)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class GCNNet_pyg(nn.Module):
    """GCN node classifier (PyG backend).

    Uses torch_geometric's GCNConv; batch norm, ReLU, residual connection
    and dropout are applied in forward().
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']   # node feat is an integer (vocab size)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = dropout
        # BUG FIX: previously every conv produced hidden_dim features while
        # MLPReadout expected out_dim, breaking whenever out_dim != hidden_dim.
        # The last conv now maps hidden_dim -> out_dim (matching the DGL
        # version and the other *_pyg classes); identical behavior when
        # out_dim == hidden_dim. The last batch norm is sized accordingly.
        # NOTE: with residual=True and out_dim != hidden_dim, the final
        # residual addition cannot apply — same constraint as the sibling
        # *_pyg classes.
        self.layers = nn.ModuleList(
            [GCNConv(hidden_dim, hidden_dim, improved=False)
             for _ in range(self.n_layers - 1)])
        self.layers.append(GCNConv(hidden_dim, out_dim, improved=False))
        if self.batch_norm:
            self.normlayers = nn.ModuleList(
                [nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)])
            self.normlayers.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Embed node ids, run GCNConv stack, read out per-node logits.

        `e` is accepted for interface compatibility but unused here.
        """
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.normlayers[i](h)  # batch normalization
            h = F.relu(h)                  # non-linear activation
            if self.residual:
                h = h_in + h               # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 5,901
| 34.769697
| 110
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/gated_gcn_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import numpy as np
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
from layers.gated_gcn_layer import GatedGCNLayer, ResGatedGCNLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GatedGraphConv
class GatedGCNNet(nn.Module):
    """Gated GCN node classifier (DGL backend).

    Jointly updates node and edge representations through GatedGCNLayers;
    optionally adds a learned projection of Laplacian positional encodings
    to the initial node embedding.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']  # node feat is an integer (vocab size)
        in_dim_edge = 1                     # edge feat is a single float
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList(
            [GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                           self.batch_norm, self.residual)
             for _ in range(n_layers)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)

    def forward(self, g, h, e, h_pos_enc=None):
        """Embed nodes/edges (plus optional positional encodings) and run the stack."""
        h = self.embedding_h(h)
        if self.pos_enc:
            h = h + self.embedding_pos_enc(h_pos_enc.float())
        e = self.embedding_e(e)
        for gated_layer in self.layers:
            h, e = gated_layer(g, h, e)
        return self.MLP_layer(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy for unbalanced node labels.

        Class c present in the batch gets weight (V - |c|) / V; absent
        classes get weight 0.
        """
        V = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = counts
        weight = (V - cluster_sizes).float() / V
        weight = weight * (cluster_sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class ResGatedGCNNet_pyg(nn.Module):
    """Residual Gated Graph ConvNet (PyTorch Geometric variant) for node classification.

    Pipeline: embed integer node features and scalar edge features, apply
    ``L`` ResGatedGCNLayer convolutions — each optionally followed by batch
    norm, a residual connection, and dropout — then an MLP readout that
    produces per-node class logits.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            # Project positional encodings into the hidden dimension.
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        self.layers = nn.ModuleList([ResGatedGCNLayer(hidden_dim, hidden_dim, self.dropout,
                                                      self.batch_norm, self.residual) for _ in range(self.n_layers)])
        # self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
        #                                            self.batch_norm, self.residual) for _ in range(n_layers)])
        if self.batch_norm:
            # One norm module per conv layer, separately for node and edge features;
            # normalization is applied here in forward(), not inside the conv layer.
            self.normlayers_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
            self.normlayers_e = nn.ModuleList([nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node class logits.

        Order inside each layer matters: conv -> batch norm -> residual add
        (from the layer's own input) -> dropout, for both node and edge feats.
        """
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        e = self.embedding_e(e)
        # res gated convnets
        for i in range(self.n_layers):
            h_in = h
            e_in = e
            h, e = self.layers[i](h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers_h[i](h)
                e = self.normlayers_e[i](e) # batch normalization
            if self.residual:
                h = h_in + h # residual connection
                e = e_in + e
            h = F.dropout(h, self.dropout, training=self.training)
            e = F.dropout(e, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out
        # if self.batch_norm:
        #     h = self.bn_node_h(h)  # batch normalization
        #     e = self.bn_node_e(e)  # batch normalization
        #
        # h = F.relu(h)  # non-linear activation
        # e = F.relu(e)  # non-linear activation
        #
        # if self.residual:
        #     h = h_in + h  # residual connection
        #     e = e_in + e  # residual connection
        #
        # h = F.dropout(h, self.dropout, training=self.training)
        # e = F.dropout(e, self.dropout, training=self.training)
    def loss(self, pred, label):
        """Class-weighted cross-entropy for imbalanced node labels.

        Each class weight is (V - count_c) / V; classes absent from the
        batch receive weight 0.
        """
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
"""
Gated Graph Sequence Neural Networks
An Experimental Study of Neural Networks for Variable Graphs
Li Y, Tarlow D, Brockschmidt M, et al. Gated graph sequence neural networks[J]. arXiv preprint arXiv:1511.05493, 2015.
https://arxiv.org/abs/1511.05493
Note that the pyg and dgl of the gatedGCN are different models.
"""
class GatedGCNNet_pyg(nn.Module):
    """Gated Graph Sequence NN (PyG ``GatedGraphConv``) for node classification.

    NOTE(review): unlike the DGL GatedGCN above, this wraps a *single*
    GatedGraphConv whose internal ``num_layers`` is ``L`` — the ModuleList
    holds exactly one module, so the loop in ``forward`` executes once and
    ``self.normlayers`` holds a single BatchNorm. The pyg and dgl GatedGCN
    are different models (see the module docstring above).
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        # self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        self.layers = nn.ModuleList([GatedGraphConv(hidden_dim, n_layers, aggr = 'add')])
        # self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
        #                                            self.batch_norm, self.residual) for _ in range(n_layers)])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
    def forward(self, h, edge_index, e, h_pos_enc=None):
        """Return per-node class logits."""
        # input embedding
        h = self.embedding_h(h)
        if self.pos_enc:
            h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
            h = h + h_pos_enc
        # e = self.embedding_e(e)
        # res gated convnets
        for conv in self.layers:
            h_in = h
            # NOTE(review): ``e`` is forwarded as GatedGraphConv's edge_weight,
            # which expects a 1-D weight per edge — confirm ``e`` has shape
            # [num_edges] here (a [num_edges, 1] float feature would need a squeeze).
            h = conv(h, edge_index, e)
            if self.batch_norm:
                h = self.normlayers[0](h)
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        # output
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Class-weighted cross-entropy for imbalanced node labels.

        Each class weight is (V - count_c) / V; classes absent from the
        batch receive weight 0.
        """
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
| 9,626
| 37.818548
| 122
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/mlp_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from layers.mlp_readout_layer import MLPReadout
class MLPNet(nn.Module):
    """Graph-agnostic MLP baseline for SBM node classification.

    Ignores graph structure entirely: each node embedding is pushed through
    a stack of Linear/ReLU/Dropout stages, optionally gated, then classified
    by an MLP readout.
    """

    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        # Node features are integer labels.
        self.embedding_h = nn.Embedding(net_params['in_dim'], hidden_dim)
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        # n_layers identical [Linear -> ReLU -> Dropout] stages.
        stages = []
        for _ in range(n_layers):
            stages.extend([
                nn.Linear(hidden_dim, hidden_dim, bias=True),
                nn.ReLU(),
                nn.Dropout(dropout),
            ])
        self.feat_mlp = nn.Sequential(*stages)
        if self.gated:
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-node logits; ``g`` and ``e`` are unused by design."""
        h = self.in_feat_dropout(self.embedding_h(h))
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy to counter label imbalance.

        Each present class c is weighted by (V - count_c) / V; classes
        absent from this batch get weight 0.
        """
        num_nodes = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        sizes = torch.zeros(self.n_classes).long().to(self.device)
        sizes[torch.unique(label)] = counts
        weight = (num_nodes - sizes).float() / num_nodes
        weight *= (sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
class MLPNet_pyg(nn.Module):
    """Graph-agnostic MLP baseline (PyTorch Geometric signature).

    Identical to MLPNet except that ``forward`` takes ``edge_index`` instead
    of a DGL graph; the structure argument is ignored either way.
    """

    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.gated = net_params['gated']
        self.n_classes = n_classes
        self.device = net_params['device']
        # Node features are integer labels.
        self.embedding_h = nn.Embedding(net_params['in_dim'], hidden_dim)
        self.in_feat_dropout = nn.Dropout(net_params['in_feat_dropout'])
        # net_params['L'] identical [Linear -> ReLU -> Dropout] stages.
        blocks = []
        for _ in range(net_params['L']):
            blocks += [
                nn.Linear(hidden_dim, hidden_dim, bias=True),
                nn.ReLU(),
                nn.Dropout(dropout),
            ]
        self.feat_mlp = nn.Sequential(*blocks)
        if self.gated:
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.readout_mlp = MLPReadout(hidden_dim, n_classes)

    def forward(self, h, edge_index, e):
        """Return per-node logits; ``edge_index`` and ``e`` are unused by design."""
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        h = self.feat_mlp(h)
        if self.gated:
            h = torch.sigmoid(self.gates(h)) * h
        return self.readout_mlp(h)

    def loss(self, pred, label):
        """Class-weighted cross-entropy to counter label imbalance.

        Each present class c is weighted by (V - count_c) / V; classes
        absent from this batch get weight 0.
        """
        num_nodes = label.size(0)
        counts = torch.bincount(label)
        counts = counts[counts.nonzero()].squeeze()
        sizes = torch.zeros(self.n_classes).long().to(self.device)
        sizes[torch.unique(label)] = counts
        weight = (num_nodes - sizes).float() / num_nodes
        weight *= (sizes > 0).float()
        return nn.CrossEntropyLoss(weight=weight)(pred, label)
| 4,619
| 29.8
| 91
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/SBMs_node_classification/mo_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch_scatter import scatter_add
# from num_nodes import maybe_num_nodes
import dgl
from torch_geometric.nn.conv import MessagePassing
import numpy as np
import torch.nn as nn
from torch import Tensor
# from torch_geometric.utils import degree
from torch_scatter import scatter_add
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
from layers.gmm_layer import GMMLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GMMConv
class MoNet(nn.Module):
    """MoNet / GMM-convolution network (DGL) for SBM node classification.

    Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs
    (Monti et al., CVPR 2017), https://arxiv.org/pdf/1611.08402.pdf
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']           # number of GMM kernels (for MoNet)
        dim = net_params['pseudo_dim_MoNet']    # pseudo-coordinate dimension (for MoNet)
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        aggr_type = "sum"  # default aggregation for MoNet
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        # Hidden layers; each has its own projection of the 2-D pseudo coords.
        for _ in range(n_layers-1):
            self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                                        dropout, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
                                    dropout, batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
    def forward(self, g, h, e):
        """Return per-node class logits for DGL graph ``g``."""
        h = self.embedding_h(h)
        # computing the 'pseudo' named tensor which depends on node degrees
        g.ndata['deg'] = g.in_degrees()
        g.apply_edges(self.compute_pseudo)
        pseudo = g.edata['pseudo'].to(self.device).float()
        for i in range(len(self.layers)):
            h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
        return self.MLP_layer(h)
    def compute_pseudo(self, edges):
        """Per-edge pseudo-coordinates (1/sqrt(deg_u+1), 1/sqrt(deg_v+1)).

        The constant '+1' in the degrees denotes an implicit self-loop and
        avoids division by zero when in_degree is 0.

        BUG FIX: the destination coordinate previously reused ``edges.src``,
        which made both components of every pseudo-coordinate identical; it
        now uses ``edges.dst``, matching the row/col pair used by the pyg
        variant below.
        """
        srcs = 1 / (edges.src['deg'] + 1).float().sqrt()
        dsts = 1 / (edges.dst['deg'] + 1).float().sqrt()
        pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
        return {'pseudo': pseudo}
    def loss(self, pred, label):
        """Class-weighted cross-entropy: weight_c = (V - count_c)/V, absent classes 0."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes>0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
"""
GMM: Gaussian Mixture Model Convolution layer
Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
https://arxiv.org/pdf/1611.08402.pdf
"""
class MoNetNet_pyg(MessagePassing):
    """MoNet / GMM-convolution network (PyTorch Geometric ``GMMConv``).

    Pseudo-coordinates per edge are (deg(u)^-1/2, deg(v)^-1/2), computed
    from out-degrees via scatter_add over edge sources.

    NOTE(review): this subclasses MessagePassing but never calls propagate();
    it behaves as a plain nn.Module container — presumably inherited for
    convenience, confirm before refactoring.
    """
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel'] # for MoNet
        dim = net_params['pseudo_dim_MoNet'] # for MoNet
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        self.dim = dim
        # aggr_type = "sum" # default for MoNet
        aggr_type = "mean"  # NOTE: deviates from the dgl MoNet above, which uses "sum"
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        self.batchnorm_h = nn.ModuleList()
        # Hidden layers; each conv gets its own pseudo-coordinate projection.
        for _ in range(self.n_layers - 1):
            self.layers.append(GMMConv(hidden_dim, hidden_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                       root_weight = True, bias = True))
            if self.batch_norm:
                self.batchnorm_h.append(nn.BatchNorm1d(hidden_dim))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        # Output layer
        self.layers.append(GMMConv(hidden_dim, out_dim, dim, kernel, separate_gaussians = False ,aggr = aggr_type,
                                   root_weight = True, bias = True))
        if self.batch_norm:
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
    # to do
    def forward(self, h, edge_index, e):
        """Return per-node class logits; ``e`` (edge features) is unused."""
        h = self.embedding_h(h)
        # Degree of each node = number of edges it sources, via scatter_add
        # of unit weights over edge_index[0].
        edge_weight = torch.ones((edge_index.size(1),),
                                 device = edge_index.device)
        row, col = edge_index[0], edge_index[1]
        deg = scatter_add(edge_weight, row, dim=0, dim_size=h.size(0))
        # In-place deg^-0.5; isolated nodes produce inf, zeroed on the next line.
        deg_inv_sqrt = deg.pow_(-0.5)
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        # Pseudo-coordinate per edge: (deg(src)^-1/2, deg(dst)^-1/2).
        pseudo = torch.cat((deg_inv_sqrt[row].unsqueeze(-1), deg_inv_sqrt[col].unsqueeze(-1)), dim=1)
        for i in range(self.n_layers):
            h_in = h
            h = self.layers[i](h, edge_index, self.pseudo_proj[i](pseudo))
            if self.batch_norm:
                h = self.batchnorm_h[i](h) # batch normalization
            h = F.relu(h) # non-linear activation
            if self.residual:
                h = h_in + h # residual connection
            h = F.dropout(h, self.dropout, training=self.training)
        return self.MLP_layer(h)
    def loss(self, pred, label):
        """Class-weighted cross-entropy: weight_c = (V - count_c)/V, absent classes 0."""
        # calculating label weights for weighted loss computation
        V = label.size(0)
        label_count = torch.bincount(label)
        label_count = label_count[label_count.nonzero()].squeeze()
        cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
        cluster_sizes[torch.unique(label)] = label_count
        weight = (V - cluster_sizes).float() / V
        weight *= (cluster_sizes > 0).float()
        # weighted cross-entropy for unbalanced classes
        criterion = nn.CrossEntropyLoss(weight=weight)
        loss = criterion(pred, label)
        return loss
| 7,669
| 40.236559
| 121
|
py
|
benchmarking-gnns-pyg
|
benchmarking-gnns-pyg-master/nets/Planetoid_node_classification/gat_net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from torch_geometric.typing import OptPairTensor
"""
GAT: Graph Attention Network
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
from layers.gat_layer import GATLayer
from layers.mlp_readout_layer import MLPReadout
from torch_geometric.nn import GATConv
class GATNet(nn.Module):
    """Graph Attention Network (DGL) for Planetoid node classification.

    Graph Attention Networks (Veličković et al., ICLR 2018),
    https://arxiv.org/abs/1710.10903

    L-1 multi-head GATLayers followed by a single-head output layer and an
    MLP readout producing per-node class logits.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        # Embed to hidden_dim * num_heads so head outputs can be concatenated
        # without changing the feature width between layers.
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * num_heads) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
                                              dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
        self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
    def forward(self, g, h, e):
        """Return per-node class logits; ``e`` (edge features) is unused by GAT."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GAT
        for conv in self.layers:
            h = conv(g, h)
        # output
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Unweighted cross-entropy over per-node predictions."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        # Fixed: the original had a second, unreachable `return loss` here.
        return loss
class GATNet_pyg(nn.Module):
    """Graph Attention Network (PyTorch Geometric ``GATConv``).

    L-1 multi-head attention layers followed by a single-head output layer,
    ELU activations, optional batch norm and residual connections, and an
    MLP readout producing per-node class logits.
    """
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim'] # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        # Embed to hidden_dim * num_heads so concatenated head outputs keep
        # the feature width constant between layers.
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim * num_heads) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GATConv(hidden_dim * num_heads, hidden_dim, num_heads,
                                             dropout = dropout) for _ in range(self.n_layers - 1)])
        # BUG FIX: ``dropout`` was previously passed positionally, landing in
        # GATConv's 4th parameter (``concat``), so the output layer silently
        # received no attention dropout. Pass it by keyword.
        self.layers.append(GATConv(hidden_dim * num_heads, out_dim, 1, dropout=dropout))
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim * num_heads) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
    def forward(self, h, edge_index, e):
        """Return per-node class logits; ``e`` (edge features) is unused by GAT."""
        # input embedding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # GAT
        for i in range(self.n_layers):
            h_in = h
            h: OptPairTensor = (h, h) # make cat the value not simple add it.
            h = self.layers[i](h, edge_index)
            if self.batch_norm:
                h = self.batchnorm_h[i](h)
            h = F.elu(h)
            if self.residual:
                h = h_in + h # residual connection
        # output
        h_out = self.MLP_layer(h)
        return h_out
    def loss(self, pred, label):
        """Unweighted cross-entropy over per-node predictions."""
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        return loss
| 4,653
| 34.257576
| 120
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.