index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
30,269
|
Kronholt/harding
|
refs/heads/master
|
/events/migrations/0005_post_full_story.py
|
# Generated by Django 3.1.2 on 2021-01-07 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.2): adds the optional long-form text field
    # Post.full_story. Do not hand-edit generated migrations beyond comments.

    dependencies = [
        ('events', '0004_comment'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='full_story',
            # blank=True/null=True make the field optional both in forms and in the DB.
            field=models.CharField(blank=True, max_length=10000, null=True),
        ),
    ]
|
{"/events/filters.py": ["/events/models.py"], "/events/forms.py": ["/events/models.py"], "/events/views.py": ["/events/filters.py", "/events/forms.py", "/events/models.py"]}
|
30,270
|
Kronholt/harding
|
refs/heads/master
|
/events/migrations/0002_post_tag.py
|
# Generated by Django 3.1.2 on 2021-01-07 13:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.2): creates the Tag model and the Post model
    # (event/story content with author, dates, image, and many-to-many tags).

    dependencies = [
        ('events', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content_name', models.CharField(max_length=200, null=True)),
                # content_date is set once at creation time (auto_now_add).
                ('content_date', models.DateTimeField(auto_now_add=True)),
                ('content_date_start', models.DateTimeField(blank=True, null=True)),
                ('content_date_end', models.DateTimeField(blank=True, null=True)),
                ('content_social_description', models.CharField(max_length=1000, null=True)),
                ('content_image', models.ImageField(blank=True, default='profile1.png', null=True, upload_to='')),
                # SET_NULL keeps the post if its authoring volunteer is deleted.
                ('content_author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='events.volunteer')),
                ('tags', models.ManyToManyField(to='events.Tag')),
            ],
        ),
    ]
|
{"/events/filters.py": ["/events/models.py"], "/events/forms.py": ["/events/models.py"], "/events/views.py": ["/events/filters.py", "/events/forms.py", "/events/models.py"]}
|
30,271
|
Kronholt/harding
|
refs/heads/master
|
/events/forms.py
|
from .models import Comment
from django.forms import ModelForm
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class CommentForm(ModelForm):
    """Form for posting a Comment; only the message body is user-editable."""
    # Override the default widget with a compact 3-row textarea.
    message = forms.CharField(widget=forms.Textarea(attrs={"rows":3}))
    class Meta:
        model = Comment
        fields = ['message']
class CreateUserForm(UserCreationForm):
    """Registration form on Django's auth User: username, email, password pair."""
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
|
{"/events/filters.py": ["/events/models.py"], "/events/forms.py": ["/events/models.py"], "/events/views.py": ["/events/filters.py", "/events/forms.py", "/events/models.py"]}
|
30,272
|
Kronholt/harding
|
refs/heads/master
|
/events/views.py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate, login, logout
from .filters import PostFilter
from django.contrib.auth.forms import UserCreationForm
from .forms import CommentForm, CreateUserForm
from .models import *
from .decorators import *
from django.contrib.auth.decorators import login_required
from django.contrib import messages
import requests
import json
# Create your views here.
@login_required(login_url='login')
def index(request):
    """Home page: fetch a page of 'Volunteering' events from the Harding API.

    Requires login; anonymous users are redirected to the 'login' view.
    """
    # NOTE(review): the API token/salt/user_id are hard-coded in this URL.
    # They should be moved into settings/environment configuration.
    api_url = 'http://hardingdevelopment.nexisit.net/harding_api/api_event_search.php?page_num=0&per_page=20&buckets=Volunteering&timezone=25200&app_server_version=3.2&app_version=2&app_build=1&user_id=2&token=70aedda35dca9c192ef551c9f7b570e0&salt=309a9bea4d2695656e83f4fe7b340ee0&app=1&version=3.2'
    # A timeout keeps the view from hanging indefinitely if the remote API stalls.
    response = requests.get(api_url, timeout=10).json()
    return render(request, 'events/home.html', {'response':response})
def volunteering(request):
    """List 'Event' posts, narrowed by the user's filter query parameters."""
    queryset = Post.objects.filter(post_type='Event')
    post_filter = PostFilter(request.GET, queryset=queryset)
    return render(
        request,
        'events/volunteering.html',
        {'posts': post_filter.qs, 'myFilter': post_filter},
    )
def event(request, pk):
    """Show a single event post and handle comment submission.

    GET renders the post with an empty CommentForm; a valid POST saves the
    comment (authored by the requesting user's volunteer profile) and
    redirects back to this view.
    """
    post = Post.objects.get(id=pk)
    form = CommentForm()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save()
            comment.author = request.user.volunteer
            post.comment_set.add(comment)
            comment.save()
            return redirect('event', pk)
    # Fix: build the context *after* POST handling. Previously it captured the
    # fresh unbound form, so an invalid submission re-rendered without its
    # validation errors.
    context = {'post':post, 'form':form}
    return render(request,'events/event.html', context )
def attend(request, pk):
    """Mark the logged-in user as attending the given post, then go home."""
    post = Post.objects.get(id=pk)
    post.attending.add(request.user)
    post.save()
    return redirect('/')
def stories(request):
    """List 'Story' posts, narrowed by the user's filter query parameters."""
    queryset = Post.objects.filter(post_type='Story')
    post_filter = PostFilter(request.GET, queryset=queryset)
    return render(
        request,
        'events/stories.html',
        {'posts': post_filter.qs, 'myFilter': post_filter},
    )
def story(request, pk):
    """Show a single story post and handle comment submission.

    Mirrors the `event` view: a valid POST saves the comment under the
    requesting user's volunteer profile and redirects back here.
    """
    post = Post.objects.get(id=pk)
    form = CommentForm()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save()
            comment.author = request.user.volunteer
            post.comment_set.add(comment)
            comment.save()
            return redirect('story', pk)
    # Fix: build the context *after* POST handling. Previously it captured the
    # fresh unbound form, so an invalid submission re-rendered without its
    # validation errors.
    context = {'post':post, 'form':form}
    return render(request,'events/story.html', context)
def register(request):
    """Create a new user account plus its linked Volunteer profile.

    On success, redirects home with a success message; on invalid input,
    re-renders the bound form so its validation errors are shown.
    """
    # NOTE(review): the original comment claimed logged-in users are blocked
    # from this page, but no such guard exists here — confirm whether a
    # decorator from .decorators was intended.
    form = CreateUserForm()
    template = 'events/register.html'
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            username = form.cleaned_data.get('username')
            # Every account gets a matching Volunteer row, used elsewhere
            # (e.g. comment authorship via request.user.volunteer).
            volunteer = Volunteer(user=user, user_name=user.username)
            volunteer.save()
            messages.success(request, 'Account was created for ' + username)
            return redirect('/')
        else:
            messages.error(request, 'Something went wrong, please try again.')
    # Fix: build the context *after* POST handling. Previously it captured the
    # fresh unbound form, so an invalid submission re-rendered without errors.
    context={'form':form}
    return render(request, template, context)
def loginPage(request):
    """Render the login page and authenticate submitted credentials."""
    context = {'form': UserCreationForm()}
    if request.method == "POST":
        user = authenticate(
            request,
            username=request.POST.get('username'),
            password=request.POST.get('password'),
        )
        if user is None:
            messages.error(request, 'Username or password is incorrect')
        else:
            login(request, user)
            return redirect('/')
    return render(request, 'events/login.html', context)
def logoutUser(request):
    """End the current session and send the user to the login page."""
    logout(request)
    return redirect('login')
|
{"/events/filters.py": ["/events/models.py"], "/events/forms.py": ["/events/models.py"], "/events/views.py": ["/events/filters.py", "/events/forms.py", "/events/models.py"]}
|
30,324
|
hiratara/offline-DOUKAKU-skeletons
|
refs/heads/master
|
/answer.py
|
def solve(input):
    """Echo the input unchanged — skeleton answer for the kata harness.

    Note: the parameter name shadows the builtin `input`; it is kept for
    compatibility with existing keyword callers.
    """
    result = input
    return result
|
{"/test.py": ["/answer.py"]}
|
30,325
|
hiratara/offline-DOUKAKU-skeletons
|
refs/heads/master
|
/test.py
|
import unittest
from answer import solve
class TestSequenceFunctions(unittest.TestCase):
    """Data-driven check of answer.solve against patterns.tsv fixtures."""
    def test(self):
        # Each patterns.tsv row is "<case-number>\t<input>\t<expected>".
        with open('patterns.tsv') as f:
            for line in f:
                num, inputted, expected = line.rstrip().split("\t")
                self.assertEqual(
                    solve(inputted), expected, "%s failed" % num
                )
if __name__ == '__main__':
unittest.main()
# % python2.7 test.py
# F
# ======================================================================
# FAIL: test (__main__.TestSequenceFunctions)
# ----------------------------------------------------------------------
# Traceback (most recent call last):
# File "test.py", line 10, in test
# solve(inputted), expected, "%s failed" % num
# AssertionError: #2 failed
#
# ----------------------------------------------------------------------
# Ran 1 test in 0.000s
#
# FAILED (failures=1)
|
{"/test.py": ["/answer.py"]}
|
30,332
|
choyi0521/stock-gan-test
|
refs/heads/master
|
/models/convolutional_models.py
|
import torch
import torch.nn as nn
from modules.tcn import TemporalConvNet
class TCNGenerator(nn.Module):
    """Temporal-convolutional GAN generator with optional local/global conditioning."""

    def __init__(self,
                 noise_dim: int,
                 output_dim: int,
                 hidden_dim: int,
                 lcond_dim: int = 0,
                 gcond_dim: int = 0,
                 n_layers: int = 8,
                 kernel_size: int = 2,
                 dropout: float = 0.2
                 ):
        """
        Convolutional generator
        :param noise_dim: noise dimension
        :param output_dim: output dimension
        :param hidden_dim: hidden dimension
        :param lcond_dim: local condition dimension
        :param gcond_dim: global condition dimension
        :param n_layers: the number of layers
        :param kernel_size: the size of kernel
        :param dropout: dropout ratio
        """
        super().__init__()
        self.lcond_dim = lcond_dim
        self.gcond_dim = gcond_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # TCN input channels = noise plus any concatenated condition channels.
        self.tcn = TemporalConvNet(noise_dim+lcond_dim+gcond_dim,
                                   [hidden_dim]*n_layers,
                                   kernel_size=kernel_size,
                                   dropout=dropout
                                   )
        self.linear = nn.Linear(hidden_dim, output_dim)

    def forward(self, noise, local_condition=None, global_condition=None):
        """
        :param noise: noise tensor of shape (batch_size, seq_len, noise_dim)
        :param local_condition: local condition tensor of shape (batch_size, seq_len, lcond_dim)
        :param global_condition: global condition tensor of shape (batch_size, gcond_dim)
        :return: Output tensor of shape (batch_size, seq_len, output_dim)
        """
        b, t, c = noise.size()
        # Fix: start from the noise itself. Previously `input` was only
        # assigned inside the lcond branch, raising NameError whenever
        # lcond_dim == 0.
        input = noise
        if self.lcond_dim > 0:
            input = torch.cat((input, local_condition), axis=2)
        if self.gcond_dim > 0:
            # Broadcast the global condition across every timestep.
            input = torch.cat((input, global_condition.unsqueeze(1).expand(b, t, self.gcond_dim)), axis=2)
        # TemporalConvNet is channels-first; transpose in and back out.
        output = self.tcn(input.transpose(1, 2)).transpose(1, 2)
        output = self.linear(output)
        return output
class TCNDiscriminator(nn.Module):
    """Temporal-convolutional GAN discriminator with optional conditioning."""

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 lcond_dim: int = 0,
                 gcond_dim: int = 0,
                 n_layers: int = 8,
                 kernel_size: int = 2,
                 dropout: float = 0.2
                 ):
        """
        Convolutional discriminator
        :param input_dim: input dimension
        :param hidden_dim: hidden dimension
        :param lcond_dim: local condition dimension
        :param gcond_dim: global condition dimension
        :param n_layers: the number of layers
        :param kernel_size: the size of kernel
        :param dropout: dropout ratio
        """
        super().__init__()
        self.lcond_dim = lcond_dim
        self.gcond_dim = gcond_dim
        self.hidden_dim = hidden_dim
        # TCN input channels = sample plus any concatenated condition channels.
        self.tcn = TemporalConvNet(input_dim+lcond_dim+gcond_dim,
                                   [hidden_dim]*n_layers,
                                   kernel_size=kernel_size,
                                   dropout=dropout
                                   )
        self.linear = nn.Linear(hidden_dim, 1)

    def forward(self, input, local_condition=None, global_condition=None):
        """
        :param input: Input tensor of shape (batch_size, seq_len, input_dim)
        :param local_condition: local condition tensor of shape (batch_size, seq_len, lcond_dim)
        :param global_condition: global condition tensor of shape (batch_size, gcond_dim)
        :return: Output tensor of shape (batch_size, seq_len)
        """
        batch, steps, _ = input.size()
        x = input
        if self.lcond_dim > 0:
            x = torch.cat((x, local_condition), axis=2)
        if self.gcond_dim > 0:
            # Broadcast the global condition across every timestep.
            broadcast = global_condition.unsqueeze(1).expand(batch, steps, self.gcond_dim)
            x = torch.cat((x, broadcast), axis=2)
        # TemporalConvNet is channels-first; transpose in and back out.
        features = self.tcn(x.transpose(1, 2)).transpose(1, 2)
        return self.linear(features).view(batch, steps)
if __name__ == '__main__':
    # Smoke test for the TCN models defined in this module.
    noise_dim = 128
    hidden_dim = 256
    input_dim = output_dim = 256
    lcond_dim = 64
    gcond_dim = 64
    batch_size = 32
    seq_len = 300
    n_layers = 3
    # Fix: this module defines TCNGenerator/TCNDiscriminator; the previous
    # references to LSTMGenerator/LSTMDiscriminator (which live in
    # recurrent_models.py and are not imported here) raised NameError.
    g = TCNGenerator(noise_dim=noise_dim, output_dim=output_dim, hidden_dim=hidden_dim, lcond_dim=lcond_dim, gcond_dim=gcond_dim, n_layers=n_layers)
    d = TCNDiscriminator(input_dim=input_dim, hidden_dim=hidden_dim, lcond_dim=lcond_dim, gcond_dim=gcond_dim, n_layers=n_layers)
    noise = torch.randn((batch_size, seq_len, noise_dim))
    lcond = torch.zeros((batch_size, seq_len, lcond_dim))
    gcond = torch.zeros((batch_size, gcond_dim))
    output = g(noise, lcond, gcond)
    print('generator output size', output.size())
    output = d(output, lcond, gcond)
    print('discriminator output size', output.size())
|
{"/gan_trainer.py": ["/torch_trainer.py", "/models/convolutional_models.py", "/preprocessor.py", "/etf_dataset.py"], "/main.py": ["/preprocessor.py"]}
|
30,333
|
choyi0521/stock-gan-test
|
refs/heads/master
|
/gan_trainer.py
|
import torch
import torch.nn
from torch_trainer import TorchTrainer
from models.recurrent_models import LSTMGenerator, LSTMDiscriminator
from models.convolutional_models import TCNGenerator, TCNDiscriminator
from torch.utils.data import DataLoader
class LSTMGANTrainer(TorchTrainer):
    """GAN trainer for ETF sequence generation.

    Despite the name, `model` selects either the LSTM or the TCN
    generator/discriminator pair; both are trained identically with a
    per-timestep BCE-with-logits real/fake objective.
    """

    def __init__(self, n_epochs, batch_size, noise_dim, etf_dataset, num_workers=1, model='TCN'):
        """
        :param n_epochs: number of passes over the dataset
        :param batch_size: DataLoader batch size (last batch may be smaller)
        :param noise_dim: dimensionality of the generator's noise input
        :param etf_dataset: torch Dataset yielding (lcond, gcond, target)
        :param num_workers: DataLoader worker processes
        :param model: 'LSTM' or 'TCN' architecture selector
        """
        super().__init__()
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.dataloader = DataLoader(dataset=etf_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
        self.noise_dim = noise_dim
        # models
        assert model == 'LSTM' or model == 'TCN'
        if model == 'LSTM':
            lcond_dim = 6
            hidden_dim = 128
            n_layers = 3
            self.generator = LSTMGenerator(
                noise_dim=noise_dim,
                output_dim=lcond_dim,
                hidden_dim=hidden_dim,
                lcond_dim=lcond_dim,
                gcond_dim=1,
                n_layers=n_layers
            ).to(self.device)
            self.discriminator = LSTMDiscriminator(
                input_dim=lcond_dim,
                hidden_dim=hidden_dim,
                lcond_dim=lcond_dim,
                gcond_dim=1,
                n_layers=n_layers
            ).to(self.device)
        elif model == 'TCN':
            lcond_dim = 6
            hidden_dim = 32  # 128
            n_layers = 8
            self.generator = TCNGenerator(
                noise_dim=noise_dim,
                output_dim=lcond_dim,
                hidden_dim=hidden_dim,
                lcond_dim=lcond_dim,
                gcond_dim=1,
                n_layers=n_layers
            ).to(self.device)
            self.discriminator = TCNDiscriminator(
                input_dim=lcond_dim,
                hidden_dim=hidden_dim,
                lcond_dim=lcond_dim,
                gcond_dim=1,
                n_layers=n_layers
            ).to(self.device)
        # criterion
        self.criterion = torch.nn.BCEWithLogitsLoss().to(self.device)
        # optimizers
        self.optimizer_g = torch.optim.Adam(self.generator.parameters())
        self.optimizer_d = torch.optim.Adam(self.discriminator.parameters())

    def train(self):
        """Run the adversarial training loop for n_epochs."""
        self.generator.train()
        self.discriminator.train()
        for epoch in range(self.n_epochs):
            for i, data in enumerate(self.dataloader):
                lcond, gcond, target = data
                lcond = lcond.to(self.device)
                gcond = gcond.to(self.device)
                target = target.to(self.device)
                # Fix: size the noise by the actual batch (lcond.shape[0]),
                # not self.batch_size — the DataLoader's final batch can be
                # smaller, which previously made the generator output disagree
                # in batch size with the labels and targets below.
                z = torch.randn((lcond.shape[0], lcond.shape[1], self.noise_dim), device=self.device)
                output = self.generator(z, lcond, gcond)
                fake_label = torch.zeros(lcond.shape[:2], device=self.device)
                real_label = torch.ones(lcond.shape[:2], device=self.device)
                # Update discriminator: real samples toward 1, fakes toward 0.
                self.optimizer_d.zero_grad()
                real_loss = self.criterion(self.discriminator(target, lcond, gcond), real_label)
                fake_loss = self.criterion(self.discriminator(output.detach(), lcond, gcond), fake_label)
                d_loss = (real_loss+fake_loss) / 2
                d_loss.backward()
                self.optimizer_d.step()
                # Update generator: fool the discriminator into predicting 1.
                self.optimizer_g.zero_grad()
                g_loss = self.criterion(self.discriminator(output, lcond, gcond), real_label)
                g_loss.backward()
                self.optimizer_g.step()
                if i % 10 == 0:
                    print(i)

    def validate(self):
        """Placeholder: switches to eval mode but performs no evaluation yet."""
        with torch.no_grad():
            self.generator.eval()
            self.discriminator.eval()

    def profile(self):
        """Profile a reference CNN forward pass to sanity-check the profiler setup."""
        import torchvision.models as models
        model = models.densenet121(pretrained=True)
        x = torch.randn((1, 3, 224, 224), requires_grad=True)
        with torch.autograd.profiler.profile(use_cuda=True) as prof:
            model(x)
        print(prof)
if __name__ == '__main__':
    # Manual driver: download ETF price history, build the sliding-window
    # dataset, and run GAN training end to end.
    import numpy as np
    import pandas as pd
    from pandas_datareader.data import DataReader
    from datetime import datetime
    etfs = ['VTI', 'EFA', 'EEM', 'TLT', 'TIP', 'VNQ']
    train_start = datetime(2005, 1, 1)
    train_end = datetime(2018, 12, 31)
    test_start = datetime(2019, 1, 1)
    test_end = datetime(2019, 12, 31)
    # NOTE(review): the 'yahoo' data source has been unreliable/deprecated in
    # pandas-datareader — confirm this still fetches data.
    train = DataReader(etfs, 'yahoo', start=train_start, end=train_end)['Adj Close']
    test = DataReader(etfs, 'yahoo', start=test_start, end=test_end)['Adj Close']
    from preprocessor import ETFScaler
    from etf_dataset import ETFDataset
    train_data = train.values
    max_pred_steps = 200
    # Scaler is fit on training prices only; max_pred_steps normalizes the
    # prediction-horizon condition into [0, 1].
    scaler = ETFScaler(train_data, max_pred_steps)
    etf_dataset = ETFDataset(etfs=train_data, seq_len=2000, max_pred_steps=max_pred_steps, scaler=scaler)
    print('length:', len(etf_dataset))
    n_epochs = 10
    batch_size = 16
    noise_dim = 4#16
    lgt = LSTMGANTrainer(n_epochs=n_epochs,
                         batch_size=batch_size,
                         noise_dim=noise_dim,
                         etf_dataset=etf_dataset,
                         num_workers=1,
                         model='TCN'
                         )
    #lgt.profile()
    lgt.train()
|
{"/gan_trainer.py": ["/torch_trainer.py", "/models/convolutional_models.py", "/preprocessor.py", "/etf_dataset.py"], "/main.py": ["/preprocessor.py"]}
|
30,334
|
choyi0521/stock-gan-test
|
refs/heads/master
|
/etf_dataset.py
|
import torch
from torch.utils.data import Dataset
class ETFDataset(Dataset):
    """Sliding-window dataset over an ETF price array.

    Each index decodes to (prediction step i, window offset j): the local
    condition is the window starting at j, the target is the same-length
    window shifted forward by i rows, and the global condition is the
    normalized step fraction i / max_pred_steps.
    """

    def __init__(self, etfs, seq_len, max_pred_steps, scaler):
        self.etfs = etfs
        self.seq_len = seq_len
        self.max_pred_steps = max_pred_steps
        self.scaler = scaler
        # Number of valid window offsets for the largest shift.
        self.block_size = etfs.shape[0] - max_pred_steps - seq_len + 1
        # One block of offsets per shift value 0..max_pred_steps inclusive.
        self.length = self.block_size * (max_pred_steps + 1)
        self.dtype = torch.float32
        assert self.block_size > 0

    def __getitem__(self, index):
        steps, offset = divmod(index, self.block_size)
        frac = steps / self.max_pred_steps
        lcond_raw, gcond_raw = self.scaler.transform(
            self.etfs[offset:offset + self.seq_len], frac)
        target_raw, _ = self.scaler.transform(
            self.etfs[offset + steps:offset + steps + self.seq_len], frac)
        lcond = torch.tensor(lcond_raw, dtype=self.dtype)
        gcond = torch.tensor(gcond_raw, dtype=self.dtype).view((1,))
        target = torch.tensor(target_raw, dtype=self.dtype)
        return lcond, gcond, target

    def __len__(self):
        return self.length
|
{"/gan_trainer.py": ["/torch_trainer.py", "/models/convolutional_models.py", "/preprocessor.py", "/etf_dataset.py"], "/main.py": ["/preprocessor.py"]}
|
30,335
|
choyi0521/stock-gan-test
|
refs/heads/master
|
/main.py
|
import numpy as np
import pandas as pd
from pandas_datareader.data import DataReader
from datetime import datetime

# Manual driver: fetch ETF price history and exercise the scaler.
etfs = ['VTI', 'EFA', 'EEM', 'TLT', 'TIP', 'VNQ']
train_start = datetime(2005,1,1)
train_end = datetime(2012,12,31)
test_start = datetime(2013,1,1)
test_end = datetime(2014,12,31)
train = DataReader(etfs, 'yahoo', start=train_start, end=train_end)['Adj Close']
test = DataReader(etfs, 'yahoo', start=test_start, end=test_end)['Adj Close']
from preprocessor import ETFScaler
scaler = ETFScaler(train.values, 300)
print('okay')
# Fix: ETFScaler defines `transform`, not `transfer` — the old call raised
# AttributeError.
v = scaler.transform(train.values[0:1], np.array(100))
print(v)
|
{"/gan_trainer.py": ["/torch_trainer.py", "/models/convolutional_models.py", "/preprocessor.py", "/etf_dataset.py"], "/main.py": ["/preprocessor.py"]}
|
30,336
|
choyi0521/stock-gan-test
|
refs/heads/master
|
/preprocessor.py
|
from sklearn.preprocessing import StandardScaler
import numpy as np
class ETFScaler(object):
    """Standardizes ETF price rows and normalizes prediction-step counts."""

    def __init__(self, etfs: np.array, max_pred_steps: int):
        # Fit mean/std on the provided (training) data once, up front.
        self.scaler = StandardScaler()
        self.scaler.fit(etfs)
        self.max_pred_steps = max_pred_steps

    def transform(self, etfs: np.array, pred_steps: int):
        """Return (standardized rows, pred_steps scaled by max_pred_steps)."""
        scaled = self.scaler.transform(etfs)
        return scaled, pred_steps / self.max_pred_steps
|
{"/gan_trainer.py": ["/torch_trainer.py", "/models/convolutional_models.py", "/preprocessor.py", "/etf_dataset.py"], "/main.py": ["/preprocessor.py"]}
|
30,337
|
choyi0521/stock-gan-test
|
refs/heads/master
|
/torch_trainer.py
|
import torch
import numpy as np
import os
import random
class TorchTrainer(object):
    """Minimal trainer base: fixed RNG seeding plus device selection."""

    def __init__(self):
        # Seed every RNG first so subclass model construction is reproducible.
        self.set_random_seed()
        # Prefer the GPU when one is available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def train(self):
        """Override in subclasses."""
        pass

    def validate(self):
        """Override in subclasses."""
        pass

    def set_random_seed(self, seed=42):
        """Seed python, numpy, and torch RNGs and force deterministic cuDNN."""
        os.environ['PYTHONHASHSEED'] = str(seed)
        for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
            seeder(seed)
        torch.backends.cudnn.deterministic = True
|
{"/gan_trainer.py": ["/torch_trainer.py", "/models/convolutional_models.py", "/preprocessor.py", "/etf_dataset.py"], "/main.py": ["/preprocessor.py"]}
|
30,357
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/lecbert/__init__.py
|
from .configuration import LecbertConfig
from .datacollator import DataCollatorForLEC
from .modeling import LecbertForPreTraining
from .tokenization import LecbertTokenizer
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,358
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/datamerge.py
|
import os
import random
from datasets import Dataset, concatenate_datasets
random.seed(12345)
if __name__ == "__main__":
    # Attach one randomly sampled word-replacement example (synonym/antonym
    # variants plus labels) to every row of the tokenized corpus, then save
    # the merged dataset to disk.
    ori_dataset = Dataset.load_from_disk('disk/enwiki_bookcorpus-tiny-disk')
    rep_dataset = Dataset.load_from_disk('disk/enwiki_bookcorpus-tiny-wrep-disk')
    ori_num = ori_dataset.num_rows
    rep_num = rep_dataset.num_rows
    # One distinct replacement row per original row (requires rep_num >= ori_num).
    rep_list = random.sample(range(rep_num), ori_num)
    start_idx = 0
    def dataset_merge(examples):
        # Batched map callback: pairs each batch of `input_ids` with the next
        # slice of sampled replacement rows.
        input_ids = examples['input_ids']
        # input_ids arrive as torch tensors; convert to plain lists for Arrow.
        input_ids = [ids.detach().numpy().tolist() for ids in input_ids]
        # NOTE(review): this module-level cursor assumes single-process,
        # in-order batches (num_proc=1) — confirm before raising num_proc.
        global start_idx
        end_idx = start_idx + len(input_ids)
        slc_list = rep_list[start_idx:end_idx]
        print(start_idx, end_idx)
        start_idx = end_idx
        original_sent = []
        synonym_sent = []
        antonym_sent = []
        synonym_antonym_sent = []
        replace_label = []
        for s in slc_list:
            t_d = rep_dataset[s]
            original_sent.append(t_d['original_sent'])
            synonym_sent.append(t_d['synonym_sent'])
            antonym_sent.append(t_d['antonym_sent'])
            synonym_antonym_sent.append(t_d['synonym_antonym_sent'])
            replace_label.append(t_d['replace_label'])
        return {'input_ids': input_ids,
                'original_sent': original_sent,
                'synonym_sent': synonym_sent,
                'antonym_sent': antonym_sent,
                'synonym_antonym_sent': synonym_antonym_sent,
                'replace_label':replace_label}
    dataset = ori_dataset.map(dataset_merge,
                              batched=True,
                              batch_size=5000,
                              writer_batch_size=5000,
                              remove_columns=ori_dataset.column_names,
                              load_from_cache_file=True,
                              cache_file_name="./cache/wrep-tiny-train.arrow",
                              num_proc=1)
    dataset.set_format(type=None, columns=['input_ids', 'original_sent', 'synonym_sent', 'antonym_sent', 'synonym_antonym_sent', 'replace_label'])
    dataset.save_to_disk("enwiki_bookcorpus-tiny-lec-disk")
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,359
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/wordnet.py
|
from nltk.corpus import wordnet as wn
#from nltk.stem import WordNetLemmatizer
from lemminflect import getInflection
#wnl = WordNetLemmatizer()
REPLACE_TAG = ['NN', 'NNS', 'JJ', 'JJR', 'JJS', 'RB', 'RBR', 'RBS', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'] # [NNP, NNPS]
REPLACE_POS = ['NOUN', 'VERB', 'ADJ', 'ADV']
POS_TO_TAGS = {'NOUN': ['NN', 'NNS'],
'ADJ': ['JJ', 'JJR', 'JJS'],
'VERB': ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'],
'ADV': ['RB', 'RBR', 'RBS']}
def get_synonym(token):
    """Return WordNet synonyms of a spaCy token (empty for non-content POS).

    Matches are collected across all synsets for the token's text and POS,
    excluding the token's own surface form and lemma; multi-word lemmas have
    underscores replaced with spaces.
    """
    lemma = token.lemma_
    text = token.text
    pos = token.pos_
    word_synset = set()
    if pos not in REPLACE_POS:
        return list(word_synset)
    # Fix: getattr replaces eval("wn."+pos) — same lookup of the wn.NOUN /
    # wn.VERB / wn.ADJ / wn.ADV constants without evaluating arbitrary code.
    synsets = wn.synsets(text, pos=getattr(wn, pos))
    for synset in synsets:
        words = synset.lemma_names()
        for word in words:
            if word.lower() != text.lower() and word.lower() != lemma.lower():
                word = word.replace('_', ' ')
                word_synset.add(word)
    return list(word_synset)
def get_hypernyms(token):
    """Return WordNet hypernyms of a spaCy token (empty for non-content POS).

    Collects lemma names from the hypernym synsets of every synset matching
    the token's text and POS, excluding the token's own form and lemma.
    """
    lemma = token.lemma_
    text = token.text
    pos = token.pos_
    word_hypernyms = set()
    if pos not in REPLACE_POS:
        return list(word_hypernyms)
    # Fix: getattr replaces eval("wn."+pos) — same lookup of the wn POS
    # constants without evaluating arbitrary code.
    synsets = wn.synsets(text, pos=getattr(wn, pos))
    for synset in synsets:
        for hyperset in synset.hypernyms():
            words = hyperset.lemma_names()
            for word in words:
                if word.lower() != text.lower() and word.lower() != lemma.lower():
                    word = word.replace('_', ' ')
                    word_hypernyms.add(word)
    return list(word_hypernyms)
def get_antonym(token):
    """Return WordNet antonyms of a spaCy token (empty for non-content POS).

    Walks every lemma of every synset matching the token's text and POS and
    collects their antonym lemma names, excluding the token's own form/lemma.
    """
    lemma = token.lemma_
    text = token.text
    pos = token.pos_
    word_antonym = set()
    if pos not in REPLACE_POS:
        return list(word_antonym)
    # Fix: getattr replaces eval("wn."+pos) — same lookup of the wn POS
    # constants without evaluating arbitrary code.
    synsets = wn.synsets(text, pos=getattr(wn, pos))
    for synset in synsets:
        for synlemma in synset.lemmas():
            for antonym in synlemma.antonyms():
                word = antonym.name()
                if word.lower() != text.lower() and word.lower() != lemma.lower():
                    word = word.replace('_', ' ')
                    word_antonym.add(word)
    return list(word_antonym)
def get_lemminflect(token):
    """Return alternative inflected forms of the token's lemma for its POS."""
    text = token.text
    lemma = token.lemma_
    tag = token.tag_
    pos = token.pos_
    forms = set()
    if pos not in REPLACE_POS:
        return list(forms)
    # Try every Penn tag for this coarse POS except the token's current one.
    for candidate_tag in POS_TO_TAGS[pos]:
        if candidate_tag == tag:
            continue
        for word in getInflection(lemma, tag=candidate_tag):
            if word.lower() != text.lower():
                forms.add(word)
    return list(forms)
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,360
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/lecbert/tokenization.py
|
from transformers import RobertaTokenizer
from typing import List, Optional
REPLACE_NONE = -100
class LecbertTokenizer(RobertaTokenizer):
    """RoBERTa tokenizer extended with token-level replace-label layout."""

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A RoBERTa sequence has the following format:
        - single sequence: ``<s> X </s>``
        - pair of sequences: ``<s> A </s></s> B </s>``
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """
        bos = [self.cls_token_id]
        eos = [self.sep_token_id]
        if token_ids_1 is None:
            return bos + token_ids_0 + eos
        return bos + token_ids_0 + eos + eos + token_ids_1 + eos

    def create_token_label_from_sequences(
        self, labels_0: List[int], labels_1: Optional[List[int]] = None
    ) -> List[int]:
        """Mirror the special-token layout, placing REPLACE_NONE (ignored
        positions) where build_inputs_with_special_tokens inserts <s>/</s>."""
        pad = [REPLACE_NONE]
        if labels_1 is None:
            return pad + labels_0 + pad
        return pad + labels_0 + pad + pad + labels_1 + pad
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,361
|
Marvinmw/CLINE
|
refs/heads/master
|
/preprocess/tokenizer_train.py
|
# -*- coding:utf-8 -*-
import os
from argparse import ArgumentParser
from tokenizers import ByteLevelBPETokenizer, CharBPETokenizer
from tokenizers import normalizers
if __name__ == "__main__":
    # CLI: train a BPE tokenizer (byte-level or char-level) on a file or a
    # directory of text files, then save the vocab/merges to output_path.
    parser = ArgumentParser()
    parser.add_argument("--input_path", type=str, nargs='?', required=True, help="")
    parser.add_argument("--output_path", type=str, nargs='?', required=True, help="")
    parser.add_argument("--bytelevel", action="store_true", default=False, help="")
    parser.add_argument("--prefix_space", action="store_true", default=False, help="")
    parser.add_argument("--trim_offsets", action="store_true", default=False, help="")
    parser.add_argument("--lowercase", action="store_true", default=False, help="")
    parser.add_argument("--normalizer", type=str, default="nfkc", nargs='?', help="[nfc, nfd, nfkc, nfkd]")
    parser.add_argument("--bert_normalizer", action="store_true", default=False, help="")
    parser.add_argument("--vocab", type=int, default=52_000, nargs='?', help="")
    parser.add_argument("--minfreq", type=int, default=2, nargs='?', help="")
    args = parser.parse_args()
    file_path = args.input_path
    # A directory input expands to the list of files directly inside it.
    if os.path.isdir(file_path):
        file_names = os.listdir(file_path)
        file_path = [os.path.join(file_path, fn) for fn in file_names]
    outpath = args.output_path
    if not os.path.exists(outpath):
        os.mkdir(outpath)
    # Initialize a tokenizer
    if args.bytelevel:
        tokenizer = ByteLevelBPETokenizer(add_prefix_space=args.prefix_space,
                                          trim_offsets=args.trim_offsets,
                                          lowercase=args.lowercase,
                                          unicode_normalizer=args.normalizer)
        # tokenizer._tokenizer.normalizer = normalizers.Sequence([
        #     normalizers.Strip(),
        #     normalizers.Lowercase(),
        #     normalizers.NFKC()
        # ])
        # Customize training — RoBERTa-style special tokens.
        tokenizer.train(files=file_path,
                        vocab_size=args.vocab,
                        min_frequency=args.minfreq,
                        special_tokens=[
                            "<s>",
                            "<pad>",
                            "</s>",
                            "<unk>",
                            "<mask>",
                        ])
    else:
        # Char-level BPE; empty suffix disables the default "</w>" marker.
        tokenizer = CharBPETokenizer(suffix="",
                                     lowercase=args.lowercase,
                                     unicode_normalizer=args.normalizer,
                                     bert_normalizer=args.bert_normalizer)
        # Customize training
        tokenizer.train(files=file_path,
                        vocab_size=args.vocab,
                        min_frequency=args.minfreq,
                        suffix="",
                        special_tokens=[
                            "<s>",
                            "<pad>",
                            "</s>",
                            "<unk>",
                            "<mask>",
                        ])
    tokenizer.save_model(outpath)
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,362
|
Marvinmw/CLINE
|
refs/heads/master
|
/preprocess/extract_sentence.py
|
# -*- coding:utf-8 -*-
import os
import sys
import spacy
import re
from multiprocessing import Process
from argparse import ArgumentParser
nlp = spacy.load('en_core_web_sm')
#boundary = re.compile('[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]')
def custom_seg(doc):
    """spaCy pipeline hook: never start a new sentence right after a quote."""
    last = len(doc) - 1
    for i, tok in enumerate(doc):
        if i != last and tok.text in ('"', "'", "‘", "’", "“", "”"):
            doc[i + 1].sent_start = False
    return doc
nlp.add_pipe(custom_seg, before='parser')
def get_articles(path):
    """Load articles from a file of one-dict-literal-per-line records.

    Each line is a Python dict literal with a 'text' key (WikiExtractor-style
    dump). Returns the list of article texts in file order.
    """
    import ast  # local import keeps this fix self-contained
    # Fix: ast.literal_eval replaces eval() — it parses Python literals only,
    # so a malicious or corrupt line cannot execute arbitrary code. The file
    # is also closed deterministically via the context manager.
    with open(path, "r", encoding='utf-8') as file:
        return [ast.literal_eval(line)['text'] for line in file]
def get_sentences(article):
    """Run the spaCy pipeline on an article and return its sentence spans."""
    return list(nlp(article).sents)
class MyProcess(Process):
    """Worker process: extract, whitespace-normalize, and write articles.

    Reads every file in `files`, flattens each article's whitespace, and
    writes one article per line to `<dirname>/<outname>`.
    """
    def __init__(self, files, dirname, outname):
        super(MyProcess, self).__init__()
        self.files = files      # input file paths assigned to this worker
        self.dirname = dirname  # output directory (created if missing)
        self.outname = outname  # output file name (worker index)
    def run(self):
        # NOTE(review): multiple workers may race on mkdir — os.makedirs with
        # exist_ok=True would be safer; confirm before changing.
        if not os.path.exists(self.dirname):
            os.mkdir(self.dirname)
        outfile = open(os.path.join(self.dirname, str(self.outname)), 'w', encoding="utf-8")
        for idx, path in enumerate(self.files):
            #print(idx)
            articles = get_articles(path)
            for arti in articles:
                arti = str(arti)
                arti = arti.strip()
                # Collapse all internal whitespace runs to single spaces.
                arti = re.sub('[\s]+', ' ', arti)
                arti = arti.strip()
                if not arti: continue
                outfile.write('{}\n'.format(arti))
                # sents = get_sentences(arti)
                # for sen in sents:
                #     sen = str(sen)
                #     sen = sen.strip()
                #     sen = re.sub('[\n]+', ' ', sen)
                #     sen = sen.strip()
                #     if not sen: continue
                #     #if len(sen) < 2: continue
                #     #print(sen.encode('ascii'))
                #     outfile.write('{}\n'.format(sen))
                # outfile.write('\n')
        outfile.close()
def bisector_list(tabulation, num):
    """Split `tabulation` into `num` contiguous chunks.

    The first num-1 chunks hold len(tabulation)//num items each; the final
    chunk absorbs any remainder.
    """
    size = len(tabulation) // num
    chunks = []
    for part in range(num):
        lo = part * size
        hi = len(tabulation) if part == num - 1 else (part + 1) * size
        chunks.append(tabulation[lo:hi])
    return chunks
def walk(path):
    """Return the full path of every file under `path`, recursively."""
    collected = []
    for root, _dirs, files in os.walk(path):
        collected.extend(os.path.join(root, name) for name in files)
    return collected
if __name__ == "__main__":
    # CLI: fan the input files out over `processnum` workers; each worker
    # writes its normalized articles to its own numbered output file.
    parser = ArgumentParser()
    parser.add_argument("--input_path", type=str, nargs='?', required=True, help="")
    parser.add_argument("--output_path", type=str, nargs='?', required=True, help="")
    parser.add_argument("--processnum", type=int, default=6, nargs='?', help="")
    args = parser.parse_args()
    dir_path = args.input_path
    out_path = args.output_path
    process_num = args.processnum
    files = walk(dir_path)
    # Split the file list into one contiguous share per worker.
    n_files = bisector_list(files, process_num)
    processes = []
    for i in range(process_num):
        p = MyProcess(n_files[i], out_path, i)
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,363
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/dataloader.py
|
import os
from datasets import load_dataset, Dataset
from typing import Optional
from dataclasses import dataclass, field
from transformers import (
HfArgumentParser,
PreTrainedTokenizer
)
from transformers import AutoConfig, AutoTokenizer
import random
import spacy
random.seed(12345)
from spacy.tokens import Doc
Doc.set_extension('_synonym_sent', default=False)
Doc.set_extension('_synonym_intv', default=False)
Doc.set_extension('_ori_syn_intv', default=False)
Doc.set_extension('_antonym_sent', default=False)
Doc.set_extension('_antonym_intv', default=False)
Doc.set_extension('_ori_ant_intv', default=False)
from wordnet import (
REPLACE_POS,
get_synonym,
get_hypernyms,
get_antonym,
get_lemminflect
)
from random_words import RandomWords
rw = RandomWords()
REPLACE_RATIO = 0.5
REPLACE_ORIGINAL = 0
REPLACE_LEMMINFLECT = 1
REPLACE_SYNONYM = 2
REPLACE_HYPERNYMS = 3
REPLACE_ANTONYM = 4
REPLACE_RANDOM = 5
REPLACE_ADJACENCY = 6
REPLACE_NONE = -100
SYNONYM_RATIO = 1/3
HYPERNYMS_RATIO = 1/3
LEMMINFLECT_RATIO = 1/3
ANTONYM_RATIO = 1/2
RANDOM_RATIO = 1/2
# ADJACENCY_RATIO = 1/3
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    # Checkpoint to initialize weights from; None means train from scratch.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: "},
    )
    # Separate config/tokenizer names allow mixing a custom config with a stock tokenizer.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    # Selects the synonym/antonym replacement branch in get_dataset().
    word_replace: bool = field(
        default=False,
        metadata={"help": "Whether synonym substitution is used to construct adversarial samples."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": "Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling."
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": "Optional input sequence length after tokenization."
            "The training dataset will be truncated in block of this size for training."
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocess_batch_size: int = field(
        default=1000, metadata={"help": "Number of examples per batch provided to preprocess function."}
    )
    preprocess_cache_file: Optional[str] = field(
        default=None, metadata={"help": "Provide the name of a cache file to use to store the results of the computation instead of the automatically generated cache file name."}
    )
    # Drives the byte-offset scheme in get_replace_label(); one of bert/electra/roberta.
    preprocess_model_type: Optional[str] = field(
        default=None, metadata={"help": "Model type in [bert, electra, roberta]"}
    )
    # When True, get_dataset() short-circuits to Dataset.load_from_disk(file_path).
    load_from_disk: bool = field(
        default=False, metadata={"help": "Load dataset from disk."}
    )
    preprocess_output_file: Optional[str] = field(
        default=None, metadata={"help": "Path to preprocess dataset."}
    )
    # Optional on-disk cache for the expensive spaCy word-replacement pass.
    word_replace_file: Optional[str] = field(
        default=None, metadata={"help": "Path to preprocess wordreplace dataset."}
    )
    lang: Optional[str] = field(
        default="en", metadata={"help": "Language of dataset [en, zh]."}
    )
def get_replace_label(args, word_list, repl_intv, orig_sent):
    """Project byte-level replacement intervals onto tokenizer word pieces.

    ``repl_intv`` holds ``(start, end, label)`` triples over UTF-8 byte
    offsets of the sentence, in increasing order.  Every word piece whose
    starting byte offset falls inside an interval receives that interval's
    label; all other pieces get REPLACE_NONE (-100).

    The per-piece byte advance depends on ``args.preprocess_model_type``:
    'roberta' (byte-level pieces carry their own spaces), 'bert'/'electra'
    ('##' continuation pieces, spaces recovered from ``orig_sent``), or a
    plain per-piece fallback otherwise.

    Returns a list of int labels, one per entry of ``word_list``.
    """
    label = [REPLACE_NONE] * len(word_list)
    if not repl_intv:
        return label
    byte_index = 0 # point to the start of the next token in the byte type sentence
    orig_index = 0 # point to the start of the next token in the utf-8 type sentence
    cur_range = 0
    cur_start, cur_end, cur_label = repl_intv[cur_range] # replacement ranges are in increasing order (offsets include spaces in text)
    for index, word in enumerate(word_list):
        if byte_index >= cur_start and byte_index <= cur_end: # word piece is in replacement range
            label[index] = cur_label
        if args.preprocess_model_type in ['roberta']:
            # NOTE(review): len(word) counts characters, not bytes; for
            # non-ASCII byte-level pieces this may drift from the byte
            # intervals -- confirm against the tokenizer's output.
            byte_offset = len(word) # bytelevel contains spaces in the token
        elif args.preprocess_model_type in ['bert', 'electra']:
            if word[:2] == '##':
                orig_offset = len(word[2:])
            else:
                # A fresh (non-'##') piece consumes a leading space unless it
                # starts the sentence or the original text has none here.
                if index == 0 or orig_sent[orig_index] != " ":
                    orig_offset = len(word)
                else:
                    orig_offset = len(word) + 1
            # Convert the character span in orig_sent to its UTF-8 byte length.
            byte_offset = len(orig_sent[orig_index:orig_index+orig_offset].encode('utf-8'))
            orig_index += orig_offset
        else:
            byte_offset = len(word)
        byte_index += byte_offset # bytelevel contains spaces in the token
        if byte_index > cur_end: # update replacement range
            if cur_range != len(repl_intv)-1: # not the last range
                cur_range += 1
                cur_start, cur_end, cur_label = repl_intv[cur_range]
            else: # no new range
                break
    # Every interval must have been consumed, otherwise offsets went wrong.
    assert cur_range == len(repl_intv)-1
    return label
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
    spacy_nlp=None
):
    """Load and preprocess the pretraining corpus.

    Branches, in order of precedence:
      - ``args.load_from_disk``: return a previously saved Dataset as-is;
      - ``args.line_by_line``: tokenize each text line independently;
      - ``args.word_replace``: build synonym/antonym variants via the spaCy
        pipeline (``spacy_nlp`` must include the ``replace_word`` component),
        optionally cached at ``args.word_replace_file``;
      - default: concatenate lines and cut into fixed-size id blocks.

    Returns a ``datasets.Dataset``; its columns depend on the branch taken
    ('input_ids' only, or the replacement ids/labels as well).
    """
    file_path = args.eval_data_file if evaluate else args.train_data_file
    if args.load_from_disk:
        return Dataset.load_from_disk(file_path)
    # A directory is expanded to the list of files it contains.
    if os.path.isdir(file_path):
        file_names = os.listdir(file_path)
        file_path = [os.path.join(file_path, fn) for fn in file_names]
    dataset = load_dataset("src/text.py", data_files=file_path, split="train", cache_dir=cache_dir, ignore_verifications=True)
    def lines_to_block(examples):
        # Concatenate all lines, tokenize once, then slice the id stream into
        # blocks of block_size (special tokens are added per block).
        outputs = []
        block_size = args.block_size - tokenizer.num_special_tokens_to_add(pair=False)
        lines = examples['text']
        text = "\n".join(lines)
        tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
        for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size
            outputs.append(
                tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
            )
        # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
        # If your dataset is small, first you should loook for a bigger one :-) and second you
        # can change this behavior by adding (model specific) padding.
        return {'input_ids': outputs}
    def word_replace(examples):
        # Re-blocked text is run through the spaCy pipeline; the replace_word
        # component fills the doc._ extensions with synonym/antonym variants
        # and their (start, end, label) byte intervals.
        inputs = []
        block_size = args.block_size - tokenizer.num_special_tokens_to_add(pair=False)
        lines = examples['text']
        text = "\n".join(lines)
        tokenized_text = tokenizer.tokenize(text)
        for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size
            inputs.append(tokenizer.convert_tokens_to_string(tokenized_text[i : i + block_size]))
        # inputs = examples['text']
        original_sent = []
        ori_syn_intv = []
        ori_ant_intv = []
        synonym_sent = []
        synonym_intv = []
        antonym_sent = []
        antonym_intv = []
        docs = spacy_nlp.pipe(inputs, n_process=1, batch_size=100, disable=['parser', 'ner'])
        for doc in docs:
            # Sentences are re-joined with single spaces so the byte intervals
            # computed in replace_word() line up with these strings.
            ori_sent = " ".join([t.text for t in doc])
            syn_sent = " ".join(doc._._synonym_sent)
            ant_sent = " ".join(doc._._antonym_sent)
            syn_intv = doc._._synonym_intv
            ant_intv = doc._._antonym_intv
            os_intv = doc._._ori_syn_intv
            oa_intv = doc._._ori_ant_intv
            original_sent.append(ori_sent)
            ori_syn_intv.append(os_intv)
            ori_ant_intv.append(oa_intv)
            synonym_sent.append(syn_sent)
            synonym_intv.append(syn_intv)
            antonym_sent.append(ant_sent)
            antonym_intv.append(ant_intv)
        return {'original_sent': original_sent,
                'ori_syn_intv': ori_syn_intv,
                'ori_ant_intv': ori_ant_intv,
                'synonym_sent': synonym_sent,
                'synonym_intv': synonym_intv,
                'antonym_sent': antonym_sent,
                'antonym_intv': antonym_intv}
    def convert_tokens_to_ids(examples):
        # Tokenize the three sentence variants and align the byte intervals to
        # per-piece labels via get_replace_label().
        input_ids = []
        ori_syn_label = []
        ori_ant_label = []
        synonym_ids = []
        synonym_label = []
        antonym_ids = []
        antonym_label = []
        exp_nums = len(examples['original_sent'])
        for i in range(exp_nums):
            ori_sent = tokenizer.tokenize(examples['original_sent'][i])
            syn_sent = tokenizer.tokenize(examples['synonym_sent'][i])
            ant_sent = tokenizer.tokenize(examples['antonym_sent'][i])
            syn_labl = get_replace_label(args, syn_sent, examples['synonym_intv'][i], examples['synonym_sent'][i])
            ori_syn_labl = get_replace_label(args, ori_sent, examples['ori_syn_intv'][i], examples['original_sent'][i])
            ant_labl = get_replace_label(args, ant_sent, examples['antonym_intv'][i], examples['antonym_sent'][i])
            ori_ant_labl = get_replace_label(args, ori_sent, examples['ori_ant_intv'][i], examples['original_sent'][i])
            # Sanity check: original and variant must agree on how many pieces
            # are unlabeled (-100) and how many are 'kept original' (0).
            assert syn_labl.count(-100) == ori_syn_labl.count(-100) and syn_labl.count(0) == ori_syn_labl.count(0)
            assert ant_labl.count(-100) == ori_ant_labl.count(-100) and ant_labl.count(0) == ori_ant_labl.count(0)
            ori_ids = tokenizer.convert_tokens_to_ids(ori_sent)
            syn_ids = tokenizer.convert_tokens_to_ids(syn_sent)
            ant_ids = tokenizer.convert_tokens_to_ids(ant_sent)
            input_ids.append(ori_ids)
            ori_syn_label.append(ori_syn_labl)
            ori_ant_label.append(ori_ant_labl)
            synonym_ids.append(syn_ids)
            synonym_label.append(syn_labl)
            antonym_ids.append(ant_ids)
            antonym_label.append(ant_labl)
        return {'input_ids': input_ids,
                'ori_syn_label': ori_syn_label,
                'ori_ant_label': ori_ant_label,
                'synonym_ids': synonym_ids,
                'synonym_label': synonym_label,
                'antonym_ids': antonym_ids,
                'antonym_label': antonym_label}
    if args.line_by_line:
        dataset = dataset.map(lambda ex: tokenizer(ex["text"], add_special_tokens=True,
                                                   truncation=True, max_length=args.block_size),
                              batched=True,
                              batch_size=args.preprocess_batch_size,
                              writer_batch_size=args.preprocess_batch_size,
                              remove_columns=dataset.column_names,
                              load_from_cache_file=True,
                              cache_file_name=args.preprocess_cache_file)
        dataset.set_format(type=None, columns=['input_ids'])
    elif args.word_replace:
        # The spaCy pass is expensive, so its result is cached on disk and
        # reused when word_replace_file already exists.
        if args.word_replace_file and os.path.exists(args.word_replace_file):
            dataset = Dataset.load_from_disk(args.word_replace_file)
        else:
            dataset = dataset.map(word_replace,
                                  batched=True,
                                  batch_size=args.preprocess_batch_size,
                                  writer_batch_size=args.preprocess_batch_size,
                                  remove_columns=dataset.column_names,
                                  load_from_cache_file=True,
                                  cache_file_name=args.preprocess_cache_file)
            dataset.set_format(type=None, columns=['original_sent', 'ori_syn_intv', 'ori_ant_intv',
                                                   'synonym_sent', 'synonym_intv', 'antonym_sent', 'antonym_intv'])
            dataset.save_to_disk(args.word_replace_file)
        dataset = dataset.map(convert_tokens_to_ids,
                              batched=True,
                              batch_size=args.preprocess_batch_size,
                              writer_batch_size=args.preprocess_batch_size,
                              remove_columns=dataset.column_names,
                              load_from_cache_file=False)
        dataset.set_format(type=None, columns=['input_ids', 'ori_syn_label', 'ori_ant_label',
                                               'synonym_ids', 'synonym_label', 'antonym_ids', 'antonym_label'])
    else:
        dataset = dataset.map(lines_to_block,
                              batched=True,
                              batch_size=args.preprocess_batch_size,
                              writer_batch_size=args.preprocess_batch_size,
                              remove_columns=dataset.column_names,
                              load_from_cache_file=True,
                              cache_file_name=args.preprocess_cache_file)
        dataset.set_format(type=None, columns=['input_ids'])
    return dataset
def search_replacement(doc, candidate_index, replace_type, max_num, pos_to_words=None):
    """Collect up to ``max_num`` word replacements for tokens of ``doc``.

    ``candidate_index`` lists token positions to try, in order.  For each
    token a candidate word is drawn according to ``replace_type`` (WordNet
    synonym/hypernym/antonym, lemma inflection, a random word, or a word
    with the same POS tag from ``pos_to_words``).  Candidates identical to
    the token (case-insensitively) are skipped.

    Returns a list of ``(token_index, replacement_word, replace_type)``
    tuples, at most ``max_num`` long (empty when ``max_num < 1``).
    """
    replacements = []
    if max_num < 1:
        return replacements
    for idx in candidate_index:
        token = doc[idx]
        if replace_type == REPLACE_RANDOM:
            # Random words come straight from the generator, no pool to pick from.
            chosen = rw.random_word()
        else:
            if replace_type == REPLACE_ANTONYM:
                options = get_antonym(token)
            elif replace_type == REPLACE_ADJACENCY:
                options = pos_to_words[token.pos_]
            elif replace_type == REPLACE_SYNONYM:
                options = get_synonym(token)
            elif replace_type == REPLACE_HYPERNYMS:
                options = get_hypernyms(token)
            elif replace_type == REPLACE_LEMMINFLECT:
                options = get_lemminflect(token)
            else:
                options = None
            chosen = random.choice(options) if options else None
        # Only keep candidates that actually change the surface form.
        if chosen and chosen.lower() != token.text.lower():
            replacements.append((idx, chosen, replace_type))
            if len(replacements) >= max_num:
                break
    return replacements
def replace_word(doc):
    """spaCy pipeline component: attach synonym and antonym variants to ``doc``.

    Fills the doc extension attributes:
      - ``_synonym_sent`` / ``_antonym_sent``: the variant sentences as lists
        of token strings,
      - ``_synonym_intv`` / ``_antonym_intv``: ``(start, end, label)`` UTF-8
        byte intervals of replaced (or deliberately kept) tokens in the
        variant sentence,
      - ``_ori_syn_intv`` / ``_ori_ant_intv``: the matching intervals in the
        original sentence.

    Interval starts point at the byte *before* a token (the separating
    space; -1 for the first token), matching the >= start test in
    get_replace_label().  Returns ``doc`` so it can be chained in a pipeline.
    """
    synonym_sent = []
    synonym_intv = []
    ori_syn_intv = []
    antonym_sent = []
    antonym_intv = []
    ori_ant_intv = []
    length = len(doc)
    # At most REPLACE_RATIO of the tokens are candidates for replacement.
    rep_num = int(length*REPLACE_RATIO)
    rep_index = []
    # pos_word = {p:[] for p in REPLACE_POS}
    for index, token in enumerate(doc):
        if token.pos_ in REPLACE_POS:
            rep_index.append(index)
            # pos_word[token.pos_].append(token.text)
    rep_num = min(rep_num, len(rep_index))
    # Independent random draws choose which replacement strategy each variant uses.
    syn_rand = random.random()
    ant_rand = random.random()
    syn_index = rep_index[:]
    random.shuffle(syn_index)
    ant_index = rep_index[:]
    random.shuffle(ant_index)
    syn_replace = []
    ant_replace = [] # [(rep_idx, rep_word, rep_type)]
    ############### Antonym Replacement ####################
    if ant_rand < ANTONYM_RATIO:
        ant_replace = search_replacement(doc, candidate_index=ant_index, replace_type=REPLACE_ANTONYM, max_num=rep_num)
    # if not ant_replace and ant_rand < ANTONYM_RATIO + ADJACENCY_RATIO:
    #     ant_replace = search_replacement(doc, candidate_index=ant_index, replace_type=REPLACE_ADJACENCY, max_num=rep_num, pos_to_words=pos_word)
    # Fall back to random words so the antonym variant is never empty.
    if not ant_replace:
        ant_replace = search_replacement(doc, candidate_index=ant_index, replace_type=REPLACE_RANDOM, max_num=rep_num)
    ############### Synonym Replacement ####################
    if syn_rand < HYPERNYMS_RATIO:
        syn_replace = search_replacement(doc, candidate_index=syn_index, replace_type=REPLACE_HYPERNYMS, max_num=rep_num)
    if not syn_replace and syn_rand < HYPERNYMS_RATIO + SYNONYM_RATIO:
        syn_replace = search_replacement(doc, candidate_index=syn_index, replace_type=REPLACE_SYNONYM, max_num=rep_num)
    if not syn_replace:
        syn_replace = search_replacement(doc, candidate_index=syn_index, replace_type=REPLACE_LEMMINFLECT, max_num=rep_num)
    ############### Original Replacement ####################
    # Merge both replacement lists, sorted descending so pop() yields them in
    # ascending token order below.
    all_replace = ant_replace + syn_replace
    all_replace = sorted(all_replace, key=lambda x:x[0], reverse=True)
    ori_len = -1 # point to the space before next token
    syn_len = -1
    ant_len = -1
    rep_idx, rep_word, rep_type = all_replace.pop() if all_replace else (None, None, None)
    for index, token in enumerate(doc):
        ori = syn = ant = token.text
        # `while` (not `if`): the same token index can appear in both the
        # synonym and the antonym replacement lists.
        while index == rep_idx:
            if rep_type in [REPLACE_SYNONYM, REPLACE_HYPERNYMS, REPLACE_LEMMINFLECT]:
                syn = rep_word
                synonym_intv.append((syn_len, syn_len + len(syn.encode('utf-8')), rep_type)) # fix length mismatch, mx.encode for bytelevelbpe
                ori_syn_intv.append((ori_len, ori_len + len(ori.encode('utf-8')), rep_type))
            elif rep_type in [REPLACE_ANTONYM, REPLACE_RANDOM]:
                ant = rep_word
                antonym_intv.append((ant_len, ant_len + len(ant.encode('utf-8')), rep_type))
                ori_ant_intv.append((ori_len, ori_len + len(ori.encode('utf-8')), rep_type))
            else:
                pass
            rep_idx, rep_word, rep_type = all_replace.pop() if all_replace else (None, None, None)
        # Candidate tokens that were NOT replaced are recorded with label 0
        # so the classifier also sees explicit 'kept original' examples.
        if index in rep_index:
            if ori == syn:
                synonym_intv.append((syn_len, syn_len + len(syn.encode('utf-8')), REPLACE_ORIGINAL))
                ori_syn_intv.append((ori_len, ori_len + len(ori.encode('utf-8')), REPLACE_ORIGINAL))
            if ori == ant:
                antonym_intv.append((ant_len, ant_len + len(ant.encode('utf-8')), REPLACE_ORIGINAL))
                ori_ant_intv.append((ori_len, ori_len + len(ori.encode('utf-8')), REPLACE_ORIGINAL))
        ori_len = ori_len + len(ori.encode('utf-8')) + 1
        syn_len = syn_len + len(syn.encode('utf-8')) + 1 # +1 to point the space before next token
        ant_len = ant_len + len(ant.encode('utf-8')) + 1
        synonym_sent.append(syn)
        antonym_sent.append(ant)
    doc._._synonym_sent = synonym_sent
    doc._._synonym_intv = synonym_intv
    doc._._ori_syn_intv = ori_syn_intv
    doc._._antonym_sent = antonym_sent
    doc._._antonym_intv = antonym_intv
    doc._._ori_ant_intv = ori_ant_intv
    return doc
if __name__ == "__main__":
    # Running before 'run.py' to generate a cache for dataset.
    # Otherwise, each process will generates a cache separately.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments,))
    model_args, data_args = parser.parse_args_into_dataclasses()
    # NOTE(review): spacy.load() with a bare language code ('en') only works
    # on older spaCy versions / linked models -- confirm the pinned version.
    spacy_nlp = spacy.load(data_args.lang) # 'en_core_web_sm'
    # Register the replacement component so pipe() fills the doc._ extensions.
    spacy_nlp.add_pipe(replace_word, last=True)
    config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir, config=config)
    dataset = get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, spacy_nlp=spacy_nlp)
    dataset.save_to_disk(data_args.preprocess_output_file)
    # txt = ["Blue Network The BlueNetwork (previously the NBC Blue Network) was the on-air —— name of the now defunct American radio network, which ran from 1927 to 1945."]
    # docs = spacy_nlp.pipe(txt, n_process=1, batch_size=100, disable=['parser', 'ner'])
    # for doc in docs:
    #     print(" ".join([t.text for t in doc]))
    #     print(" ".join(doc._._synonym_sent))
    #     print(" ".join(doc._._antonym_sent))
    #     ori_sent = tokenizer.tokenize(" ".join([t.text for t in doc]))
    #     syn_sent = tokenizer.tokenize(" ".join(doc._._synonym_sent))
    #     ant_sent = tokenizer.tokenize(" ".join(doc._._antonym_sent))
    #     syn_labl = get_replace_label(syn_sent, doc._._synonym_intv)
    #     ori_syn_labl = get_replace_label(ori_sent, doc._._ori_syn_intv)
    #     ant_labl = get_replace_label(ant_sent, doc._._antonym_intv)
    #     ori_ant_labl = get_replace_label(ori_sent, doc._._ori_ant_intv)
    #     print([(ori_sent[i], ori_syn_labl[i]) for i in range(len(ori_sent))])
    #     print([(syn_sent[i], syn_labl[i])for i in range(len(syn_labl))])
    #     print(doc._._synonym_intv[0][-1])
    #     print([(ori_sent[i], ori_ant_labl[i]) for i in range(len(ori_sent))])
    #     print([(ant_sent[i], ant_labl[i])for i in range(len(ant_labl))])
    #     print(doc._._antonym_intv[0][-1])
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,364
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/lecbert/datacollator.py
|
import torch
from torch.nn.utils.rnn import pad_sequence
from typing import List, Dict, Tuple
from dataclasses import dataclass
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
REPLACE_NONE = -100
@dataclass
class DataCollatorForLEC:
    """
    Data collator used for linguistic error correction task.
    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for both masked language modeling and linguistic error correction
    """
    tokenizer: PreTrainedTokenizerBase
    mlm: bool = True
    mlm_probability: float = 0.15
    block_size: int = 512
    def __call__(self, examples: List[Dict[str, List[int]]]) -> Dict[str, torch.Tensor]:
        """Collate a batch of preprocessed examples.

        Each example carries 'input_ids' (original sentence), 'synonym_ids'
        and 'antonym_ids' plus their per-token labels.  The returned batch
        stacks the three variants along dim 0 as
        [original | synonym | antonym], so input_ids has 3*batch_size rows.
        Original rows are MLM-masked; synonym/antonym rows keep their
        token-error labels remapped to {0: kept, 1: synonym, 2: antonym}.
        """
        batch_size = len(examples)
        # Leave room for the special tokens added per sequence.
        block_size = self.block_size - self.tokenizer.num_special_tokens_to_add(pair=False)
        ori_sent = []
        ori_mask = []
        syn_sent = []
        syn_mask = []
        ant_sent = []
        ant_mask = []
        ori_label = []
        syn_label = []
        ant_label = []
        for example in examples:
            # NOTE(review): create_token_label_from_sequences appears to pad
            # labels for the added special tokens -- custom tokenizer method,
            # confirm in lecbert/tokenization.py.
            ori_sen = self.tokenizer.build_inputs_with_special_tokens(example["input_ids"][:block_size])
            ori_lab = self.tokenizer.create_token_label_from_sequences([REPLACE_NONE]*len(example["input_ids"][:block_size]))
            syn_sen = self.tokenizer.build_inputs_with_special_tokens(example["synonym_ids"][:block_size])
            syn_lab = example["synonym_label"][:block_size]
            # Collapse all fine-grained synonym replacement types to class 1.
            syn_lab = [1 if lb not in [REPLACE_NONE, 0] else lb for lb in syn_lab]
            syn_lab = self.tokenizer.create_token_label_from_sequences(syn_lab)
            ant_sen = self.tokenizer.build_inputs_with_special_tokens(example["antonym_ids"][:block_size])
            ant_lab = example["antonym_label"][:block_size]
            # Collapse all antonym/random replacement types to class 2.
            ant_lab = [2 if lb not in [REPLACE_NONE, 0] else lb for lb in ant_lab]
            ant_lab = self.tokenizer.create_token_label_from_sequences(ant_lab)
            ori_sent += [torch.tensor(ori_sen, dtype=torch.long)]
            ori_mask += [torch.ones(len(ori_sen))]
            syn_sent += [torch.tensor(syn_sen, dtype=torch.long)]
            syn_mask += [torch.ones(len(syn_sen))]
            ant_sent += [torch.tensor(ant_sen, dtype=torch.long)]
            ant_mask += [torch.ones(len(ant_sen))]
            ori_label += [torch.tensor(ori_lab, dtype=torch.long)]
            syn_label += [torch.tensor(syn_lab, dtype=torch.long)]
            ant_label += [torch.tensor(ant_lab, dtype=torch.long)]
        # Layout consumed by LecbertForPreTraining: [original | synonym | antonym].
        input_ids = ori_sent + syn_sent + ant_sent
        attention_mask = ori_mask + syn_mask + ant_mask
        labels = ori_label + syn_label + ant_label
        assert len(input_ids) == batch_size * 3
        assert len(attention_mask) == batch_size * 3
        assert len(labels) == batch_size * 3
        input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
        attention_mask = pad_sequence(attention_mask, batch_first=True, padding_value=0)
        labels = pad_sequence(labels, batch_first=True, padding_value=REPLACE_NONE)
        # Only the original sentences are masked for the MLM objective.
        mlm_sent, mlm_label = self.mask_tokens(input_ids[:batch_size])
        input_ids[:batch_size] = mlm_sent
        labels[:batch_size] = mlm_label
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": labels
        }
    def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        # Never mask special tokens or padding.
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = REPLACE_NONE  # We only compute loss on masked tokens
        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with random word
        # (0.5 of the remaining 20% of masked positions = 10% overall).
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,365
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/lecbert/configuration.py
|
from transformers import RobertaConfig
class LecbertConfig(RobertaConfig):
    """RoBERTa configuration extended for LEC pretraining.

    Adds ``num_token_error``: the number of token-error classes predicted
    by the token-error-classification head (0 = original token,
    1 = synonym-type replacement, 2 = antonym-type replacement).
    """
    def __init__(self, num_token_error=3, **kwargs):
        super().__init__(**kwargs)
        # Stored on the instance (not the class) so it round-trips through
        # `to_dict()` / `save_pretrained()` / `from_pretrained()` like every
        # other config field; the previous class attribute was silently
        # dropped by config serialization.
        self.num_token_error = num_token_error
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,366
|
Marvinmw/CLINE
|
refs/heads/master
|
/preprocess/tokenizer_test.py
|
# -*- coding:utf-8 -*-
import os
from argparse import ArgumentParser
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing, RobertaProcessing
if __name__ == "__main__":
    # Smoke-test a trained byte-level BPE tokenizer on a fixed sentence.
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--token_path", type=str, nargs='?', required=True, help="")
    cli_args = arg_parser.parse_args()
    vocab_dir = cli_args.token_path
    bpe = ByteLevelBPETokenizer(
        os.path.join(vocab_dir, "vocab.json"),
        os.path.join(vocab_dir, "merges.txt"),
        add_prefix_space=True,
        trim_offsets=True,
        lowercase=True,
        unicode_normalizer="nfkc"
    )
    # RoBERTa-style post-processing wraps each sequence as <s> ... </s>.
    bpe._tokenizer.post_processor = RobertaProcessing(
        ("</s>", bpe.token_to_id("</s>")),
        ("<s>", bpe.token_to_id("<s>")),
        trim_offsets=True,
        add_prefix_space=True
    )
    bpe.enable_truncation(max_length=512)
    encoded = bpe.encode("I am Julien\nI am from China.")
    print([piece.encode('utf-8') for piece in encoded.tokens])
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,367
|
Marvinmw/CLINE
|
refs/heads/master
|
/src/lecbert/modeling.py
|
import warnings
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss, BCELoss
from dataclasses import dataclass
from typing import Optional, Tuple
from transformers.activations import ACT2FN, gelu
from transformers.file_utils import ModelOutput
from transformers.modeling_roberta import RobertaModel, RobertaPreTrainedModel
# Copied from transformers.modeling_roberta.RobertaLMHead
class LecbertLMHead(nn.Module):
    """Masked-language-modeling head: dense -> GELU -> LayerNorm -> vocab
    projection (same layout as RoBERTa's LM head)."""
    def __init__(self, config):
        super().__init__()
        hidden = config.hidden_size
        self.dense = nn.Linear(hidden, hidden)
        self.layer_norm = nn.LayerNorm(hidden, eps=config.layer_norm_eps)
        # Vocabulary projection; the bias lives as a separate parameter and is
        # tied onto the decoder so `resize_token_embeddings` resizes both.
        self.decoder = nn.Linear(hidden, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias
    def forward(self, features, **kwargs):
        hidden_states = self.layer_norm(gelu(self.dense(features)))
        # Project back to vocabulary size (decoder carries the tied bias).
        return self.decoder(hidden_states)
# Copied from transformers.modeling_roberta.RobertaLMHead with config.vocab_size->config.num_token_error
class LecbertTECHead(nn.Module):
    """Token-error-classification head: identical layout to the LM head but
    projecting to ``config.num_token_error`` classes instead of the vocab."""
    def __init__(self, config):
        super().__init__()
        width = config.hidden_size
        self.dense = nn.Linear(width, width)
        self.layer_norm = nn.LayerNorm(width, eps=config.layer_norm_eps)
        self.decoder = nn.Linear(width, config.num_token_error, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.num_token_error))
        # Tie the standalone bias onto the decoder (mirrors the LM head).
        self.decoder.bias = self.bias
    def forward(self, features, **kwargs):
        transformed = gelu(self.dense(features))
        # Project back to the per-token error-class logits.
        return self.decoder(self.layer_norm(transformed))
class LecbertForPreTraining(RobertaPreTrainedModel):
    """RoBERTa pretraining model with three joint objectives:

    - masked language modeling on the original sentences,
    - token-error classification on the synonym/antonym variants,
    - a sentence-level score separating original-vs-synonym pairs (label 1)
      from original-vs-antonym pairs (label 0),

    combined with learned per-task log-variance weights (``log_vars``).
    Expects batches laid out by DataCollatorForLEC:
    ``[original | synonym | antonym]`` along dim 0.
    """
    authorized_missing_keys = [r"position_ids"]
    def __init__(self, config):
        super().__init__(config)
        self.roberta = RobertaModel(config)
        self.mlm_head = LecbertLMHead(config)
        self.tokn_classifier = LecbertTECHead(config)
        # One learned log-variance per objective (mlm, tec, sec).
        self.log_vars = nn.Parameter(torch.zeros(3))
        self.init_weights()
    def get_output_embeddings(self):
        return self.mlm_head.decoder
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        antonym_ids=None,
        antonym_label=None,
        synonym_ids=None,
        synonym_label=None,
        **kwargs,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            For the first third of the batch (original sentences): masked-LM
            labels in ``[-100, 0, ..., config.vocab_size]``; positions set to
            ``-100`` are ignored and the loss is only computed on the rest.
            For the remaining two thirds (synonym/antonym variants):
            token-error-classification labels in ``[-100, 0, 1, 2]`` as
            produced by ``DataCollatorForLEC``:
            - 0 indicates the token is the original token,
            - 1 indicates a synonym-type replacement (synonym/hypernym/inflection),
            - 2 indicates an antonym-type replacement (antonym/random word).
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        """
        if "masked_lm_labels" in kwargs:
            warnings.warn(
                "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("masked_lm_labels")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Masked Language Model
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        # The collator stacks [original | synonym | antonym], so the true
        # batch size is a third of dim 0.
        batch_size = input_ids.size(0) // 3
        ori_seq, syn_ant_seq = sequence_output[:batch_size], sequence_output[batch_size:]
        mlm_labels, tec_labels = labels[:batch_size], labels[batch_size:]
        mlm_scores = self.mlm_head(ori_seq)
        tec_scores = self.tokn_classifier(syn_ant_seq)
        ori_sen, syn_sen, ant_sen = pooled_output[:batch_size], pooled_output[batch_size:batch_size*2], pooled_output[batch_size*2:]
        # Sentence-pair score: sigmoid of the mean elementwise product of the
        # pooled representations.
        ori_syn_rel = torch.sigmoid(torch.mean(ori_sen * syn_sen, dim=-1, keepdim=True))
        ori_ant_rel = torch.sigmoid(torch.mean(ori_sen * ant_sen, dim=-1, keepdim=True))
        sec_scores = torch.cat((ori_syn_rel, ori_ant_rel), dim=0)
        # Targets: original/synonym pairs -> 1, original/antonym pairs -> 0.
        sec_labels = torch.cat((torch.ones(batch_size), torch.zeros(batch_size)), dim=0).to(labels.device)
        total_loss = None
        if labels is not None:
            loss_tok = CrossEntropyLoss()
            mlm_loss = loss_tok(mlm_scores.view(-1, self.config.vocab_size), mlm_labels.view(-1))
            tec_loss = loss_tok(tec_scores.view(-1, self.config.num_token_error), tec_labels.view(-1))
            loss_sen = BCELoss()
            sec_loss = loss_sen(sec_scores.view(-1), sec_labels.view(-1))
            # total_loss = mlm_loss + tec_loss + sec_loss
            # Learned task weighting: each loss is scaled by exp(-log_var) and
            # the (clamped, non-negative) log_var is added as a regularizer.
            total_loss = torch.exp(-self.log_vars[0]) * mlm_loss + torch.clamp(self.log_vars[0], min=0) + \
                         torch.exp(-self.log_vars[1]) * tec_loss + torch.clamp(self.log_vars[1], min=0) + \
                         torch.exp(-self.log_vars[2]) * sec_loss + torch.clamp(self.log_vars[2], min=0)
            #print(mlm_loss.item(), tec_loss.item(), sec_loss.item())
        if not return_dict:
            output = (mlm_scores,) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return LecbertForPretrainingOutput(
            loss=total_loss,
            prediction_logits=mlm_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@dataclass
class LecbertForPretrainingOutput(ModelOutput):
    """Output type of :class:`LecbertForPreTraining`."""
    # Combined, uncertainty-weighted pretraining loss (set when labels are given).
    loss: Optional[torch.FloatTensor] = None
    # MLM logits for the original sentences, shape (batch, seq_len, vocab_size).
    prediction_logits: torch.FloatTensor = None
    # Passed through from the underlying RobertaModel when requested.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
|
{"/src/lecbert/__init__.py": ["/src/lecbert/configuration.py", "/src/lecbert/datacollator.py", "/src/lecbert/modeling.py", "/src/lecbert/tokenization.py"]}
|
30,418
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__pyfimm.py
|
'''
pyFIMM - main module
See help on the main module, `help(pyFIMM)`, for usage info.
In this file are the pyFIMM global parameters - set_wavelength, set_N etc.
See __Classes.py for the higher-level classes, such as Project, Node, Material, Layer, Slice and Section.
Waveguide, Circ and Device classes/functions are in their respective separate files.
'''
'''See file __Waveguide.py for the Waveguide class & rectangular WG funcs.
-- Demis 2014-12-31'''
'''See file __Mode.py for the Mode class.
-- Demis 2014-12-31 '''
'''See file __Device.py for the Device class.
-- Demis 2014-12-31 '''
'''See file __Circ.py for Circ class & other cylindrical waveguide functions, such as Cylindrical global params (Np, Nm etc.).
-- Demis 2015-01-03'''
''' See file __Tapers.py for Taper class & WG Lens class & related functions.
-- Demis 2015-01-26'''
#import PhotonDesignLib.pdPythonLib as pd # moved into __globals.py to eliminate circular import
#fimm = pd.pdApp()
#fimm.ConnectToApp() # moved into connect()
from __globals import * # import global vars & FimmWave connection object `fimm`
from __Classes import * # import higher-level classes
#import numpy as np
#import datetime as dt # for date/time strings
import os.path # for path manipulation
####################################################################################
# Fimmwave General Functions
####################################################################################
def connect(hostname='localhost', port=5101):
    '''Open connection to the Fimmwave application.
    Parameters
    ----------
    hostname : string, optional; address/hostname to computer (default= 'localhost')
    port : int, optional; port on host computer (default= 5101)
    calls pdPythonLib.ConnectToApp(hostname = 'localhost',portNo = 5101)
    '''
    #in pdPythonLib: ConnectToApp(self,hostname = 'localhost',portNo = 5101)
    fimm.ConnectToApp(hostname=hostname, portNo=port)
    '''Check the connection: '''
    # Issue a trivial query; any failure means the app/license is unavailable,
    # which is surfaced as an IOError below.
    try:
        NumSubnodes = int( fimm.Exec("app.numsubnodes()") )
        print "Connected! (%i Project nodes found)"%NumSubnodes
    except:
        ErrStr = "Unable to connect to Fimmwave app - make sure it is running & license is active."
        raise IOError(ErrStr)
def disconnect():
    '''Terminate the connection to the FimmWave Application & delete the object.

    Deleting the module-level `pd` object lets pdPythonLib run its cleanup
    (closing the TCP link) in its destructor. After this call, `connect()`
    cannot be used again without re-importing the module.
    '''
    global pd # use this module-level variable. Dunno why the `global` declaration is only needed in THIS module function (not others!), in order to delete it...
    del pd # pdPythonLib does some cleanup upon del()'ing
def exitfimmwave():
    '''Ask the FimmWave application itself to quit.'''
    cmd = "app.exit"
    fimm.Exec(cmd)
def Exec(string, vars=None):
    '''Send a raw command to the fimmwave application.

    Parameters
    ----------
    string : str
        The raw FimmWave command to execute.
    vars : list, optional
        Optional list of arguments for the command (default: none).
        See `help(<pyfimm>.PhotonDesignLib.pdPythonLib.Exec)` for more info.

    Returns
    -------
    The FimmWave reply, passed through strip_array()/strip_text() to remove
    the trailing `\\n\\x00` EOL/EOF characters FimmWave appends to strings.
    '''
    # Mutable default arguments (`vars=[]`) are shared between calls in
    # Python; use the None sentinel instead.
    if vars is None: vars = []
    out = fimm.Exec(string, vars)
    if isinstance(out, list): out = strip_array(out)
    if isinstance(out, str): out = strip_text(out)
    #if out[-2:] == '\n\x00': out = out[:-2] # strip off FimmWave EOL/EOF chars (handled by strip_text).
    return out
def close_all(warn=True):
    '''Close all open Projects, discarding unsaved changes.
    Parameters
    ----------
    warn : { True | False }, optional
        True by default, which will prompt user for confirmation.
    '''
    nodestring="app" # top-level scope: subnodes of "app" are whole Projects
    N_nodes = int( fimm.Exec(nodestring+".numsubnodes()") )
    wstr = "Will close" if warn else "Closing"
    WarnStr = "WARNING: %s all the following open Projects,\n\tdiscarding unsaved changes:\n"%(wstr)
    SNnames = [] #subnode names, collected for the warning message
    for i in range(N_nodes):
        # FimmWave subnode lists are 1-indexed, hence i+1
        SNnames.append( strip_txt( fimm.Exec(nodestring+r".subnodes["+str(i+1)+"].nodename()") ) )
        WarnStr = WarnStr + "\t%s\n"%(SNnames[-1])
    print WarnStr
    if warn:
        # get user confirmation:
        cont = raw_input("Are you sure? [y/N]: ").strip().lower()
    else:
        cont = 'y'
    if cont == 'y':
        fString = ''
        for i in range(N_nodes):
            # Always close subnodes[1]: each close() shifts the remaining
            # nodes down, so index 1 is the next node every time.
            fString += nodestring + ".subnodes[1].close()\n"
        fimm.Exec( fString )
    else:
        print "close_all(): Cancelled."
#end close_all()
####################################
# Fimmwave Global Parameters ####
####################################
def set_working_directory(wdir):
    '''Set FimmWave working directory. Usually set to same dir as your Python script in order to find FimmWave output files.'''
    #if DEBUG(): print("set_working_directory(): sending setwdir() command:")
    fimm.Exec("app.setwdir("+str(wdir)+")")
    #if DEBUG(): print("set_working_directory(): finished setwdir().")
def get_working_directory():
    '''Get fimmwave working directory, as string.'''
    # single-argument print() behaves identically on Python 2 and 3
    # (was a Python-2-only `print` statement)
    print("Warning: wdir string may not be in standard format.")
    return fimm.Exec("app.wdir")[:-2] # strip off the last two EOF characters
def set_wavelength(lam0):
    '''Set the simulated optical wavelength (microns).'''
    fimm.Exec("app.defaultlambda = {"+str(lam0)+"}")
def get_wavelength():
    '''Return the simulation's optical wavelength (microns).'''
    return fimm.Exec("app.defaultlambda")
def wavelength():
    '''Deprecated: for backwards compatibility only.
    Return the simulation's optical wavelength (microns).'''
    # was a Python-2-only `print` statement; single-arg print() works on 2 & 3
    print("DeprecationWarning: Use get_wavelength() instead.")
    return get_wavelength()
def set_material_database(path):
    '''Set the path to the material database (*.mat) file. Only needed if you are defining materials using this database ('mat'/material type waveguides instead of 'rix'/refractive index). This sets a global materials file that will be used in every waveguide and device that is built.
    Although waveguide nodes can specify their own (different) materials files, it is recommended that a global file be used instead since FimmProp Devices do not accept multiple materials files (to avoid confusion and identically-named materials from different files). The single global file can be set to `include` any other materials files.

    Parameters
    ----------
    path : string
        Absolute or relative path to the material database file. `path` will be automatically converted to an absolute path, as a workaround to a FimmProp Device Node bug that causes it to only accept absolute paths.

    Raises
    ------
    IOError : if no file exists at `path`.
    '''
    global global_matDB
    import os
    path = os.path.abspath(path) # convert to absolute path
    if os.path.isfile(path):
        global_matDB = str(path)
    else:
        ErrStr = "Material database file does not exist at the specified path `%s`" %(path)
        raise IOError(ErrStr)
    # was a Py2 multi-arg `print` statement (would print a tuple under print());
    # use %-formatting so output is sane on both Python 2 and 3.
    if DEBUG(): print("matDB = %s" % global_matDB)
def get_material_database():
    '''Get path to global material database file.

    Returns
    -------
    path : string
        Absolute path to the material database file that will be used when
        building nodes, or None if never set.
    '''
    global global_matDB
    try:
        global_matDB
    except NameError:
        # was a bare `except:` -- only NameError (never-set global) is expected here
        if DEBUG(): print("unset global_matDB --> None")
        global_matDB = None
    return global_matDB
############################################
#### Mode Solver Parameters ####
############################################
def set_eval_type(eval_type):
    '''FIMMWAVE will label modes by the effective index (n_eff) or propagation constant (beta).

    Parameters
    ----------
    eval_type : { 'n_eff' | 'beta' }, case insensitive
        Equivalent strings for 'n_eff': 'neff', 'effective index'
        Equivalent strings for 'beta': 'propagation constant'

    Raises
    ------
    ValueError : for any other string.

    Examples
    --------
    >>> set_eval_type("n_eff")
    '''
    # lower-case once instead of per-comparison
    ev = eval_type.lower()
    if ev in ('n_eff', 'neff', 'effective index'):
        fimm.Exec("app.evaltype = 1")
    elif ev in ('beta', 'propagation constant'):
        fimm.Exec("app.evaltype = 0")
    else:
        raise ValueError('invalid input for eval_type')
def get_eval_type():
    '''Return the string "n_eff" or "beta" corresponding to the FimmWave mode labelling scheme ("" if unrecognized). See also set_eval_type().'''
    eval_type = fimm.Exec("app.evaltype")
    if eval_type == 1:
        return 'n_eff'
    elif eval_type == 0:
        return 'beta'
    else:
        return ''
def eval_type():
    '''Deprecated: use get_eval_type() instead.'''
    # was a Python-2-only `print` statement
    print("eval_type(): DeprecationWarning: Use get_eval_type() instead.")
    return get_eval_type()
def set_mode_finder_type(mode_finder_type):
    '''Set the FimmWave mode finder algorithm.

    Parameters
    ----------
    mode_finder_type : { 'stable' | 'fast' }, case insensitive

    Raises
    ------
    ValueError : for any other string. (Previously this only printed a
        message and silently left the setting unchanged -- inconsistent
        with set_eval_type(), which raises.)
    '''
    mft = mode_finder_type.lower()
    if mft == 'stable':
        fimm.Exec("app.homer_opt = 1")
    elif mft == 'fast':
        fimm.Exec("app.homer_opt = 0")
    else:
        raise ValueError('invalid input for mode_finder_type')
def get_mode_finder_type():
    '''Return "stable" or "fast" as string ("" if unrecognized).
    Corresponds to the fimmwave parameter: app.homer_opt
    '''
    mode_finder_type = fimm.Exec("app.homer_opt")
    if mode_finder_type == 1:
        return 'stable'
    elif mode_finder_type == 0:
        return 'fast'
    else:
        return ''
def mode_finder_type():
    '''Deprecated: use get_mode_finder_type() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: mode_finder_type(): Use get_mode_finder_type() instead.")
    return get_mode_finder_type()
def set_solver_speed(string):
    '''Set the mode-solver speed/accuracy trade-off.

    Parameters
    ----------
    string : { 'best' | 'fast' }, case insensitive
        'best' (the default) favours accuracy; 'fast' favours speed.
        Used to set the fimmwave param:
        >>> NodeStr.evlist.mpl.speed = <solverspeed>

    Raises
    ------
    ValueError : for any other string. (Previously this printed a message
        that wrongly referred to `mode_finder_type` and silently returned.)
    '''
    global global_solver_speed
    if string.lower() == 'best':
        global_solver_speed = 0
    elif string.lower() == 'fast':
        global_solver_speed = 1
    else:
        raise ValueError('invalid input for solver_speed: %s' % string)
def get_solver_speed():
    '''Return 'best' or 'fast' as string.
    Defaults to 'best' if unset.'''
    global global_solver_speed
    try:
        global_solver_speed
    except NameError:
        global_solver_speed = 0 # default value if unset
    if global_solver_speed == 0:
        return 'best'
    elif global_solver_speed == 1:
        return 'fast'
    # fallback: return the raw value if the global was set out-of-band
    return global_solver_speed
def set_mode_solver(solver):
    '''Set the mode solver. Takes a string of up to three space-separated keywords.

    Parameters
    ----------
    solver : string, case insensitive
        For rectangular waveguides, use a combination of the following to create the three-keyword string:
            "vectorial/semivecTE/semivecTM FDM/FMM real/complex"
        FDM = Finite Difference Method; FMM = Field Mode Matching method.
        Both of these solvers take all permutations of vectoriality & real/complex,
        eg. "semivecTE FMM complex" or "vectorial FDM real".
        For Cylindrical Waveguides, use any of these options:
            "vectorial/semivecTE/semivecTM FDM/GFS/Gaussian/SMF real/complex"
        where the FDM solver is always "vectorial", and real/complex is only applicable
        to the FDM solver. GFS takes 'vectorial' or 'scalar' but not 'semivec'.
        Inapplicable keywords will raise an error in FimmWave.
            FDM = Finite-Difference Method
            GFS = General Fiber Solver
            Gaussian = Gaussian Mode Fiber solver (unsupported)
            SMF = Single-Mode Fiber
        All possible cylindrical options:
            FDM: "vectorial FDM real", "vectorial FDM complex"
            GFS: "vectorial GFS real", "scalar GFS real"
            SMF: "Vectorial SMF", "SemivecTE SMF", "SemivecTM SMF"
            Gaussian (unsupported): "Vectorial Gaussian", "SemivecTE Gaussian", "SemivecTM Gaussian"

    Raises
    ------
    ValueError : if `solver` is empty or contains more than three words.
    '''
    global global_mode_solver
    parts = solver.split()
    #### should do a RegEx to parse the mode solver params, so order of terms is arbitrary
    # Only the word count is validated here; FimmWave rejects inapplicable keywords.
    if len(parts) > 3 or len(parts)==0: raise ValueError( "Expected string separated by spaces, with max 3 words.\n`slvr`="+str( solver ) )
    global_mode_solver = solver
def get_mode_solver():
    '''Return mode solver as string, or None if unset (each waveguide type
    will then use its own default solver).
    See set_mode_solver() for available parameters.
    '''
    global global_mode_solver
    try:
        global_mode_solver
    except NameError:
        global_mode_solver = None # never set: no global override
    return global_mode_solver
def mode_solver():
    '''Deprecated: use get_mode_solver() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: mode_solver(): Use get_mode_solver() instead.")
    return get_mode_solver()
def set_NX(mnx):
    '''Set # of horizontal grid points.

    Parameters
    ----------
    mnx : int
        Number of horizontal grid points in mode representation/solver (depending on solver). Defaults to 60.
    '''
    global global_NX
    global_NX = mnx
def get_NX():
    '''Return # of horizontal grid points. Defaults to 60 if unset.'''
    global global_NX
    try:
        global_NX
    except NameError:
        global_NX = 60 # default if never set
    return global_NX
def NX():
    '''Deprecated: use get_NX() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: NX(): Use get_NX() instead.")
    return get_NX()
def set_NY(mny):
    '''Set # of vertical grid points.

    Parameters
    ----------
    mny : int
        Number of vertical grid points in mode representation/solver (depending on solver). Defaults to 60.
    '''
    global global_NY
    global_NY = mny
def get_NY():
    '''Return # of vertical grid points. Defaults to 60 if unset.'''
    global global_NY
    try:
        global_NY
    except NameError:
        global_NY = 60 # default if never set
    return global_NY
def NY():
    '''Deprecated: use get_NY() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: NY(): Use get_NY() instead.")
    return get_NY()
def set_N(mn):
    '''Set # of modes to solve for.
    For cylindrical waveguides, this sets the number of Axial Quantum Number modes to solve for. set_Np() chooses the polarization modes.

    Parameters
    ----------
    mn : int >=1
        Number of modes to solve for. Defaults to 10.
    '''
    global global_N
    global_N = mn
def get_N():
    '''Return # of modes to solve for.
    Defaults to 10 if unset.'''
    global global_N
    try:
        global_N
    except NameError:
        global_N = 10 # default if never set
    return global_N
def N():
    '''Deprecated: use get_N() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: N(): Use get_N() instead.")
    return get_N()
def set_vertical_symmetry(symmtry):
    '''Set the vertical symmetry setting for the mode solver (was undocumented).'''
    global global_vertical_symmetry
    global_vertical_symmetry = symmtry
def get_vertical_symmetry():
    '''Return the vertical symmetry setting; None if unset.'''
    global global_vertical_symmetry
    try:
        global_vertical_symmetry
    except NameError:
        global_vertical_symmetry = None # never set
    return global_vertical_symmetry
def vertical_symmetry():
    '''Deprecated: use get_vertical_symmetry() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: vertical_symmetry(): Use get_vertical_symmetry() instead.")
    return get_vertical_symmetry()
def set_horizontal_symmetry(symmtry):
    '''Set the horizontal symmetry setting for the mode solver (was undocumented).'''
    global global_horizontal_symmetry
    global_horizontal_symmetry = symmtry
def get_horizontal_symmetry():
    '''Return the horizontal symmetry setting; None if unset.'''
    global global_horizontal_symmetry
    try:
        global_horizontal_symmetry
    except NameError:
        global_horizontal_symmetry = None # never set
    return global_horizontal_symmetry
def horizontal_symmetry():
    '''Deprecated: use get_horizontal_symmetry() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: horizontal_symmetry(): Use get_horizontal_symmetry() instead.")
    return get_horizontal_symmetry()
def set_min_TE_frac(mintefrac):
    '''Set minimum TE fraction to constrain mode solver to a particular polarization.'''
    global global_min_TE_frac
    global_min_TE_frac = mintefrac
def get_min_TE_frac():
    '''Return minimum TE fraction. Defaults to 0 if unset.'''
    global global_min_TE_frac
    try:
        global_min_TE_frac
    except NameError:
        global_min_TE_frac = 0 # default if never set
    return global_min_TE_frac
def min_TE_frac():
    '''Deprecated: use get_min_TE_frac() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: min_TE_frac(): Use get_min_TE_frac() instead.")
    return get_min_TE_frac()
def set_max_TE_frac(maxtefrac):
    '''Set maximum TE fraction to constrain mode solver to a particular polarization.'''
    global global_max_TE_frac
    global_max_TE_frac = maxtefrac
def get_max_TE_frac():
    '''Return maximum TE fraction. Defaults to 100 if unset.'''
    global global_max_TE_frac
    try:
        global_max_TE_frac
    except NameError:
        global_max_TE_frac = 100 # default if never set
    return global_max_TE_frac
def max_TE_frac():
    '''Deprecated: use get_max_TE_frac() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: max_TE_frac(): Use get_max_TE_frac() instead.")
    return get_max_TE_frac()
def set_min_EV(min_ev):
    '''Set the minimum eigenvalue bound for the mode solver (was undocumented).'''
    global global_min_ev
    global_min_ev = min_ev
def get_min_EV():
    '''Return the minimum eigenvalue bound; None if unset.'''
    global global_min_ev
    try:
        global_min_ev
    except NameError:
        global_min_ev = None # never set
    return global_min_ev
def min_EV():
    '''Deprecated: use get_min_EV() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: min_EV(): Use get_min_EV() instead.")
    return get_min_EV()
def set_max_EV(max_ev):
    '''Set the maximum eigenvalue bound for the mode solver (was undocumented).'''
    global global_max_ev
    global_max_ev = max_ev
def get_max_EV():
    '''Return the maximum eigenvalue bound; None if unset.'''
    global global_max_ev
    try:
        global_max_ev
    except NameError:
        global_max_ev = None # never set
    return global_max_ev
def max_EV():
    '''Deprecated: use get_max_EV() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: max_EV(): Use get_max_EV() instead.")
    return get_max_EV()
def set_RIX_tol(rixTol):
    '''Set the refractive-index tolerance for the mode solver (was undocumented).'''
    global global_rix_tol
    global_rix_tol = rixTol
def get_RIX_tol():
    '''Return the refractive-index tolerance; None if unset.'''
    global global_rix_tol
    try:
        global_rix_tol
    except NameError:
        global_rix_tol = None # never set
    return global_rix_tol
def RIX_tol():
    '''Deprecated: use get_RIX_tol() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: RIX_tol(): Use get_RIX_tol() instead.")
    return get_RIX_tol()
def set_N_1d(n1d):
    '''Set # of 1D modes found in each slice (FMM solver only).'''
    global global_n1d
    global_n1d = n1d
def get_N_1d():
    '''Return # of 1D modes found in each slice (FMM solver only); None if unset.'''
    global global_n1d
    try:
        global_n1d
    except NameError:
        global_n1d = None # never set
    return global_n1d
def N_1d():
    '''Deprecated: use get_N_1d() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: N_1d(): Use get_N_1d() instead.")
    return get_N_1d()
def set_mmatch(match):
    '''Set the mode-matching interface position (FMM solver).

    Parameters
    ----------
    match : float
        See Fimmwave Manual section 5.4.12.
        If mmatch is set to zero then it will be chosen automatically.
        If mmatch is set to e.g. 3.5 then the interface will be set in the center of the third slice from the left.
    '''
    global global_mmatch
    global_mmatch = match
def get_mmatch():
    '''Return mmatch (None if unset) - see set_mmatch() for more info.'''
    global global_mmatch
    try:
        global_mmatch
    except NameError:
        global_mmatch = None # never set
    return global_mmatch
def mmatch():
    '''Deprecated: use get_mmatch() instead.'''
    # was a Python-2-only `print` statement
    print("Deprecation Warning: mmatch(): Use get_mmatch() instead.")
    return get_mmatch()
def set_temperature(temp):
    '''Store the global temperature in degrees Celsius.

    Parameters
    ----------
    temp : float
        Global temperature in degrees Celsius. Eventually, will be able to
        set temperature per-Waveguide to override this. If unset, the
        temperature is left to the FimmWave default.

    NOTE: only stored module-side; not yet propagated to FimmWave nodes.
    '''
    # was a Python-2-only `print` statement; single-arg print() works on 2 & 3
    print("WARNING: set_temperature(): Not implemented yet! Does not currently set the temperature in FimmWave nodes.")
    global global_temperature
    global_temperature = temp
def get_temperature():
    '''Return global temperature in degrees Celsius. Returns <None> if unset.'''
    global global_temperature
    try:
        global_temperature
    except NameError:
        global_temperature = None # never set
    return global_temperature
#end get_temperature
def get_amf_data(modestring, filename="temp", precision=r"%10.6f", maxbytes=500):
    '''Return the various mode profile data from writing an AMF file.
    This returns data for all field components of a mode profile, the start/end x/y values in microns, number of data points along each axis and some other useful info.
    The AMF file and accompanying temporary files will be saved into the directory designated by the variable `AMF_Folder_Str()`, which is typically something like "pyFIMM_temp/".
    Temporary files are created in order to extract the commented lines.
    This function currently does NOT return the field values, as they are much more efficiently acquired by the FimmWave functions get_field()
    Parameters
    ----------
    modestring : str
        The entire FimmWave string required to produce the amf file, omitting the ".writeamf(...)" function itself, typically a reference to the individual mode to be output.  An example would be:
            app.subnodes[7].subnodes[1].evlist.list[1].profile.data
    filename : str, optional
        Desired filename for the AMF-file & output.
    precision : str, optional
        String passed to the FimmWave function `writeamf()` to determine output precision of field values, as a standard C-style format string.  Defaults to "%10.6f", specifying a floating point number with minimum 10 digits and 6 decimal points.
    maxbytes : int, optional
        How many bytes to read from the AMF file.  This prevents reading all the field data, and speeds up execution/memory usage.  Defaults to 500 bytes, which typically captures the whole AMF file header info.
    Returns
    -------
    A dictionary is returned containing each value found in the AMF file header.
    {'beta': (5.980669+0j),     # Beta (propagation constant), as complex value
     'hasEX': True,             # does the AMF file contain field values for these components?
     'hasEY': True,
     'hasEZ': True,
     'hasHX': True,
     'hasHY': True,
     'hasHZ': True,
     'isWGmode': True,          # is this a waveguide mode?
     'iscomplex': False,        # are the field values (and Beta) complex?
     'lambda': 1.55,            # wavelength
     'nx': 100,                 # Number of datapoints in the x/y directions
     'ny': 100,
     'xmax': 14.8,              # x/y profile extents, in microns
     'xmin': 0.0,
     'ymax': 12.1,
     'ymin': 0.0}
    Examples
    --------
    >>> ns = "app.subnodes[7].subnodes[1].evlist.list[1].profile.data"
    >>> fs = "pyFIMM_temp\mode1_pyFIMM.amf"
    >>> data = pf.get_amf_data(ns, fs)
    '''
    # Sample AMF file header, for reference (what the regexes below parse):
    '''
    100 100 //nxseg nyseg
    0.000000 14.800000 0.000000 12.100000 //xmin xmax ymin ymax
    1 1 1 1 1 1 //hasEX hasEY hasEZ hasHX hasHY hasHZ
    6.761841 0.000000 //beta
    1.550000 //lambda
    0 //iscomplex
    1 //isWGmode
    '''
    import re # RegEx module
    # write an AMF file with all the field components.
    if not filename.endswith(".amf"): filename += ".amf" # name of the files
    # SubFolder to hold temp files:
    if not os.path.isdir(str( AMF_FolderStr() )):
        os.mkdir(str( AMF_FolderStr() )) # Create the new folder if needed
    mode_FileStr = os.path.join( AMF_FolderStr(), filename )
    if DEBUG(): print "Mode.plot(): " + modestring + ".writeamf("+mode_FileStr+",%s)"%precision
    fimm.Exec(modestring + ".writeamf("+mode_FileStr+",%s)"%precision)
    ## AMF File Clean-up
    #import os.path, sys # moved to the top
    fin = open(mode_FileStr, "r")
    # NOTE(review): open() raises IOError on failure, so this check never fires.
    if not fin: raise IOError("Could not open '"+ mode_FileStr + "' in " + sys.path[0] + ", Type: " + str(fin))
    #data_list = fin.readlines() # put each line into a list element
    data_str = fin.read( maxbytes ) # read file as string, up to maxbytes.
    fin.close()
    out = {} # the data to return, as dictionary
    ''' Grab the data from the header lines '''
    # how much of the data to search (headers only):
    s = [0, 2000] # just in case the entire file gets read in later, to grab field data
    # should disable this once we know we don't need the AMF field data
    # Set regex pattern to match:
    ''' 100 100 //nxseg nyseg'''
    pat = re.compile( r'\s*(\d+)\s*(\d+)\s*//nxseg nyseg' )
    m = pat.search( data_str[s[0]:s[1]] ) # perform the search
    # m will contain any 'groups' () defined in the RegEx pattern.
    if m:
        print 'segment counts found:', m.groups() #groups() prints all captured groups
        nx = int( m.group(1) ) # grab 1st group from RegEx & convert to int
        ny = int( m.group(2) )
        print '(nx, ny) --> ', nx, ny
        out['nx'],out['ny'] = nx, ny
    # Set regex pattern to match:
    ''' 0.000000 14.800000 0.000000 12.100000 //xmin xmax ymin ymax'''
    # NOTE(review): these numeric patterns have no sign part, so negative
    # extents would fail to match -- TODO confirm AMF extents are always >= 0.
    pat = re.compile( r'\s*(\d+\.?\d*)\s*(\d+\.?\d*)\s*(\d+\.?\d*)\s*(\d+\.?\d*)\s*//xmin xmax ymin ymax' )
    m = pat.search( data_str[s[0]:s[1]] ) # perform the search
    # m will contain any 'groups' () defined in the RegEx pattern.
    if m:
        print 'window extents found:',m.groups() #groups() prints all captured groups
        xmin = float( m.group(1) ) # grab 1st group from RegEx & convert to int
        xmax = float( m.group(2) )
        ymin = float( m.group(3) )
        ymax = float( m.group(4) )
        print '(xmin, xmax, ymin, ymax) --> ', xmin, xmax, ymin, ymax
        out['xmin'],out['xmax'],out['ymin'],out['ymax'] = xmin, xmax, ymin, ymax
    # Set regex pattern to match:
    ''' 1 1 1 1 1 1 //hasEX hasEY hasEZ hasHX hasHY hasHZ'''
    pat = re.compile( r'\s*(\d)\s*(\d)\s*(\d)\s*(\d)\s*(\d)\s*(\d)\s*//hasEX hasEY hasEZ hasHX hasHY hasHZ' )
    m = pat.search( data_str[s[0]:s[1]] ) # perform the search
    # m will contain any 'groups' () defined in the RegEx pattern.
    if m:
        print 'components found:',m.groups() #groups() prints all captured groups
        hasEX = bool( int(m.group(1)) ) # grab 1st group from RegEx & convert to int
        hasEY = bool( int(m.group(2)) )
        hasEZ = bool( int(m.group(3)) )
        hasHX = bool( int(m.group(4)) )
        hasHY = bool( int(m.group(5)) )
        hasHZ = bool( int(m.group(6)) )
        print '(hasEX, hasEY, hasEZ, hasHX, hasHY, hasHZ) --> ', hasEX, hasEY, hasEZ, hasHX, hasHY, hasHZ
        out['hasEX'],out['hasEY'],out['hasEZ'],out['hasHX'],out['hasHY'],out['hasHZ'] \
            = hasEX, hasEY, hasEZ, hasHX, hasHY, hasHZ
    # Set regex pattern to match:
    ''' 6.761841 0.000000 //beta'''
    pat = re.compile( r'\s*(\d+\.?\d*)\s*(\d+\.?\d*)\s*//beta' )
    m = pat.search( data_str[s[0]:s[1]] ) # perform the search
    # m will contain any 'groups' () defined in the RegEx pattern.
    if m:
        print 'beta found:',m.groups() #groups() prints all captured groups
        beta_r = float( m.group(1) ) # grab 1st group from RegEx & convert to int
        beta_i = float( m.group(2) )
        beta = beta_r + beta_i*1j # reassemble real/imag parts into a complex value
        print 'beta --> ', beta
        out['beta'] = beta
    # Set regex pattern to match:
    ''' 1.550000 //lambda'''
    pat = re.compile( r'\s*(\d+\.?\d*)\s*//lambda' )
    m = pat.search( data_str[s[0]:s[1]] ) # perform the search
    # m will contain any 'groups' () defined in the RegEx pattern.
    if m:
        print 'lambda found:',m.groups() #groups() prints all captured groups
        lam = float( m.group(1) ) # grab 1st group from RegEx & convert to int
        print 'lambda --> ', lam
        out['lambda'] = lam
    # Set regex pattern to match:
    ''' 0 //iscomplex'''
    pat = re.compile( r'\s*(\d)\s*//iscomplex' )
    m = pat.search( data_str[s[0]:s[1]] ) # perform the search
    # m will contain any 'groups' () defined in the RegEx pattern.
    if m:
        print 'iscomplex found:',m.groups() #groups() prints all captured groups
        iscomplex = bool( int(m.group(1)) ) # grab 1st group from RegEx & convert to int
        print 'iscomplex --> ', iscomplex
        out['iscomplex'] = iscomplex
    # Set regex pattern to match:
    ''' 1 //isWGmode'''
    pat = re.compile( r'\s*(\d)\s*//isWGmode' )
    m = pat.search( data_str[s[0]:s[1]] ) # perform the search
    # m will contain any 'groups' () defined in the RegEx pattern.
    if m:
        print 'isWGmode found:',m.groups() #groups() prints all captured groups
        isWGmode = bool( int(m.group(1)) ) # grab 1st group from RegEx & convert to int
        print 'isWGmode --> ', isWGmode
        out['isWGmode'] = isWGmode
    return out
    # Dead code below (unreachable, after return): kept for reference only.
    """
    # Delete File Header
    nxy_data = data_list[1]
    xy_data = data_list[2]
    slvr_data = data_list[6]
    del data_list[0:9]
    # strip the comment lines from the nxy file:
    nxyFile = os.path.join( AMF_FolderStr(), "mode" + str(num) + "_pyFIMM_nxy.txt")
    fout = open(nxyFile, "w")
    fout.writelines(nxy_data)
    fout.close()
    nxy = pl.loadtxt(nxyFile, comments='//')
    nx = int(nxy[0])
    ny = int(nxy[1])
    xyFile = os.path.join( AMF_FolderStr(), "mode" + str(num) + "_pyFIMM_xy.txt")
    fout = open(xyFile, "w")
    fout.writelines(xy_data)
    fout.close()
    xy = pl.loadtxt(xyFile, comments='//')
    slvrFile = os.path.join( AMF_FolderStr(), "mode" + str(num) + "_pyFIMM_slvr.txt")
    fout = open(slvrFile, "w")
    fout.writelines(slvr_data)
    fout.close()
    iscomplex = pl.loadtxt(slvrFile, comments='//')
    # Find Field Component
    if field_cpt_in == None:
        '''If unspecified, use the component with higher field frac.'''
        tepercent = fimm.Exec(self.modeString + "list[{" + str(num) + "}].modedata.tefrac")
        if tepercent > 50:
            field_cpt = 'Ex'.lower()
        else:
            field_cpt = 'Ey'.lower()
    #end if(field_cpt_in)
    if field_cpt == 'Ex'.lower():
        data = data_list[1:nx+2]
    elif field_cpt == 'Ey'.lower():
        data = data_list[(nx+2)+1:2*(nx+2)]
    elif field_cpt == 'Ez'.lower():
        data = data_list[2*(nx+2)+1:3*(nx+2)]
    elif field_cpt == 'Hx'.lower():
        data = data_list[3*(nx+2)+1:4*(nx+2)]
    elif field_cpt == 'Hy'.lower():
        data = data_list[4*(nx+2)+1:5*(nx+2)]
    elif field_cpt == 'Hz'.lower():
        data = data_list[5*(nx+2)+1:6*(nx+2)]
    else:
        ErrStr = 'Invalid Field component requested: ' + str(field_cpt)
        raise ValueError(ErrStr)
    del data_list
    # Resave Files
    fout = open(mode_FileStr+"_"+field_cpt.strip().lower(), "w")
    fout.writelines(data)
    fout.close()
    # Get Data
    if iscomplex == 1:
        field_real = pl.loadtxt(mode_FileStr, usecols=tuple([i for i in range(0,2*ny+1) if i%2==0]))
        field_imag = pl.loadtxt(mode_FileStr, usecols=tuple([i for i in range(0,2*ny+2) if i%2!=0]))
    else:
        field_real = pl.loadtxt(mode_FileStr)
    """
#end get_amf_data()
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,419
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__CavityMode.py
|
'''
pyFIMM.CavityMode
Operations on Cavity Modes (modes vs. Z). Created when the user requests:
>>> CavityObj.mode(0)
: returns a <pyFIMM CavityMode object>
Demis D. John, 2015, Praevium Research Inc.
To Do:
------
- Cavity.plot()
- plot lateral fields?
- zmin & zmax - account for LHS_Dev & RHS_Dev lengths etc.
'''
from __globals import * # import global vars & FimmWave connection object
# DEBUG() variable is also set in __globals
import numpy as np
import math
#from __pyfimm import DEBUG() # Value is set in __pyfimm.py
from __pyfimm import get_N, set_wavelength # get number of calculated modes
######## For Cavity.mode(n)... ########
class CavityMode(object):
'''CavityMode( CavityObj, ModeNum )
Class for selecting a Cavity Mode, similar to the Mode class used in `WG.mode(0)`.
Typically created via Cavity's `mode()` method, like so:
>>> Cavity.mode(0).plot()
Since Cavity.mode() returns a CavityMode object, this calls CavityMode.plot()
Parameters
----------
CavityObj : pyFIMM.Cavity object
The Cavity object to perform operations on.
ModeNum : integer, list of integers, or the string 'all', optional.
The Cavity mode number to work on. Default is 0.
May pass multiple modenumbers in a list, eg. `CavityMode([0,1,2])`
If the string 'all' (case insensitive) is passed, data will be returned for all calculated modes (as specified by get_N() - the number of calculated lateral modes per Section/Circ).
Attributes
----------
modenum : int or 'all'
Which lateral mode to manipulate.
wavelengths, eigenvalues, eigenvectors : numpy array
Wavelengths (passed to `Cavity.calc()`) & corresponding eigenvalues/eigenvectors at each.
The eigenvectors are the magnitudes/phases of each lateral mode needed in order to produce the resonant cavity field. The lateral modes (up to get_N() ) are the basis set of the eigenvalue problem.
For eigenvalues & eigenvectors, indexing is like so:
>>> eigenvalues[Imodenum][Iwavelength]
Where `wavelengths[Iwavelength]` tells you which wavelength you're inspecting, and `Imodenum` tells you which mode number you're inspecting.
Methods
-------
Please see help on a specific function via `help(CavityMode.theFunc)` for detailed up-to-date info on accepted arguments etc.
get_resonance_wavelengths():
Returns resonance wavelength(s) for selected modes.
`get_resonance_wavelength()` is a synonym.
get_resonance_eigenvalues():
        Returns resonance eigenvalue(s) (the round-trip amplitude & phase applied to a field) for this mode.
`get_resonance_eigenvalue()` is a synonym.
get_resonance_eigenvectors():
        Returns resonance eigenvector(s) (the magnitudes/phases of each central-section mode to get the above eigenvalues) for this mode.
`get_resonance_eigenvector()` is a synonym.
plot( component ):
Plot a component of this mode.
Supported components include:
'EigVals' - plot Eigenvalues versus wavelength.
Ex, Ey, Ez - Electric fields versus Z.
Hx, Hy, Hz - Magnetic Fields versus Z.
Px, Py, Pz - Poynting Vectors versus Z.
'index' or 'rix' - refractive index of cavity versus Z.
See `help(CavityMode.plot)` or `help(CavityObj.mode(0).plot)` for full help on the function, as there are more important details than mentioned here.
get_cavity_loss():
NOT IMPLEMENTED YET.
Return the cavity loss (equivalent to threshold gain) for this mode.
Examples
--------
CavityMode objects are typically Not called/instantiated from the CavityModes class directly, but instead as a sub-object of a Cavity `mode` method like so:
>>> CavityObj.mode(0).plot()
where `CavityObj.mode(0)` is the method `mode()` of the CavityObject which returns a CavityMode object (initialized with modenum=0), and `.plot()` is a method of this CavityMode object.
'''
    def __init__(self, CavObj, num):
        '''Takes Cavity object `CavObj` as input, and mode number `num` (default=0).
        Optionally, if num == 'all' will return data on all modes.'''
        self.Cavity = CavObj
        # Normalize `num` into a list of integer mode numbers in self.modenum:
        if isinstance(num, str):
            if num.lower() == 'all':
                #num = -1 # plot all modes
                self.modenum = range(0, get_N() ) # list of each modenumber calc'd
            else:
                ErrStr = 'CavityMode: Mode Number must be an integer, list of integers, or the string "all".'
                raise ValueError(ErrStr)
        elif isinstance(num, int):
            self.modenum = [num] # put num into a list
        else:
            try:
                # NOTE(review): bare except below also hides unrelated errors;
                # TypeError/ValueError is what int() conversion would raise.
                self.modenum = [int(x) for x in num] # check that we're able to create a list of integers
            except:
                ErrStr = 'CavityMode: Mode Number must be an integer, list of integers, or the string "all".'
                raise ValueError(ErrStr)
        #end if(num)
        # Per-mode data, one entry per requested mode number:
        self.eigenvalues = []
        self.eigenvectors = []
        self.wavelengths = []
        self.__resonance_wavelength = []
        self.__resonance_eigenvalue = []
        self.__resonance_eigenvector = []
        self.__resonance_loss = []
        for num in self.modenum:
            '''eigenvalues[i][ corresponds to the modenumber modenum[i]'''
            try:
                '''Make sure the Cavity has been calculated.'''
                CavObj.eigenvalues
                CavObj.eigenvectors
            except AttributeError:
                ErrStr = "EigenValues/EigenVectors not found - not calculated yet?  Try calling `Cavity.calc()` first."
                raise AttributeError(ErrStr)
            # Slice out this mode's column across all wavelengths
            # (presumably numpy arrays indexed [wavelength, mode] -- TODO confirm).
            self.eigenvalues.append( CavObj.eigenvalues[: , num] )
            self.eigenvectors.append( CavObj.eigenvectors[: , num] )
            self.wavelengths.append( CavObj.wavelengths ) # could just have one entry for this...
            self.__resonance_wavelength.append( CavObj.resWLs[num] )
            self.__resonance_eigenvalue.append( CavObj.resEigVals[num] )
            self.__resonance_eigenvector.append( CavObj.resEigVects[num] )
            self.__resonance_loss.append( CavObj.resLosses[num] )
    #end __init__
def get_field(self, component, wavelength, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0, direction=None, calc=True):
    '''Return the field specified by `component`, versus Z.
    2 arguments are required, `component` and `wavelength`.
    The fields returned are for the Cavity having input field set to the eigenvectors calculated at the given wavelength.

    Parameters
    ----------
    component : {'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' | 'Px' | 'Py' | 'Pz' | 'I' }, case-insensitive, required
        Return the specified field component along the Z direction.
        'E' is electric field, 'H' is magnetic field, 'P' is the Poynting vector, 'I' is Intensity, and 'x/y/z' chooses the component of each vector to return.
        'index', 'rix' or 'ri' will return the refractive index, a functionality provided by the more convenient function `get_refractive_index()` but otherwise identical to this func. `wavelength` is ignored in this case.
    wavelength : number or the string 'resonance'
        If 'resonance' specified, will launch the resonance wavelength with maximum eigenvalue (min loss). Synonyms are 'res' and 'max', and these are all case-insensitive.
        If a number is specified, that wavelength will be launched. The wavelength should be found in the list of calculated wavelengths (`Cavity.calc(wavelengths)`), found after `calc()` in the attribute `Cavity.wavelengths`.
    direction = string { 'fwd', 'bwd', 'total' }, case insensitive, optional
        DISABLED - now chosen based on LHS or RHS input.
        Which field propagation direction to plot. Defaults to 'total'.
        Note that the propagation direction should match up with which side the input field was launched. Eg. for `set_input([1,0,0], side="left")` you'll want to use `direction="fwd"`.
        Synonyms for 'fwd' include 'forward' & 'f'.
        Synonyms for 'bwd' include 'backward' & 'b'.
        Synonyms for 'total' include 'tot' & 't'.
    xcut, ycut = float, optional
        x & y coords at which to cut the Device along Z. Both default to 0.
    zpoints = integer, optional
        Number of points to acquire in the field. Defaults to 3000.
    zmin, zmax = float, optional
        min & max z-coorinates. Defaults to 0-->Device Length.
    calc = { True | False }
        Tell FimmProp to calculate the fields? Only needs to be done once to store all field components & refractive indices (for a given `zpoints`, `xcut` etc.), so it is useful to prevent re-calculating after the first time.
    cut = tuple of two floats - NOT IMPLEMENTED YET
        Specify coordinate plane on which to plot fields. Default (0,0).
        If dir='Z', then tuple is (x,y).
        If dir='Y', then tuple is (x,z).
        If dir='X', then tuple is (y,z).

    Returns
    -------
    2-D List of complex values corresponding to field values, starting at z=0 and ending at specified `zmax`, for each specified modenumber.
    1st dimension of List corresponds to the specified modenumbers. For example:
        >>> f = CavObj.mode([1,3]).get_field('Ex', 'resonance')
    Will return the list `f` with `f[0]` corresponding to mode(1) & `f[1]` corresponding to mode(3).
        >>> f = CavObj.mode(2).get_field('Ex', 'resonance')
    Will only have `f[0]`, corresponding to mode(2).

    Examples
    --------
    Get the Total Ex field at x,y=(0,0) along Z, along the whole Cavity.
        >>> field = Cav.get_field('Ex')
    Get the refractive index at x,y=(0,0) along Z, along the whole Cavity.
        >>> field = Cav.fields('index')
    '''
    wl = wavelength
    # Split the requested number of points between the LHS & RHS half-Devices.
    # NOTE(review): np.round() returns a float here, unlike int() for zptsL --
    # confirm downstream Device.get_field() tolerates a float `zpoints`.
    zptsL=int(zpoints/2.); zptsR=np.round(zpoints/2.)
    comp = component.lower().strip()
    if comp == 'index' or comp == 'rix' or comp == 'ri':
        '''Return refractive index - wavelength unimportant'''
        Lfield = self.Cavity.LHS_Dev.get_field('rix', zpoints=zptsL, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction='total', calc=calc)
        Rfield = self.Cavity.RHS_Dev.get_field('rix', zpoints=zptsR, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction='total', calc=calc)
        Lfield.extend(Rfield) # concatenate the L+R fields
        zfield=Lfield
    else:
        zfield=[] # to hold fields at each mode number
        for num,M in enumerate(self.modenum):
            '''num goes from 0-># of modes requested. M tells use the actual mode number.'''
            if DEBUG(): print "CavityMode.plot(field): (num, M) = (", num, ",", M, ")"
            # find index to the spec'd wavelength.
            # `wl` is the passed argument, `WL` is the final wavelength
            if isinstance(wl, str):
                '''if 2nd arg, wl, is a string: '''
                wl = wl.lower().strip() # to lower case + strip whitespace
                if wl == 'resonance' or wl == 'res' or wl == 'max':
                    '''Find the resonant wavelength/eigval/eigvector'''
                    if DEBUG(): print "CavityMode.plot('res'): self.get_resonance_eigenvalues() = \n", self.get_resonance_eigenvalues()
                    if DEBUG(): print "CavityMode.plot('res'): self.get_resonance_wavelengths() = \n", self.get_resonance_wavelengths()
                    # NOTE(review): plot() performs this same check with np.all(...==None);
                    # the simpler `== [None]` comparison here may differ for array entries -- confirm.
                    if self.__resonance_eigenvalue[num]==[None] or self.__resonance_wavelength[num]==None:
                        '''No resonance found for this mode'''
                        ErrStr = "No resonance found for mode %i, "%(M) + "can't plot via `resonance`."
                        raise UserWarning(ErrStr)
                    # Pick the strongest resonance (max real eigenvalue = min loss):
                    Iwl = np.argmax( np.real( self.__resonance_eigenvalue[num] ) )
                    WL = self.__resonance_wavelength[num][Iwl]
                    Iwl = np.where( np.array([WL]) == self.wavelengths[:][num] )[0] # set to index of all calc'd WL's, not just resonance WLs
                    print "CavityMode.plot('res'): Getting field at resonance mode @ %0.3f nm" %( WL )
                    if DEBUG(): print "Iwl=%s\nWL=%s"%(Iwl,WL)
                else:
                    raise ValueError("CavityMode.plot(field): Unrecognized wavelength string. Please use 'resonance' or provide a wavelength in microns. See `help(CavityMode.plot)` for more info.")
            else:
                '''A specific wavelength (number) must have been passed: '''
                WL = wl
                Iwl = np.where( np.array([WL]) == self.wavelengths[num] )[0]
                if not Iwl:
                    '''If wavelength not found in calculated WLs: '''
                    # NOTE(review): the commas below build a tuple, not a string,
                    # so the ValueError message will render as a tuple.
                    ErrStr = "CavityMode.plot(field): Wavelength `", WL, "` not found in among list of calculated wavelengths list (chosen during `Cavity.calc(wavelengths)`). See `help(CavityMode.plot)` for more info."
                    raise ValueError(ErrStr)
            if DEBUG(): print "CavityMode.plot(): (num,Iwl)=(",num,",",Iwl,")"
            EigVec = self.eigenvectors[num][Iwl[0]] # find eigenvector at given wavelength
            # Launch this eigenvector:
            norm = False
            self.Cavity.RHS_Dev.set_input( EigVec, side='left', normalize=norm )
            self.Cavity.RHS_Dev.set_input( np.zeros( get_N() ), side='right' ) # no input from other side
            # Get mode vector reflected from RHS device & launch it into LHS dev, to accomplish one roundtrip
            vec = self.Cavity.RHS_Dev.get_output_vector(side='left', direction='left')
            self.Cavity.LHS_Dev.set_input( vec, side='right', normalize=norm )
            self.Cavity.LHS_Dev.set_input( np.zeros( get_N() ), side='left' ) # no input from other side
            # Get field values:
            Lfielddir, Rfielddir = 'total','total'
            self.Cavity.LHS_Dev.calc(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut)
            Lfield = self.Cavity.LHS_Dev.get_field(comp, zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction=Lfielddir, calc=False)
            self.Cavity.RHS_Dev.calc(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut)
            Rfield = self.Cavity.RHS_Dev.get_field(comp, zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction=Rfielddir, calc=False)
            Lfield.extend(Rfield) # concatenate the L+R fields
            zfield.append(Lfield) # add field for this mode number
        #end for(self.modenum)
    #end if(comp==etc.)
    return zfield
#end get_field()

# Alias for this func:
field = get_field
def plot(self, *args, **kwargs):
    '''CavityMode.plot(component, [more options])

    CavityMode.plot()
    CavityMode.plot( 'EigVals' ) # plot eigenvalues versus wavelength
    CavityMode.plot( 'Ex', 1.055 ) # plot cavity field Ex versus Z @ 1.055um wavelength

    Plot the cavity modes.
    If no arguments are provided, this will plot the calculated Eigenvalues versus wavelength.
    However, if a field component is specified, the function will plot the cavity fields versus Z.

    Parameters
    ----------
    component : string (see below), case-insensitive, optional
        Two different plot functionalities may be performed, depending on whether `component` specifies a field component or the eigenvalues of the cavity. The different functionality for either type of `component` specified is as follows:

        component = 'EigVal' :
            Plot EigenValues vs. wavelength (at the wavelengths determined by `Cavity.calc(wavelengths)` ).
            This is the default if no argument passed. Synonyms for 'EigVal' are 'EigVals' & 'EigV'.

        component = {'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' | 'I' | 'RIX'} :
            Plot the specified field component along a specified direction.
            "RIX", "RI" or "index" will plot only the refractive index vs. Z.
            The 2nd argument must be the wavelength at which to plot the fields, or the string 'resonance'. The specified wavelength must be in the list of calculated wavelengths passed to `Cavity.calc(wavelengths)`. These wavelengths can be found in the list `CavityObj.wavelengths`. For example, you could get them directly from that list, like so:
                >>> CavityObj.mode(0).plot( 'Ex', CavityObj.wavelengths[51] )
            If the string 'resonance' is provided as the wavelength, then the wavelength with dominant resonance (max eigenvalue/min. loss) will be used. Synonyms for 'resonance' are 'res' & 'max', and the string is case-insensitive.

        Other optional keywords for field plotting that may be provided are:

        refractive_index = { True | False }
            If True, will plot the refractive index of the structure on a second axis, with shared X-axis (so zooming etc. zooms both X axes). Default is False.
        field_points = integer, optional
            Number of points to acquire in a field plot. Defaults to 3000. The exact number of acquired points may vary by one of two points.
        xcut, ycut = float, optional
            x & y coords at which to cut the Device along Z. Both default to 0.
        zmin, zmax = float, optional
            min & max z-coorinates. Defaults to 0-->Device Length.
        xpoint, ypoint = float, optional
            x & y coords at which to cut the Device along Z. Both default to 0.
        direction = string { 'fwd', 'bwd', 'total' }, case insensitive, optional
            DISABLED: direction now chosen based on launch dir.
            Which field propagation direction to plot. Defaults to 'bwd'.
        cut = tuple of two floats - NOT IMPLEMENTED YET
            Specify coordinate plane on which to plot fields. Default (0,0).
            If dir='Z', then tuple is (x,y).
            If dir='Y', then tuple is (x,z).
            If dir='X', then tuple is (y,z).

    return_handles = { True | False }
        If True, will return handles to the figure, axes, legends and lines. False by default.
    title = str, optional
        Pre-pend some text to the plot title.
    warn = bool
        Display warnings? Defaults to True.

    Returns
    -------
    handles : tuple of (fig1, axes, lines, leg)
        If `return_handles=True`, returns matplotlib handles to the plot's objects, as so:
        fig1 : main figure object
        axes : Each axis. For field plots, if `refractive_index=True` then axes = ( Field_Axis , RI_Axis ), otherwise just = Field_Axis handles (or one axis for EigenValues).
        lines : Each curve plotted. If `refractive_index=True` then lines = ( RI_line, Field_Line_Mode_0, Field_Line_Mode_1 , ... Field_Line_Mode_N ), otherwise handle RI_Line is omitted.
            For EigenValue plots, `lines = (EigV_real_lines, EigV_imaginary_lines, Resonance_lines)`, with each being a list with a line per mode. Resonance_lines are the vertical lines indicating resonance wavelengths, which itself is a list of lists - `Resonance_lines[modenum][resonance_num]`, since there can be multiple resonances for each mode.
        leg : legend of main Field/EigV axis, containing one legend entry for each mode number.

    Examples
    --------
    Typically Not called/instantiated from CavityModes class directly, but instead as a sub-object of a Cavity mode object like so:
        >>> CavityObj.mode(0).plot('EigVal')
    where `CavityObj.mode(0)` returns a CavityMode object (initialized with modenum=0), and `.plot` is a method of this CavityMode object.

    Plot Eigenvalues vs. Wavelength for a few lateral (waveguide) modes:
        >>> CavityObj.mode( [0,2] ).plot('EigVal')
        >>> CavityObj.mode( 'all' ).plot('EigVal') # plot all Mode's EigV's on one plot
        >>> CavityObj.mode( 0 ).plot('EigVal') # plot only 1st mode's Eigenvalues

    Plot Fields of the Cavity Mode:
        >>> CavityObj.mode( 0 ).plot('Ex', 'resonance') # plot Ex for strongest resonance of Mode 0
        >>> CavityObj.mode( 'all' ).plot('Hy', 1.550) # plot Hy for all modes on one plot, at wavelength 1.550 (may not be resonant, so fields may be discontinuous @ Cavity cut)
        >>> CavityObj.mode( 0 ).plot('Ex', 'resonance', refractive_index=True) # plot Ex for strongest resonance of Mode 0, with Refractive Index profile plotted on separate axis
        >>> fig, axis, line, leg = CavityObj.mode( 0 ).plot('Ex', 'res', return_handles=True) # plot Ex for strongest resonance, and return matplotlib handles to the figure's elements
    '''
    import matplotlib.pyplot as plt # there is no extra overhead to re-import a python module
    # parse keyword args:
    return_handles = kwargs.pop('return_handles', False)
    title = kwargs.pop('title', None)
    warn = kwargs.pop('warn', True)
    ''' Unused Kwargs are returned at the end of the plot() func.'''
    if len(args)==0:
        comp = 'eigval'   # default: plot EigenValues vs. wavelength
    else:
        if isinstance(args[0], str):
            comp = args[0].lower().strip() # make lower case, strip whitespace
        else:
            # NOTE(review): `component` is undefined in this scope (the arg is args[0]) --
            # this error path would itself raise a NameError.  Should use args[0].
            ErrStr = "CavityMode.plot(component): expected `component` to be a string, but instead got: " + str(type(component)) + " : " + str(component)
            raise ValueError(ErrStr)
    #end if(args)
    # Perform different plots depending on requested component `comp`:
    #eigvstr = ['eigval', 'eigvals', 'eigv'] # possible strings for EigenValue plotting
    fieldstrs = ['ex','ey','ez','hx','hy','hz','i','rix','ri','index'] # possible strings for field plotting
    '''
    -----------------------------------
    First case: Plot the Eigenvalues
    -----------------------------------
    '''
    if comp == 'eigval' or comp == 'eigvals' or comp == 'eigv':
        '''Plot the eigenvalues'''
        fig1, ax1 = plt.subplots(1, 1)
        box = ax1.get_position()
        ax1.set_position([ box.x0, box.y0, box.width * 0.8, box.height]) # reduce axis width to 80%, to make space for legend
        l1 = []; l2 = []
        vlines_out=[]
        for num,M in enumerate(self.modenum):
            '''num goes from 0-># of modes requested. M tells use the actual mode number.'''
            #if DEBUG(): print "CavityMode.plot: num in modenum = ", num, type(num), " in ", self.modenum, type(self.modenum)
            if len(self.eigenvalues[num]) == 0: raise UserWarning("No EigenValues found for mode %i!" %M +" Cavity modes not calculated yet? Please run Cavity.calc() to do so.")
            EigsArray = self.eigenvalues[num]
            WLs = self.wavelengths[num]
            #l1 = []; l2 = []; leg1 = []; leg2=[]
            # Real part with 'x' markers, imaginary part with '+' markers, same color per mode:
            l1.extend( ax1.plot(WLs, EigsArray.real, '-x', label="%i: Real"%self.modenum[num] ) )
            curr_color = l1[-1].get_color() # color for this mode, as selected my MPL
            #leg1.append("Real")
            l2.extend( ax1.plot(WLs, EigsArray.imag, '-+', label="%i: Imag"%self.modenum[num], color=curr_color ) )
            #leg2.append("Imaginary")
            #ax1.plot(WLs, EigsArray[:,0].real, label="Mode "+str(i)+": real")
            #ax2.plot(WLs, EigsArray[:,0].imag, label="Mode "+str(i)+": imag")
            # add line indicating resonance, if found:
            vlines = [] # holds handles of vertical lines
            if np.any(self.__resonance_wavelength[num]):
                # This line starts at the data coords `xytext` & ends at `xy`
                ymin, ymax = ax1.get_ylim()
                for ii, resWL in enumerate( self.__resonance_wavelength[num] ):
                    if ii==0:
                        '''Only add label once'''
                        vlines.append( ax1.vlines(resWL, ymin, ymax, linestyles='dashed', colors=curr_color, label="%i: Resonance"%self.modenum[num] ) )
                    else:
                        vlines.append( ax1.vlines(resWL, ymin, ymax, linestyles='dashed', colors=curr_color) )
                #end for(resWL)
            #end if(resonance)
            vlines_out.append(vlines)
        #end for(modenum)
        ax1.set_xlabel(r"Wavelength, ($\mu{}m$)")
        ax1.set_ylabel("Eigenvalue")
        #ax2.set_ylabel("Imaginary")
        titlestr = self.Cavity.name + " Eigenvalues for Mode "+str(self.modenum)
        if title: titlestr = title + ": " + titlestr
        ax1.set_title( titlestr )
        ax1.grid(axis='both')
        #plt.legend()
        #leg = plt.legend()
        leg = ax1.legend( loc='upper left', bbox_to_anchor=(1, 1) , fontsize='small' )
        fig1.canvas.draw(); fig1.show()
        # return some figure handles
        if return_handles: return fig1, ax1, (l1, l2, vlines_out), leg
    #end if(comp='EigV')
    #-----------------------------------
    # 2nd case: Plot the Fields
    #-----------------------------------
    elif np.any( np.array(comp)==np.array(fieldstrs) ):
        # check if comp matches Any strings in `fieldstrs`, defined above the if(...), ln. 409
        # -- Plot fields in structure --
        # 1st arg: Component string for plot legend:
        # (`comp` will be send to `get_field()` for parsing which field)
        if comp == 'Ex'.lower():
            compstr='Ex'
        elif comp == 'Ey'.lower():
            compstr='Ey'
        elif comp == 'Ez'.lower():
            compstr='Ez'
        elif comp == 'Hx'.lower():
            compstr='Hx'
        elif comp == 'Hy'.lower():
            compstr='Hy'
        elif comp == 'Hz'.lower():
            compstr='Hz'
        elif comp == 'I'.lower():
            compstr='Intensity'
        elif comp=='rix' or comp=='index' or comp=='ri':
            compstr='Refr. Index'
        else:
            raise ValueError("CavityMode.plot(field): Invalid field component requested.")
        # get keyword arguments, with default:
        RIplot = kwargs.pop('refractive_index', False) # plot refractive index?
        zpoints = kwargs.pop('field_points', 3000) # number of points in field plot
        xcut = kwargs.pop('xpoint', 0.0)
        ycut = kwargs.pop('ypoint', 0.0)
        zmin = kwargs.pop('zmin', 0.0)
        zmax = kwargs.pop('zmax', (self.Cavity.LHS_Dev.get_length() + self.Cavity.RHS_Dev.get_length()) ) # default to total device length
        zpoints = math.ceil( zpoints/2. ) # half as many zpoints in each of the two Devs
        xpoints, ypoints = xcut, ycut # probably not needed - old method
        PlotPoints = zpoints # not needed
        """
        dirstr = kwargs.pop('direction', None)
        if dirstr == None:
            dirstr = 'bwd'
        else:
            dirstr = dirstr.lower().strip()
        if dirstr=='fwd' or dirstr=='forwards' or dirstr=='f':
            dirstr = 'Fwg'
        elif dirstr=='bwd' or dirstr=='backwards' or dirstr=='b':
            if comp=='i':
                '''Due to Fimmwave typo bug: should be Title case. '''
                dirstr = 'bwg'
            else:
                dirstr = 'Bwg'
        elif dirstr=='total' or dirstr=='tot' or dirstr=='t':
            dirstr = 'Total'
        fieldstr = compstr + dirstr #attribute of FimmWave `zfieldcomp` object
        """
        # 2nd arg: Figure out array index to proper wavelength
        if len(args) >= 2:
            wl = args[1]
        else:
            ErrStr="Cavity.plot(): For plotting a field component, 2nd argument must be the wavelength to plot. Please see `help(CavityMode.plot)` for more info."
            raise ValueError(ErrStr)
        #if DEBUG(): print "CavityMode.plot(field): wl= ", wl
        zfield=[] # to hold fields at each mode number
        for num,M in enumerate(self.modenum):
            '''num goes from 0-># of modes requested. M tells use the actual mode number.'''
            if DEBUG(): print "CavityMode.plot(field): (num, M) = (", num, ",", M, ")"
            # find index to the specified wavelength in the list of calc'd wavelengths.
            # `wl` is the passed argument, `WL` is the final wavelength
            if isinstance(wl, str):
                '''if 2nd arg is a string: '''
                wl = wl.lower().strip() # to lower case + strip whitespace
                if wl == 'resonance' or wl == 'res' or wl == 'max':
                    '''Find the resonant wavelength/eigval/eigvector'''
                    if DEBUG(): print "CavityMode.plot('res'): self.get_resonance_eigenvalues() = \n", self.get_resonance_eigenvalues()
                    if DEBUG(): print "CavityMode.plot('res'): self.get_resonance_wavelengths() = \n", self.get_resonance_wavelengths()
                    if np.all( np.array(self.__resonance_eigenvalue[num])==np.array([None]) ) or np.all( np.array(self.__resonance_wavelength[num])==np.array([None]) ):
                        '''No resonance found for this mode'''
                        ErrStr = "No resonance found for mode %i, "%(M) + "can't plot via `resonance`."
                        raise UserWarning(ErrStr)
                    # Find maximum Resonant EigenValue
                    Iwl = np.argmax( np.real( self.__resonance_eigenvalue[num] ) )
                    WL = self.__resonance_wavelength[num][Iwl]
                    Iwl = np.where( np.array([WL]) == self.wavelengths[:][num] )[0] # set to index of all calc'd WL's, not just resonance WLs
                    print "CavityMode.plot('res'): Getting field at resonance mode @ %f nm" %( WL )
                    if DEBUG(): print "Iwl=%s\nWL=%s"%(Iwl,WL)
                else:
                    raise ValueError("CavityMode.plot(field): Unrecognized wavelength string. Please use 'resonance' or provide a wavelength in microns. See `help(CavityMode.plot)` for more info.")
            else:
                '''A specific wavelength (float/number) must have been passed: '''
                WL = wl
                Iwl = np.where( np.array([WL]) == self.wavelengths[num] )[0] # get index to specified wl
                if not Iwl:
                    '''If wavelength not found in calculated WLs: '''
                    # NOTE(review): the commas below build a tuple, not a string,
                    # so the ValueError message will render as a tuple.
                    ErrStr = "CavityMode.plot(field): Wavelength `", WL, "` not found in the list of calculated wavelengths list (chosen during `Cavity.calc(wavelengths)`). See `help(CavityMode.plot)` for more info."
                    raise ValueError(ErrStr)
            #end parsing `wl`
            if DEBUG(): print "CavityMode.plot(): (num,Iwl)=(",num,",",Iwl,") \n" +\
                "Setting Wavelength to WL=%f um"%WL
            # Set FimmWave & Device wavelengths to proper value:
            print self.Cavity.name + ": Setting Global & Device wavelength to %0.8f."%(WL)
            set_wavelength(WL)
            self.Cavity.RHS_Dev.set_wavelength(WL)
            self.Cavity.LHS_Dev.set_wavelength(WL)
            EigVec = self.eigenvectors[num][Iwl[0]] # find eigenvector at given wavelength
            # Launch this eigenvector:
            norm = False # normalize the launch vectors? V.Brulis said to disable this
            self.Cavity.RHS_Dev.set_input( EigVec, side='left', normalize=norm )
            self.Cavity.RHS_Dev.set_input( np.zeros( get_N() ), side='right' ) # no input from other side
            # Get mode vector reflected from RHS device & launch it into LHS dev, to accomplish one roundtrip
            vec = self.Cavity.RHS_Dev.get_output_vector(side='left', direction='left')
            self.Cavity.LHS_Dev.set_input( vec, side='right', normalize=norm )
            self.Cavity.LHS_Dev.set_input( np.zeros( get_N() ), side='left' ) # no input from other side
            # Get field values:
            Lfielddir, Rfielddir = 'total','total'
            self.Cavity.LHS_Dev.calc(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut)
            Lfield = self.Cavity.LHS_Dev.get_field(comp, zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction=Lfielddir, calc=False)
            self.Cavity.RHS_Dev.calc(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut)
            Rfield = self.Cavity.RHS_Dev.get_field(comp, zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction=Rfielddir, calc=False)
            Lfield.extend(Rfield) # concatenate the L+R fields
            zfield.append(Lfield) # add field for this mode number
        #end for(modenums)
        ##################################
        # plot the field values versus Z:
        zfield = np.array(zfield)
        TotalLength = self.Cavity.LHS_Dev.get_length() + self.Cavity.RHS_Dev.get_length()
        z = np.linspace( 0, TotalLength, num=len(zfield[0]) ) # Z-coord
        if DEBUG(): print "CavityMode.plot(field): len(zfield[0])=%i"%(len(zfield[0]) ) + \
            "np.shape(zfield)=", np.shape(zfield), "\nz(%i) = "%len(z), z
        lines=[] # to return
        if RIplot:
            # Also plot the refractive index profile on a second, x-shared axis:
            Lindex = self.Cavity.LHS_Dev.get_refractive_index(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, calc=False)
            Rindex = self.Cavity.RHS_Dev.get_refractive_index(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, calc=False)
            Lindex.extend(Rindex) # concatenate the L+R indices
            rix=Lindex # add field for this mode number
            fig1, (ax1,ax2) = plt.subplots(2, sharex=True) # 2 axes
            axes=(ax1,ax2) # to return
            # Reduce axis width to 80% to accommodate legend:
            box = ax2.get_position()
            ax2.set_position([ box.x0, box.y0, box.width * 0.8, box.height])
            l2 = [ ax2.plot(z, np.real( np.array(rix) ), 'g-', label="Refractive Index" ) ] # plot RIX on 2nd subplot
            lines.append(l2)
        else:
            fig1, ax1 = plt.subplots(1, 1) # 1 axis
            axes=ax1 # to return
            # Reduce axis width to 80% to accommodate legend:
            box = ax1.get_position()
            ax1.set_position([ box.x0, box.y0, box.width * 0.8, box.height])
        l1 = []; #l2 = []
        for num,M in enumerate(self.modenum):
            '''num goes from 0-># of modes requested. M tells us the actual mode number.'''
            #if DEBUG(): print "CavityMode.plot(field): num in modenum = ", num, type(num), " in ", self.modenum, type(self.modenum)
            #l1 = []; l2 = []; leg1 = []; leg2=[]
            if DEBUG(): print "zfield[%i] = " %(num), zfield[num]
            l1.append( ax1.plot(z, np.real(zfield[num]), '-', label="%i: %s"%(self.modenum[num], compstr) ) )
            lines.append(l1[num])
            #leg1.append("Real")
        #end for(modenum)
        ax1.set_ylabel( "Field %s"%(compstr) )
        # NOTE: `WL` here is the wavelength from the LAST mode iterated above.
        titlestr = self.Cavity.name + ": %s vs. Z for Mode @ %0.2f $\mu{}m$"%(compstr,WL)
        if title: titlestr = title + ": " + titlestr
        fig1.suptitle( titlestr , fontsize=11)
        ax1.grid(axis='both')
        #plt.legend()
        if RIplot:
            ax2.set_ylabel('Refractive Index')
            ax2.set_xlabel(r"Z, ($\mu{}m$)")
            ax2.grid(axis='both')
        else:
            ax1.set_xlabel(r"Z, ($\mu{}m$)")
        #leg = plt.legend()
        leg = ax1.legend( loc='upper left', bbox_to_anchor=(1, 1) , fontsize='small' )
        #leg2 = ax2.legend( loc='upper left', bbox_to_anchor=(1, 1) , fontsize='small' )
        fig1.canvas.draw(); fig1.show()
        # return some figure handles
        if return_handles:
            if RIplot:
                return fig1, axes, lines, leg
            else:
                return fig1, axes, lines, leg
    #end if(comp=='Ex, Ey etc.')
    else:
        '''If component specified is unrecognized: '''
        ErrStr = "CavityMode.plot(): Invalid field component specified: `%s`. \n\tSee `help(pyFIMM.CavityMode.plot)`." %(args[0])
        raise ValueError(ErrStr)
    #end if(component)
    if kwargs:
        '''If there are unused key-word arguments'''
        ErrStr = "WARNING: Cavity.plot(): Unrecognized keywords provided: {"
        for k in kwargs.iterkeys():
            ErrStr += "'" + k + "', "
        ErrStr += "}. Continuing..."
        if warn: print ErrStr
#end plot
def get_resonance_wavelengths(self, ):
    '''Resonance wavelength(s) for each selected mode.

    Returns a list with one entry per selected mode number (same order as
    `self.modenum`); an entry is `None` when no resonance was found for
    that mode.'''
    return [self.__resonance_wavelength[idx] for idx in range(len(self.modenum))]
# alias to same function:
get_resonance_wavelength = get_resonance_wavelengths
def get_resonance_eigenvalues(self, ):
    '''Eigenvalue(s) at the resonance wavelengths of each selected mode.

    Returns a list with one entry per selected mode number (same order as
    `self.modenum`); an entry is `None` when no resonance was found for
    that mode.'''
    return [self.__resonance_eigenvalue[idx] for idx in range(len(self.modenum))]
# alias to same function:
get_resonance_eigenvalue = get_resonance_eigenvalues
def get_resonance_eigenvectors(self, ):
    '''Eigenvector(s) at the resonance wavelengths of each selected mode.

    Returns a list with one entry per selected mode number (same order as
    `self.modenum`); an entry is `None` when no resonance was found for
    that mode.'''
    return [self.__resonance_eigenvector[idx] for idx in range(len(self.modenum))]
# alias to same function:
get_resonance_eigenvector = get_resonance_eigenvectors
def get_cavity_losses_frac(self, ):
    '''Cavity loss (equivalent to threshold gain) per selected mode, as a
    fractional power of the input mode (eigenvector).

    Eg. a value of 0.4 means that 40% of the power in this mode was lost.
    One list entry per selected mode number, in `self.modenum` order.'''
    # resonance loss is an amplitude; square it to get fractional power lost
    return [self.__resonance_loss[idx]**2 for idx in range(len(self.modenum))]
# alias to same function:
get_cavity_loss_frac = get_cavity_losses_frac
def get_cavity_losses_dB(self, ):
    '''Cavity loss (equivalent to threshold gain) per selected mode, as a
    fractional power of the input mode (eigenvector) expressed in dB.

    Eg. a value of +3.0 means that 3dB of the power in this mode was lost.
    One list entry per selected mode number, in `self.modenum` order.'''
    frac_losses = self.get_cavity_losses_frac()
    # remaining power fraction (1-L) converted to positive dB of loss
    return [-10 * np.log10(1.0 - frac) for frac in frac_losses]
# alias to same function:
get_cavity_loss_dB = get_cavity_losses_dB
def get_cavity_losses_m(self, ):
    '''Cavity loss (equivalent to threshold gain) per selected mode, in m^-1.

    Eg. a value of 0.4 means the cavity loss for this cavity mode is 0.4 m^-1.
    One list entry per selected mode number, in `self.modenum` order.'''
    # alpha(cm^-1) = -ln(lambda)/(2*L[cm])
    # (assumes Cavity.get_length() returns meters -- TODO confirm units)
    return [-1 * np.log(1.0 - self.__resonance_loss[idx]) / (2 * self.Cavity.get_length())
            for idx in range(len(self.modenum))]
# alias to same function:
get_cavity_loss_m = get_cavity_losses_m
def get_cavity_losses_cm(self, ):
    '''Cavity loss (equivalent to threshold gain) per selected mode, in cm^-1.

    Eg. a value of 0.4 means the cavity loss for this cavity mode is 0.4 cm^-1.
    One list entry per selected mode number, in `self.modenum` order.'''
    # 1 m^-1 == 0.01 cm^-1
    return [per_meter / 100 for per_meter in self.get_cavity_losses_m()]
# alias to same function:
get_cavity_loss_cm = get_cavity_losses_cm
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,420
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/colormap_HotCold.py
|
# ColorMap
# Red-Black-Blue, like Matlab's 'FireIce' or 'HotCold'
# http://stackoverflow.com/questions/24997926/making-a-custom-colormap-using-matplotlib-in-python
from matplotlib.colors import LinearSegmentedColormap
ltblue = [x/255. for x in (170,170,255)] # set the RBG vals here
ltred = [x/255. for x in (255,100,100)]
cm_hotcold = LinearSegmentedColormap.from_list('coldhot', [ltblue, 'black', ltred] , N=256)
'''
# Use as so,
# to keep black at 0, set vmin/vmax to extent of data:
maxfield = np.max( np.abs( np.array(field).real ) )
cont = ax.contourf( np.array(x), np.array(y), np.array(field) , vmin=-maxfield, vmax=maxfield, cmap=cm_coldhot)
(also for pcolor() etc.)
'''
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,421
|
demisjohn/pyFIMM
|
refs/heads/master
|
/example5 - open Device from File with Variables v1.py
|
'''
##########################################################################
Example 5:
Import a Project + Device from File & access the internal variables
##########################################################################
'''
import pyfimm as pf # Every script must begin with this line
pf.connect()
import sys, os
ScriptPath, ScriptFile = os.path.split( os.path.realpath(__file__) ) # Get directory of this script
pf.set_working_directory(ScriptPath) # Set FimmWave directory to the location of your script (needed to capture output files)
''' Since we're loading an existing Project, we might not need any of these global parameters. Haven't tested that yet. '''
pf.set_eval_type('n_eff') # FIMMWAVE will label modes by the effective index (options: n_eff or beta)
pf.set_mode_finder_type('stable') # options: stable or fast
pf.set_mode_solver('vectorial FMM real') # Three words, any permuation of: 'vectorial/semivecTE/semivecTM FDM/FMM real/complex' for RWG.
pf.set_wavelength(1.55) # The unit of space is always 1 micrometer
pf.set_N_1d(100) # # of 1D modes found in each slice (FMM solver only)
pf.set_NX(100) # # of horiz. grid points for plotting & FDM
pf.set_NY(100) # # of vertical grid points for plotting & FDM
pf.set_N(3) # # of modes to solve for
pf.set_material_database('Materials/refbase.mat')
#####################################################
# Import a Device from a saved FimmWave project file
#
# First open the Project file
# Then make a new pyFIMM Device that points to the loaded Device
#####################################################
#pf.set_DEBUG() # Turn on Debugging verbose output.
# Open the .prj file; `overwrite=True` replaces any already-loaded copy:
ex5prj = pf.import_Project('example5 - Device with Variables v1.prj', overwrite=True)
# If the project is already loaded, try `overwrite='reuse'` to prevent reloading it.
# Tell pyFIMM the name of the Variable Node in this Project:
ex5prj.set_variables_node('Variables 1')
# The variables can be interrogated, get and set, via the Project's new attribute: `ex5prj.variablesnode`
# For example:
#print ex5prj.variablesnode.get_var('wCore')
#allvars = ex5prj.variablesnode.get_all() # save all vars as dictionary
print ex5prj.variablesnode # show all variables and formulae
# See `help(ex5prj.variablesnode)` for the full list of methods.
# Load the Device '1x2 Coupler' into a pyFIMM Device object:
dev = pf.import_device(project=ex5prj, fimmpath='1x2 Coupler')
'''
We just opened a Device from a file, and made a pyFIMM Device object
that points to it. Since the Device was made in FimmProp, not pyFIMM,
pyFIMM does not try to understand it's inner workings in detail.
Many Device properties are still created though, so that you can
plot fields, reference elements etc.
'''
# Do something with the new Device:
print dev.name + ": Total Device Length = %f um" %( dev.get_length() )
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,422
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/proprietary/ExampleModule.py
|
'''
pyFIMM/proprietary/ExampleModule.py
This is an example of how to add your own proprietary functionality to pyFIMM.
You could also keep this file outside the main pyFIMM directory and import it in your script,
but importing it as part of pyFIMM gives it access to all the pyFIMM methods etc.
This example module adds the following functions:
Creates a new function `get_total_width()` as part of this module.
and
Adds a `set_temperature()` method to the `Waveguide` object
Adds a `get_temperature()` method to the `Waveguide` object
The functions can then be called as so:
>>> pf.ExampleModule.get_total_width( WaveguideObj1, WaveguideObj2, WaveguideObj3 )
and
>>> WaveguideObj1.set_temperature( 451.0 ) # set the waveguide's temperature
'''
from ..__globals import * # import global vars & FimmWave connection object & DEBUG() variable
import numpy as np
'''
########################################################
New Functions from this ExampleModule
########################################################
'''
def get_total_width( *args ):
    '''Return the total width of the waveguides passed.

    Parameters
    ----------
    *args : any number of Waveguide or Circ objects, each as an individual argument

    Returns
    -------
    The summed `get_width()` of every object passed, in microns.
    Zero if called with no arguments.

    Examples
    --------
    >>> pf.ExampleModule.get_total_width( WaveguideObj1, WaveguideObj2, WaveguideObj2 )
    : 44.2     # returns the total width in microns
    '''
    # Let the built-in sum() do the accumulation; sum() of nothing is 0,
    # matching the original `width = 0` starting point.
    return sum( wg.get_width() for wg in args )
'''
########################################################
New Functions for the Waveguide object
########################################################
'''
from ..__Waveguide import * # import the Waveguide class, to add functions to it.
# `self` here will be the Waveguide object, once this func is called as a method of that object
# Use a temporary place-holder name. The real name comes later when we add it to the Waveguide Class
# A double-underscore prefix (`__`) is a convention meaning this function should be hidden from the user. We don't want anyone calling this function directly (ie. not as a Waveguide method).
def __WG_set_temperature(self, temp):
    '''Set temperature of this Waveguide.  FimmWave default is -1000.0.

    The Waveguide object must already have been built in FimmWave.

    Parameters
    ----------
    temp : float
        Temperature in degrees Celcius.

    Examples
    --------
    >>> WaveguideObj.set_temperature( 25.0 )
    '''
    # Guard clause: the FimmWave node must exist before we can address it.
    if not self.built:
        raise UserWarning( "Waveguide.set_temperature(): This waveguide has not been built yet! Please call WaveguideObj.buildNode() first!" )
    # `nodestring` is the fimmwave reference to this Waveguide node, so the
    # command expands to something like:
    #       app.subnodes[1].subnodes[5].temp = 451.0
    fimm.Exec( "%s.temp = %s" % (self.nodestring, temp) )
#end __WG_set_temperature()

# Attach the function to the Waveguide class under its public method name:
Waveguide.set_temperature = __WG_set_temperature
# This determines the real name of the function as a Waveguide method, and points to this function.
def __WG_get_temperature(self):
    '''Return temperature setting of this Waveguide.

    Returns
    -------
    temp : float
        Temperature in degrees Celcius.  Defaults to `-1000.0` if unset.
    '''
    # Query the node's `temp` attribute straight from FimmWave.
    return fimm.Exec( "%s.temp" % self.nodestring )
#end __WG_get_temperature()

# Attach the function to the Waveguide class under its public method name:
Waveguide.get_temperature = __WG_get_temperature
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,423
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/PhotonDesignLib/__init__.py
|
#/usr/bin/python2.7
#
# __init__.py
# Module to load all PhotonDesign Python libraries/modules
# This file will cause the folder it's in to be a Python module
# to be importable as a module, where the module automatically includes
# all the files within the folder.
#
# Taken from here:
# http://stackoverflow.com/questions/1057431/loading-all-modules-in-a-folder-in-python
#
# Demis John, Oct 2014
#
############################################################
import os     # file path manipulations
import glob   # file-name matching

# Build __all__ from every *.py file in this directory, so that
# `from PhotonDesignLib import *` pulls in each sibling module.
# BUGFIX: the unfiltered glob also matched __init__.py itself, putting
# "__init__" into __all__ and causing a spurious package self-import on
# star-import; it is now excluded.
__all__ = [
    os.path.basename(f)[:-3]                              # strip the ".py" extension
    for f in glob.glob(os.path.join(os.path.dirname(__file__), "*.py"))
    if not f.endswith("__init__.py")
]
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,424
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__Device.py
|
'''Device class, part of pyFIMM.'''
from __globals import * # import global vars & FimmWave connection object
# DEBUG() variable is also set in __globals, & numpy as np & pyplot as plt
from __pyfimm import * # import the main module (should already be imported)
# NOTE: shouldn't have to duplicate the entire pyfimm file here! Should just import the funcs we need...
from __pyfimm import get_N # deprecated - use self.get_N() for device-specific N()
from __Waveguide import Waveguide # rectangular waveguide class
from __Circ import Circ # cylindrical (fiber) waveguide class
from __Tapers import Taper,Lens # import Taper/WGLens classes
from __Mode import Mode # import Mode class
## Moved to __globals.py:
#import numpy as np # array math etc.
#import matplotlib.pyplot as plt # plotting - to get a new figure
class Device(Node):
"""Device( elements )
The Device class constructs a FIMMProp Device, for propagating through multiple waveguides.
A Device Node contains multiple waveguide components (with lengths or paths) stitched together.
By default the waveguides are joined by the Simple Joint type, and parameters are inherited from the Device node.
Inherits from the Node class - see help on pyFIMM.Node for member functions/properties.
Please type `dir(DeviceObj)` or `help(DeviceObj)` to see all the attributes and methods available.
Parameters
----------
elements : List of { Waveguide object | Circ objects }
Attributes
----------
elements : list
List containing Waveguide, Circ, Taper or other waveguide-type objects. The Device is constructed from left-to-right starting at elements[0].
You can interrogate these element objects as well, eg. DeviceObject.elements[0].length = 2.50 etc.
name : string
Name of this Device in fimmwave Node.
built : { True | False }
Whether or not this Device has been built in Fimmwave via buildNode().
origin : { 'pyfimm' | 'fimmwave' }
Indicates whether this Device was built using pyFIMM, or was constructed in FimmWave & imported via `import_device()`.
After Device.buildNode() has been called, the following become available:
num : int
FimmWave Node number for this device
nodestring : string
String used to access this Device node in fimmwave, for example: "app.subnodes[1].subnodes[3]"
elementpos : list
List containing integers, indicating position (eltlist[position]) that each element was inserted into in the fimmwave Device.
You can iterate through this list to reference each element in the device, using the list value as the `eltnum[x]` - this will also account for Referenced elements (in an imported Dev), as the list value will point to the original element rather than the reference.
For example, if the 1st element is a Reference to the 4th element, `elementpos` might look like this: `elementpos = [4, 2, 3, 4, 5]`
jointpos : list
List containing integers, indicating position (eltlist[position]) of the simple joints in the fimmwave Device.
Methods
-------
This is a partial list - see `dir(pf.Device)` to see all methods.
Please see help on a specific function via `help(pf.Device)` for detailed up-to-date info on accepted arguments etc.
set_input_field( vector , side=None)
Set the input field with a vector specifying the amplitudes (complex) of each mode to launch.
vector should be a list with same length as `get_N()`, number of modes.
side specifies which side to launch on, 'left' or 'right'
set_inc_field() is a synonym.
set_joint_type(type)
Set the type of FimmProp joint to use between all waveguides in this Device.
get_joint_type(type)
Get the type of FimmProp joint to use between all waveguides in this Device.
Examples
--------
Call Device similar to Slice() or Waveguide():
>>> dev1 = Device( WG1(100.0) + WG_Air(10.0) + WG2(100.0) )
This concatenates the structure WG1 (elongated by 100um) to WG_Air (10um long) and WG2 (100um long).
The waveguide types can be any pyFIMM supported type, such as Waveguide (rectangular coords), or Circ (cylindrical coords).
Mode solver options are available for each type.
To Do:
------
- Add support for Tapers/WGLenses
- Add support for Input/Output ports, eg. Device( IOPort1() + WG1(100.0) + IOPort2() )
- Input Device objects (one nesting layer only) and construct node from constituent elements of each Device object.
- add support for Paths (eg. non-straight WG's) - Done, use `import_device()`
"""
    def __init__(self,*args):
        """Construct a Device, either empty (no args) or from one list of
        Section objects (as produced by summing called waveguides, e.g.
        `WG1(100.0) + WG2(10.0)`).  See help(Device) for the full contract."""
        #if DEBUG(): print "Device Constructor: args=\n", args
        if DEBUG(): print "Device Constructor: "
        if DEBUG() and len(args) > 0: print str( len(args[0]) ) + " elements passed."
        self.origin = 'pyfimm'   # Device was constructed in pyFIMM (vs. imported from FimmProp)
        self.name = None
        self.calculated= False   # has this Device been calculated yet?
        self.built=False         # has the Dev been build in FimmProp?
        self.input_field_left = None   # input fields
        self.input_field_right = None
        self.__wavelength = get_wavelength()   # get global wavelength
        self.__inc_field = None   # incident/input field - DEPRECATED
        self.elementpos = []   # positions in eltlist[] of each element/joint
        self.jointpos = []
        self.matDB = None   # material database path
        if len(args) == 1:
            self.lengths = []
            self.elements = []
            for i in range(len(args[0])):
                '''a [list] of Section objects is passed (created by the __add__ method of the Section class).
                each Section object has the attribute SectionObj.WG which is the original Waveguide/Circ object.
                the `length` attribute is set in the Section when the WGobj is called with an argument (the __call__ method of the WG Object).'''
                self.elements.append( args[0][i].WG )
                self.lengths.append( args[0][i].get_length() )
            #if DEBUG(): print "\nElements=\n", self.elements, "\nLengths=\n", self.lengths
        elif len(args) == 0:
            # Empty Device; elements/lengths may be populated later.
            self.lengths = []
            self.elements = []
        else:
            raise ValueError('Invalid number of arguments to Device()')
def __str__(self):
'''How to `print()` this object'''
string=""
if self.name: string += "Name: '"+self.name+"'\n"
string += 'Total Length = %7.4f \n' % self.get_length()
for i,el in enumerate(self.elements):
if i == 0:
string += 6*'*' + ' Left-Hand Section ' + 6*'*' + '\nlength = %7.4f \n' % self.lengths[i] + '\n%s' % (el) + '\n'
elif i == (len(self.elements)-1):
string += 6*'*' + ' Right-Hand Section ' + 6*'*' + '\nlength = %7.4f \n' % self.lengths[i] + '\n%s' % (el) + '\n'
else:
string += 6*'*' + ' Middle Section %i ' % i + 6*'*' + '\nlength = %7.4f \n' % self.lengths[i] + '\n%s' % (el) + '\n'
return string
#end __str__
def __len__(self):
'''Number of elements in this Device.'''
return len(self.elements)
def __call__(self):
'''Calling a Device object creates a Section of passed length, and returns a list containing this new Section.
Usually passed directly to Device as so:
>>> NewDevice = pyfimm.Device( DeviceObj() + WG2(1.25) + WG3(10.5) )
'''
# Instantiate a Section obj with 1 args
out = [ Section( self ) ]
return out
def __add__(self, other):
'''To Do: Allow devices to be added together, concatenating their elements.'''
raise Error("Device addition currently unsupported.")
def get_origin(self):
'''Return 'pyfimm' if this Device was constructed in pyFIMM, or 'fimm' if the Device was constructed in FimmProp.
Dev's constructed in pyFIMM will have a list of elements, which reference pyFIMM waveguide objects etc. Dev's constructed in FimmProp will not have such properties, but can take advantage of FimmProp's GUI construction, etch/grow paths and other functionality not achievable in pyFIMM.'''
return self.origin
def get_length(self):
'''Return summed lengths of contained elements - total length of this Device.'''
try:
return np.sum(self.lengths)
except TypeError:
pass
#raise ValueError("Could not determine length of some Device elements. Possibly due to ELement referencing another node.")
def set_length(self,element_num,length):
'''Set the length of a particular element in the Device. (Elements are counted from the left-most side first, starting at #1.)
element_num : int
The element to modify.
length : float
The new length of the selected element.
'''
#prj_num = self.parent.num
#node_num = self.num
#app.subnodes[{"+ str(prj_num) +"}].subnodes[{"+ str(node_num) +"}]
fimm.Exec( self.nodestring + ".cdev.eltlist[{"+ str(int(element_num)) +"}].length={"+ str(float(length)) +"}" )
def calc(self, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0):
'''Calculate the fields (E, H, P, I, Refr.Index) along Z in the Device.
You should do this before using `get_fields()`, `get_refractive_index()` or `plot()` or similar functions, or set the `calc=True` option in those functions.
This function doesn't return anything, but just causes Fimmwave to internally calculate these parameters.
Parameters
----------
xcut, ycut = float, optional
x & y coords at which to cut the Device along Z. Both default to 0.
zpoints = integer, optional
Number of points to acquire in the field. Defaults to 3000.
zmin, zmax = float, optional
min & max z-coorinates. Defaults to 0-->Device Length (plot entire Device).
'''
if not zmax: zmax = self.get_length()
fimm.Exec(self.nodestring + ".calczfield("+ str(zpoints) +","+ str(zmin) +", "+ str(zmax) +","+ str(xcut) +","+ str(ycut) +",1)" +"\n")
# other possible functions:
# calcfieldprofile(): FUNCTION (zpos,fieldType[0-total,1-fwd,2-bwd,3-field vecs],refelt[0-wrt cpt,N-wrt elt N],refpt[0-beg elt,1-end elt]):
# stores field Xsection in fieldprofile. refpt,refelt default to 0,0
# fieldprofile
# PDObject field Xsection evaluated by calcfieldprofile
#
self.calculated=True
#end calc()
    def set_material_database(self, path):
        '''Set the path to the material database (*.mat) file. Only needed if you are defining materials using this database ('mat'/material type waveguides instead of 'rix'/refractive index). This sets a materials file that will be used only by this Device.
        Although waveguide nodes can specify their own (different) materials files, it is recommended that a global file be used instead since FimmProp Devices do not accept multiple materials files (to avoid confusion and identically-named materials from different files). The single global file can be set to `include` any other materials files.

        Parameters
        ----------
        path : string
            Absolute or relative path to the material database file. `path` will be automatically converted to an absolute path, as a workaround to a FimmProp Device Node bug that causes it to only accept absolute paths.

        Raises
        ------
        IOError
            If no file exists at `path` (after conversion to an absolute path).
        '''
        import os
        path = os.path.abspath(path) # convert to absolute path
        if os.path.isfile(path):
            self.matDB = str(path)
            # NOTE(review): the path is interpolated unquoted into the FimmWave
            # command string -- confirm `setmaterbase` accepts a bare path.
            fimm.Exec( self.nodestring + '.setmaterbase(%s)'%(self.matDB) )
        else:
            ErrStr = "Material database file does not exist at the specified path `%s`" %(path)
            raise IOError(ErrStr)
        if DEBUG(): print "Device '%s'.matDB = "%(self.name), self.matDB
    def get_material_database(self,):
        '''Get path to this Device's material database file.

        Returns
        -------
        path : string
            Absolute path to the material database file used by this node.
        '''
        # NOTE(review): indentation reconstructed -- the FimmWave query is taken
        # to run only when `matDB` was never set on this object (mirroring the
        # lazy-default pattern of get_joint_type()).  Since __init__ sets
        # self.matDB = None, the except path may never fire; confirm whether
        # the `materbasename()` query should instead run whenever matDB is None.
        try:
            self.matDB
        except:
            if DEBUG(): print "unset global_matDB --> None"
            self.matDB = fimm.Exec( self.nodestring + '.materbasename()' )
        return self.matDB
def get_N(self,):
'''Get max number of modes solved for in this Device.
Returns
-------
N : int
Number of modes, set as 'maxnmodes' in MOLAB parameters.
'''
return int( fimm.Exec( self.nodestring + ".mlp.maxnmodes") )
def set_N(self, N):
'''Set max number of modes to solve for in this Device.
Parameters
----------
N : int
Max number of modes to solve for, set by `maxnmodes` in MOLAB parameters.
'''
fimm.Exec( self.nodestring + ".mlp.maxnmodes " + str(int(N)) )
def set_joint_type(self, jtype, jointoptions=None):
'''Set the joint type to use between each element of this Device. This option, if set, overrides each element's own options for the joint type (set by `element.set_joint_type()`).
type : { 'complete' | 'special complete' | 'normal fresnel' | 'oblique fresnel' }, case-insensitive
synonyms for 'complete' are { 0 }, and is also the default if unset.
synonyms for 'special complete' are { 3 | 'special' }
synonyms for 'normal fresnel' are { 1 | 'fresnel' }
synonyms for 'oblique fresnel' are { 2 }
jointoptions : Dictionary{} of options. Allows for the Device.buildnode() to set various joint options, such as angle etc. Please see help(Device) for what the possible options are.
'''
if isinstance(jtype, str): jtype=jtype.lower() # make lower case
if jtype == 0 or jtype == 'complete':
self.__jointtype = 0
if jtype == 1 or jtype == 'normal fresnel' or jtype == 'fresnel':
self.__jointtype = 1
if jtype == 2 or jtype == 'oblique fresnel':
self.__jointtype = 2
if jtype == 3 or jtype == 'special complete' or jtype == 'special':
self.__jointtype = 3
if isinstance(jointoptions, dict):
self.__jointoptions=jointoptions
elif jointoptions!=None:
ErrStr = "set_joint_type(): `jointoptions` should be a dictionary. See help(Device) for the available options."
raise ValueError(ErrStr)
#end set_joint_type()
    def get_joint_type(self, *args):
        '''get_joint_type( [asnumeric] )
        Get the joint type that will be placed between each waveguide in this Device.

        Parameters
        ----------
        asnumeric : boolean, optional
            A True value will cause the output to be numeric, rather than string. See help(set_joint_type) for the numerical/string correlations. False by default.
            (FYI, `asnumeric=True` is used in Device.buildNode() )

        Returns
        -------
        The joint type as a string, or as integer if `asnumeric` was True.
        If unset, returns `None` (regardless of `asnumeric`), in which case the element's settings for joint-type will be used (`element.get_joint_type()`).
        '''
        # Lazily default the private attribute the first time it is queried.
        try:
            self.__jointtype # see if variable exists
        except AttributeError:
            # if the variable doesn't exist yet.
            if DEBUG(): print "unset " + self.name + ".__jointtype --> None "
            self.__jointtype = None
        if len(args) == 0: asnumeric = False # output as string by default
        if len(args) == 1: asnumeric = args[0]
        if len(args) > 1: raise ValueError("get_joint_type(): Too many arguments provided.")
        if asnumeric:
            out= self.__jointtype
        else:
            # Translate the stored integer code back into its readable name.
            if self.__jointtype == 0:
                out= 'complete'
            elif self.__jointtype == 1:
                out= 'normal fresnel'
            elif self.__jointtype == 2:
                out= 'oblique fresnel'
            elif self.__jointtype == 3:
                out= 'special complete'
            elif self.__jointtype == None:
                out= None
        #if DEBUG(): print "get_joint_type(): ", out
        return out
    #end get_joint_type()
def unset_joint_type(self):
'''Unset the Device-level joint type, so each element's settings will be used instead.
`DeviceObj.get_joint_type()` will consequently return `None`.'''
self.__jointtype = None
def set_wavelength(self, wl):
'''Set the wavelength for the entire Device. Elements will all use this wavelength in their MOLAB options.
Note that, after building, the Device wavelength (`DeviceObj.get_wavelength()` ) can be different from the global pyFIMM wavelength (`pyFIMM.get_wavelength`).
The global setting (`pyFIMM.set_wavelength()`) is acquired when the object is first created.
Parameters
----------
wl : float
The wavelength in micrometers.
'''
if self.built:
self.__wavelength = float(wl)
fimm.Exec( self.nodestring + ".lambda = " + str(self.__wavelength) + " \n" )
else:
self.__wavelength = float(wl)
def get_wavelength(self):
'''Return the wavelength (float) for this specific Device (may be different from the global pyFIMM wavelength in `pyFIMM.get_wavelength()` after the Device is built).'''
return self.__wavelength
def set_input(self,mode_vector, side=None, normalize=False, warn=False):
'''Set input ("incident") field vector - takes a list with amplitude coefficients (complex) for each mode number, as entered into the "Vector" mode of the "View > Set Input" menu of a FimmWave Device.
`set_inc_field()` is an alias to this function.
Parameters
----------
mode_vector : array-like or integer
To set the input as a vector (list of mode amplitudes), pass a List of complex amplitudes for each mode's excitation amplitude/phase. Length of amplitude-list must equal the number of lateral modes, get_N() (ie. every mode of the waveguide should have a specified amplitude).
To set the input as just a modenumber, pass an integer.
To turn off an input, pass `None`.
side : { 'left' | 'right' }, required, case-insensitive
Which side to inject fields into. The string "LHS" (left-hand side) or "RHS" (right-hand side) should be used. Synonyms for "LHS" are "left" and "L", and correspondingly for "RHS" the synonyms are "right" and "R".
Defaults to 'LHS' for backwards compatibility.
normalize : boolean, optional
Tell fimmwave to normalize the input vector (just sets the "normalize" flag in the `Set Input` Window). Default = False.
warn : Boolean
Print warning messages? True by default.
Use `get_input_field()` to return the currently set input for the Device.
Examples
--------
For a device names 'Dev1', with set_N() set to 5 (five modes calculated), set the input field to inject only the first mode, into the right-hand side of the device, as so:
>>> Dev1.set_input_field( [1,0,0,0,0], side='right')
To turn off the input on the left side, do:
>>> Dev1.set_input_field( [0,0,0,0,0], side='left')
or, equivalently:
>>> Dev1.set_input_field( numpy.zeros( pyFIMM.get_N() ) , side='left')
'''
if side == None:
side='lhs' # default value if unset
if warn or WARN(): print "WARNING: Device '%s'.set_input_field():"%self.name + " set to Left-Hand-Side input, since unspecified."
else:
side = side.lower().strip() # make lower case, strip whitespace
if (side == 'lhs') or (side == 'left') or (side == 'l'):
sidestr = 'lhs'
self.input_field_left = mode_vector
elif (side == 'rhs') or (side == 'right') or (side == 'r'):
sidestr = 'rhs'
self.input_field_right = mode_vector
else:
ErrStr = "Device '%s'.set_input_field(): "%self.name + "Unsupported side passed: `" + str(side) + "`. \n\tPlease use 'Left' or 'Right', or see `help(pyfimm.Device.set_input_field)`."
if DEBUG(): print "side.lower() = ", side.lower()
raise ValueError(ErrStr)
'''
prj_num = self.parent.num
node_num = self.num
'''
fpString = ''
if mode_vector == None:
# if `None` was passed, Turn off input on this side by setting input = Mode 0
mode_vector = int(0)
if isinstance(mode_vector, int):
# an integer was passed, so set to mode component
fpString += self.nodestring + "." + sidestr + "input.inputtype=1" + "\n" # mode number input
fpString += self.nodestring + "." + sidestr + "input.cpt=" + str(mode_vector - 1) + "\n"
if sidestr == 'lhs':
self.input_field_left = mode_vector - 1
elif sidestr == 'rhs':
self.input_field_right = mode_vector - 1
else:
# assume an array-like was passed, so set the input as a vector
ampString = str(mode_vector[0].real)+","+str(mode_vector[0].imag)
for ii in range( 1, self.get_N() ):
ampString += ","+str(mode_vector[ii].real)+","+str(mode_vector[ii].imag)
fpString = self.nodestring + "." + sidestr + "input.inputtype=2" + "\n" # vector input
fpString += self.nodestring + "." + sidestr + "input.setvec(" + ampString + ") \n"
#end isinstance(mode_vector)
if normalize:
fpString += self.nodestring + "." + sidestr + "input.normalise=1 \n"
else:
fpString += self.nodestring + "." + sidestr + "input.normalise=0 \n"
fimm.Exec(fpString)
#end set_input_field()
# Alias for the same function:
set_inc_field = set_input
set_input_vector = set_input
def set_input_field(self):
'''DEPRECATED: Perhaps you mean to use `set_input()`.'''
raise NameError("DEPRECATED: Perhaps you mean to use `set_input()`, which accepts a field vector or mode number.")
    def get_input(self):
        '''Return the input field vector.

        Returns a list, like so [<Left-hand field> , <Right-hand field>].
        If a side has no input field, it will contain only the value `None`.
        If <Left-hand field> is itself a list, then the input type is a vector, while if an integer is returned, then the input type is just a mode number. You can check for whether the returned type is a vector as so
            >>> left_field = Dev1.get_input_field()[0] # get the left-hand input
            >>> isinstance( left_field , int ) # True --> mode-number input

        Examples
        --------
        Dev.set_input_field( [1,0,0], side='left') # vector-type input
        Dev.get_input_field()
        >>> [ [1,0,0], None ]
        # Which indicates that there is no Right-hand input, and the left-hand input launches only the 1st mode.
        '''
        """
        # Obsolete - FimmProp can't return the current Vector input, so just using internal values
        Ltype = fimm.Exec( self.nodestring + ".lhsinput.inputtype" )
        if Ltype==1:
            '''mode number'''
            self.input_field_left = fimm.Exec( self.nodestring + ".lhsinput.cpt" )
        elif Ltype == 2:
            self.input_field_left = fimm.Exec( self.nodestring + ".lhsinput.getvec" )
        else:
            raise self.name + ".get_input_field [left]: Unsupported input field type. Only Mode number & Vector are supported."
        Rtype = fimm.Exec( self.nodestring + ".lhsinput.inputtype" )
        if Rtype==1:
            '''mode number'''
            self.input_field_right = fimm.Exec( self.nodestring + ".lhsinput.cpt" )
        elif Rtype == 2:
            self.input_field_right = fimm.Exec( self.nodestring + ".lhsinput.getvec" ) <<--- doesn't exist
        else:
            raise self.name + ".get_input_field [right]: Unsupported input field type. Only Mode number & Vector are supported."
        """
        out=[]
        # A side whose stored vector is all zeros counts as "no input" --> None.
        if np.all( np.array(self.input_field_left) == 0 ):
            out.append(None)
        else:
            out.append( self.input_field_left )
        if np.all( np.array(self.input_field_right) == 0 ):
            out.append(None)
        else:
            out.append( self.input_field_right )
        return out
    #end get_input_field()

    # Alias for the same function:
    get_inc_field = get_input
    get_input_vector = get_input
    def get_output_vector(self, side='right', direction='right'):
        '''Return the output field vector, for a given input field vector (`set_input_field()`).
        FimmProp calculates the scattering matrix of the Device and `propagates` the input field vectors (see `DeviceObj.set_input_field()` ) through the device, resulting in the output mode vector.
        This function does not currently output the 2D field profile, but only a field vector, which can be used to calculate the output field profile using the mode basis set and the field vector as the coefficients of each mode.

        Parameters
        ----------
        side : { 'left' | 'right' }, case-insensitive, optional
            Which side to inject fields into. The string "left" (left-hand side) or "right" (right-hand side) should be used. Synonyms for "LHS" are "left" and "L", and correspondingly for "RHS" the synonyms are "right" and "R".
            Defaults to 'right' side for convenience.
        direction = string { 'fwd', 'bwd' }, case insensitive, optional
            Which propagation direction to return vectors for. Defaults to 'right'.
            "forward" & "backwards" correspond with propagation in the +z & -z directions, respectively.
            Synonyms for 'fwd' include 'forward', 'f', 'right', 'r', '+z'.
            Synonyms for 'bwd' include 'backward', 'b', 'left', 'l', '-z'.

        Returns
        -------
        Vect : list
            List of length `get_N()`, with complex values corresponding to each mode in the basis-set.
        '''
        side = side.lower().strip() # make lower case, strip whitespace
        if (side == 'lhs') or (side == 'left') or (side == 'l'):
            #sidestr = 'lhs'
            sidenum = 0 #LHS
        elif (side == 'rhs') or (side == 'right') or (side == 'r'):
            #sidestr = 'rhs'
            sidenum = 1 #RHS
        else:
            ErrStr = "get_output_field(): Unsupported side passed: `" + side + "`. \n\tPlease use 'Left' or 'Right', or see `help(pyfimm.Device.set_inc_field)`."
            if DEBUG(): print "side.lower() = ", side.lower()
            raise ValueError(ErrStr)
        direction = direction.strip().lower() # make lower case, strip whitespace
        '''Always returning vectors, so dirnum 0-Tot, 1-fwd, 2-bwd ignored - only needed for getting XY field profile.'''
        if direction=='fwd' or direction=='forwards' or direction=='forward' or direction=='f' or direction=='right' or direction=='r' or direction=='+z':
            dirstr = 'fwd'
            #dirnum = 1
        elif direction=='bwd' or direction=='backwards' or direction=='backward' or direction=='b' or direction=='left' or direction=='l' or direction=='-z':
            dirstr = 'bwd'
            #dirnum = 2
        else:
            ErrStr = "Device.get_output_field(): Unrecognized `direction` passed: `%s`.\n\t"%(direction) + "Please use 'Left' or 'Right', or see `help(pyfimm.Device.set_inc_field)`. "
            raise ValueError(ErrStr)
        dirnum = 3 # calculate field vectors, as opposed to output field
        prj_num = self.parent.num
        node_num = self.num
        #app.subnodes[{"+str(prj_num)+"}].subnodes[{"+str(node_num)+"}]
        # Ask FimmProp to propagate the input through the scattering matrix:
        fpString = self.nodestring + ".calcoutputfield(" + str(dirnum) + "," + str(sidenum) + ") " +"\n"
        ret = fimm.Exec(fpString)
        #if DEBUG(): print "get_output_vector():calcoutputfield():", ret
        #app.subnodes[{"+str(prj_num)+"}].subnodes[{"+str(node_num)+"}]
        # Then fetch the resulting fwd/bwd mode coefficients:
        fpString = self.nodestring + "." + dirstr + "coeffs() " +"\n"
        out = fimm.Exec(fpString)
        return out[0][1:] # strip the useless matrix chars `None` & `EOL` that FimmWave returns
    #end get_output_vector()
def get_input_field(self, component='I', mode_vector=None, side='left', include_pml=True):
    '''Return the input field. Useful for viewing what a superposition of the various basis modes would look like.
    Parameters
    ----------
    component = {'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' | 'Px' | 'Py' | 'Pz' | 'I' }, case-insensitive, optional
        Plot the specified field component along the Z direction.
        'E' is electric field, 'H' is magnetic field, 'P' is the Poynting vector, 'I' is Intensity, and 'x/y/z' chooses the component of each vector to return.
        Defaults to "I".
    mode_vector : array-like, optional
        The mode-vector to plot.  The mode-vector is a list with `get_N()` elements (as used in `Device.set_input()`), where each element is the amplitude & phase coefficient of each waveguide mode.  Using the modes as a basis-set, you can construct any mode profile, as mode modes are included in the calculation.
        If not specified, will use the currently-set input field, (Dev.input_field_left/right) corresponding to the chosen `side`.
    side : { 'left' | 'right' }, optional
        Which side of the device to get the launch mode for.
    include_pml : { True | False }, optional
        Include any perfectly-matched layers in the plot?  True by default.
    Returns
    -------
    2D array (transposed) holding the superposition of the basis-mode fields,
    weighted by `mode_vector`.  Element type matches what `Mode.get_field()` returns.
    '''
    # NOTE: reference snippet kept from development, showing how Mode objects are
    # normally constructed for a Waveguide vs. for a Device element:
    """
    def mode(self,modeN):
        '''Waveguide.mode(int): Return the specified pyFimm Mode object for this waveguide.'''
        return Mode(self, modeN,"app.subnodes[{"+str(self.parent.num)+"}].subnodes[{"+str(self.num)+"}].evlist.")
    For Device:
        app.subnodes[1].subnodes[3].cdev.eltlist[1].wg.evlist.update
        = self.nodestring + ".cdev.eltlist[1].wg.evlist."
    """
    component = component.strip().lower()
    modelist = range(0, self.get_N() )    # list like [0,1,2,3]
    sideorig = side
    side = side.lower().strip()
    # Pick the element whose waveguide modes form the basis set, and default
    # the mode-vector to the currently-set input field for that side:
    if side == 'left' or side == 'l' or side == 'lhs':
        if mode_vector is None: mode_vector = self.input_field_left
        n = self.elementpos[0]  # 1st element
    elif side == 'right' or side == 'r' or side == 'rhs':
        if mode_vector is None: mode_vector = self.input_field_right
        n = self.elementpos[-1] # last element
    else:
        ErrStr = "Unrecognized option for `side`: %s"%(sideorig)
        raise ValueError(ErrStr)
    # Normalization deliberately disabled - coefficients are used as given:
    '''
    # normalize mode_vector
    mag = np.sum( [np.abs(x) for x in mode_vector] )
    mode_vector = np.array(mode_vector)/float(mag)
    '''
    # calculate modes of the element:
    if DEBUG(): print 'Device "%s"' % self.name + '.plot_input_field(): Calculating modes of element ' + str(n) + '...'
    fimm.Exec( self.nodestring + ".cdev.eltlist[%i].wg.evlist.update()" % n )
    # Wrap the element's eigenmode list in a pyFimm Mode object to extract fields:
    modes = Mode(self, modelist, self.nodestring + ".cdev.eltlist[%i].wg.evlist." % n )
    fields = modes.get_field( component , include_pml=include_pml, as_list=True )  # returns list of all the fields
    if DEBUG(): print "Dev.get_input_field():\n", "np.shape(fields) = ", np.shape(fields), "\n", "len(fields)=", len(fields), "\n", "len(fields[0])=", len(fields[0])
    # Accumulate the weighted superposition of all basis-mode fields:
    superfield = np.zeros_like( fields[0] )   # zeros with same dims as returned field
    for i, field in enumerate(fields):
        if DEBUG(): print "i=",i, "\n","mode_vector[i]=", mode_vector[i], "\n", "np.shape(field)=", np.shape(field)
        if DEBUG(): print "get_input_field(): min/max(field) = %f/%f" % (np.min(np.array(field).real), np.max(np.array(field).real))
        superfield = superfield + np.array(field) * mode_vector[i]
    return superfield.transpose()
    # NOTE(review): possible future simplification, per vendor support:
    '''
    - can get FimmWave to do this?
        - Provided that you are only launching light from one end of the Device (either LHS or RHS) then the best way to do this is to export the forward (LHS) or backward (RHS) field profile at the launching end of the Device; this is the equivalent of right-click "\View XY field at..." in the GUI.
    '''
#end get_input_field()

# Alias for the same function:
get_inc_field = get_input_field
def plot_input_field(self, component='I', mode_vector=None, side='left', include_pml=True, title=None, annotations=False, return_handles=False, plot_type='pseudocolor'):
'''Plot the input field. Useful for viewing what a superposition of the various basis modes would look like.
Parameters
----------
component = {'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' | 'Px' | 'Py' | 'Pz' | 'I' }, case-insensitive, optional
Plot the specified field component along the Z direction.
'E' is electric field, 'H' is magnetic field, 'P' is the Poynting vector, 'I' is Intensity, and 'x/y/z' chooses the component of each vector to return.
Defaults to "I".
mode_vector : array-like, optional
The mode-vector to plot. The mode-vector is a list with `get_N()` elements (as used in `Device.set_input()`), where each element is the amplitude & phase coefficient of each waveguide mode. Using the modes as a basis-set, you can construct any mode profile, as mode modes are included in the calculation.
If not specified, will use the currently-set input field, (Dev.input_field_left/right) corresponding to the chosen `side`.
side : { 'left' | 'right' }, optional
Which side of the device to get the launch mode for.
include_pml : { True | False }, optional
Include any perfectly-matched layers in the plot? True by default.
title : string, optional
Will prepend this text to the output filename, and do the same to the Plot Title.
If not provided, the name of the passed Waveguide component, Mode Number & Field Component will be used to construct the filename & plot title.
annotations : boolean, optional
If true, the effective index, mode number and field component will written on each mode plot. True by default.
plot_type : { 'pseudocolor' | 'contourf' }, optional
Plot the modes as pseudo-color (interpolated coloring, default) or filled contour?
return_handles : { True | False }, optional
If True, will return handles to the figure, axes and images. False by default.
Returns
-------
fig, axes, imgs
The matplotlib figure, axis and image (`pyplot.imshow()` ) handles. Only returned if `return_handles=True`
`fig` is the handle to the whole figure, allowing you to, for example, save the figure yourself (instead of using `Mode.save_plot()` ) via `fig.savefig(pat/to/fig.png)`.
`ax` is the handle of the single axis object on the figure.
`cont` is the handle to the contourf() plot (filled-contour).
'''
side = side.lower().strip()
if side == 'left' or side == 'l' or side == 'lhs':
sidestr = 'lhs'
n=1 # 1st element
if mode_vector is None: mode_vector = self.input_field_left
elif side == 'right' or side == 'r' or side == 'rhs':
sidestr = 'rhs'
n = self.elementpos[-1] # last element
if mode_vector is None: mode_vector = self.input_field_right
field = self.get_input_field(component=component, mode_vector=mode_vector, side=side, include_pml=include_pml)
if title:
plot_title = title + " - %s=%s" %(side, mode_vector)
else:
plot_title = '"%s": ' % self.name + "%s=%s" %(side, mode_vector)
# Options for the subplots:
sbkw = {'axisbg': (0.15,0.15,0.15)} # grey plot background
fig, ax = plt.subplots(nrows=1, ncols=1, subplot_kw=sbkw)
fig.suptitle(plot_title, fontsize=10) # figure title
fig.canvas.draw() # update the figure
# generate X & Y coords:
modestring = self.nodestring + ".cdev.eltlist[%i]"%(n) + ".get%sevlist"%(sidestr) + ".list[1].profile.data"
d = get_amf_data( modestring )
if DEBUG():
import pprint
print "Device.plot_input_field(): get_amf_data() returned:"
pprint.pprint(d)
x = np.linspace( d['xmin'], d['xmax'], num=d['nx'], endpoint=True )
y = np.linspace( d['ymin'], d['ymax'], num=d['ny'], endpoint=True )
if DEBUG(): print "(x, y) = ", x, y
#x = range( np.shape(field)[1] )
#y = range( np.shape(field)[0] )
if DEBUG(): print "Dev.plot_input_field(): min/max(field) = %f/%f" % (np.min(np.array(field).real), np.max(np.array(field).real))
maxfield = np.max( np.abs( np.array(field).real ) )
if plot_type is 'pseudocolor':
cont = ax.pcolor( np.array(x), np.array(y), np.array(field)[:-1,:-1] , vmin=-maxfield, vmax=maxfield, cmap=cm_hotcold) # cm_hotcold, cm.hot, RdYlBu, RdPu, RdBu, PuOr,
elif plot_type is 'contourf':
cont = ax.contourf( np.array(x), np.array(y), np.array(field)[:-1,:-1] , vmin=-maxfield, vmax=maxfield, cmap=cm_hotcold) # cm_hotcold, cm.hot, RdYlBu, RdPu, RdBu, PuOr,
else:
ErrStr = 'Device "%s".plot_input_field(): ' % self.name + 'Unrecognized plot_type: `%s`. ' % plot_type + 'Please use `contour` or `psuedocolor` or leave unsepcified.'
raise ValueError( ErrStr )
ax.set_xlim( d['xmin'], d['xmax'] )
ax.set_ylim( d['ymin'], d['ymax'] )
fig.canvas.draw()
if return_handles: return fig, ax, cont
#end plot_input_field()
# Alias for the above function:
plot_inc_field = plot_input_field
def set_input_beam(self, beam_pol, ref_z, h, w, inc_n, hor_tilt, ver_tilt, x_offset, y_offset, z_offset):
    '''Configure the Device's LHS input as a gaussian beam.
    Parameters
    ----------
    beam_pol : { 'TE' | 'TM' }, case-insensitive
        Beam polarization.  Any other value selects 45 degrees (halfway between
        TE & TM) with a 90 degree phase delay.
    ref_z : float
        If ref_z == 0 a collimated beam is used; otherwise a spherically-diverging
        beam with pivot distance of reference plane == ref_z.
    h, w : float
        Gaussian beam height / width.
    inc_n : float
        Refractive index of the input medium.
    hor_tilt, ver_tilt : float
        Horizontal / vertical tilt of the input beam.
    x_offset, y_offset, z_offset : float
        Offsets of the input beam's pivot point (around which to tilt).
    '''
    prj_num = self.parent.num   # legacy lookups, retained for parity with older code
    node_num = self.num
    # Polarization angles (theta, phi) in degrees:
    pol = beam_pol.strip().lower()
    if pol == 'te':
        theta, phi = "0", "0"
    elif pol == 'tm':
        theta, phi = "90", "0"
    else:
        theta, phi = "45", "90"     # default: 45deg pol., 90deg phase delay
    # Assemble every FimmProp assignment as one command per list entry:
    cmds = [
        ".lhsinput.theta=" + theta,
        ".lhsinput.phi=" + phi,
        ".lhsinput.inputtype=3",    # input type = beam
        ".lhsinput.iproftype=1",    # gaussian profile
        ".lhsinput.phasetype=" + ("0" if ref_z == 0 else "1"),  # 0=collimated, 1=spherical divergence
        ".lhsinput.gaussh={" + str(h) + "}",
        ".lhsinput.gaussw={" + str(w) + "}",
        ".lhsinput.n0={" + str(inc_n) + "}",
        ".lhsinput.h_tilt={" + str(hor_tilt) + "}",
        ".lhsinput.v_tilt={" + str(ver_tilt) + "}",
        ".lhsinput.pivxy.xalign=0",
        ".lhsinput.pivxy.xoff={" + str(x_offset) + "}",
        ".lhsinput.pivxy.yalign=0",
        ".lhsinput.pivxy.yoff={" + str(y_offset) + "}",
        ".lhsinput.pivz={" + str(z_offset) + "}",
        ".lhsinput.refdist={" + str(ref_z) + "}",
        ".lhsinput.refrot=0",
    ]
    # Single Exec call with all commands newline-joined (faster than many calls):
    fimm.Exec( "\n".join([self.nodestring + c for c in cmds]) )
#end set_input_beam()

# Alias to the same function:
set_coupling_beam = set_input_beam
def get_coupling_loss(self, mode_n):
    '''Coupling loss for the given mode, in dB.
    Wraps FimmProp's `CalcModePower` command and converts the returned
    power fraction to decibels.
    Parameters
    ----------
    mode_n : integer
        Zero-based mode number to compute the loss for (FimmProp is 1-based,
        hence the +1 below).
    '''
    prj_num = self.parent.num   # legacy lookups (unused), kept for parity
    node_num = self.num
    cmd = self.nodestring + ".calcmodepower(" + str(mode_n + 1) + ")"
    power_frac = fimm.Exec(cmd)
    return -10 * log10(power_frac)
#end get_coupling_loss()

# Alias to the same function:
coupling_loss = get_coupling_loss
def get_coupling_efficiency(self, mode_n):
    '''Coupling efficiency for the given mode, as a fraction (0 -> 1).
    Wraps FimmProp's `CalcModePower` command.
    Parameters
    ----------
    mode_n : integer
        Zero-based mode number to compute the efficiency for (FimmProp is
        1-based, hence the +1 below).
    '''
    prj_num = self.parent.num   # legacy lookups (unused), kept for parity
    node_num = self.num
    return fimm.Exec(self.nodestring + ".calcmodepower(" + str(mode_n + 1) + ")")
#end get_coupling_efficiency()

# Alias to same function:
coupling_efficiency = get_coupling_efficiency
###### Return Scattering Matrix ######
def R12(self):
    '''Reflection scattering matrix at the Left port.
    The scattering matrix shows how the device converts one mode into a
    superposition of supported modes; each entry is a complex coefficient.
    Returns
    -------
    S[outputmode][inputmode] : numpy ndarray
        NxN array, where N is the number of modes (see `obj.get_N()`).
    '''
    raw = self.Exec(".cdev.smat.ll")   # FimmProp: left-to-left sub-matrix
    return np.array(raw)
def S_ll(self):
    '''Scattering matrix, Left-to-Left; alias for R12().  See `help(R12)` for more info.'''
    return self.R12()
def T12(self):
    '''Transmission scattering matrix from Left to Right.
    The scattering matrix shows how the device converts one mode into a
    superposition of supported modes; each entry is a complex coefficient.
    Returns
    -------
    S[outputmode][inputmode] : numpy ndarray
        NxN array, where N is the number of modes (see `obj.get_N()`).
    '''
    raw = self.Exec(".cdev.smat.lr")   # FimmProp: left-to-right sub-matrix
    return np.array(raw)
def S_lr(self):
    '''Scattering matrix, Left-to-Right; alias for T12().  See `help(T12)` for more info.'''
    return self.T12()
def R21(self):
    '''Reflection scattering matrix at the Right port.
    The scattering matrix shows how the device converts one mode into a
    superposition of supported modes; each entry is a complex coefficient.
    Returns
    -------
    S[outputmode][inputmode] : numpy ndarray
        NxN array, where N is the number of modes (see `obj.get_N()`).
    '''
    raw = self.Exec(".cdev.smat.rr")   # FimmProp: right-to-right sub-matrix
    return np.array(raw)
def S_rr(self):
    '''Scattering matrix, Right-to-Right; alias for R21().  See `help(R21)` for more info.'''
    return self.R21()
def T21(self):
    '''Transmission scattering matrix from Right to Left.
    The scattering matrix shows how the device converts one mode into a
    superposition of supported modes; each entry is a complex coefficient.
    Returns
    -------
    S[outputmode][inputmode] : numpy ndarray
        NxN array, where N is the number of modes (see `obj.get_N()`).
    '''
    raw = self.Exec(".cdev.smat.rl")   # FimmProp: right-to-left sub-matrix
    return np.array(raw)
def S_rl(self):
    '''Scattering matrix, Right-to-Left; alias for T21().  See `help(T21)` for more info.'''
    return self.T21()
################################################################
#### ####
#### Plotting etc. ####
#### ####
################################################################
'''Each of these plotting/field methods requires that the input field has been set via `set_input_field()`.'''
def plot_refractive_index(self, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0, calc=False, return_handles=False, title=None):
'''Plot the refractive index versus Z.
Calls `Device.plot()` with `component="index"`.
See `help(Device.plot)` for info on other arguments/options.
'''
if not calc:
if not self.calculated:
print "Device.plot_refractive_index(): Calculating the Device..."
calc=True
return self.plot('rix', zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction='total', calc=calc, return_handles=return_handles, title=title)
def get_refractive_index(self, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0, calc=False):
'''Calls `Device.get_field()` to return the refractive index of the device. The `component` & `direction` options have been removed as compared with `get_field()`.
component : { X | Y | Z }, optional - NOT IMPLEMENTED YET
Which component of the refractive index tensor to return. For simple isotropic materials, these are all identical. Defaults to Z.
See `help(Device.get_field)` for info on the other options.
'''
if DEBUG(): print "Device.get_refractive_index(): "
if not calc:
if not self.calculated:
print "Device.get_refractive_index(): Calculating the Device..."
calc=True
return self.get_field( 'rix', zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction='total', calc=calc)
def get_field(self, component, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0, direction='total', calc=False, warn=False):
'''Return the field specified by `component` versus Z.
Expects that the input field has been set with `set_input_field()`.
component = {'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' | 'Px' | 'Py' | 'Pz' | 'I' }, case-insensitive, required
Return the specified field component along the Z direction.
'E' is electric field, 'H' is magnetic field, 'P' is the Poynting vector, 'I' is Intensity, and 'x/y/z' chooses the component of each vector to return.
'index', 'rix' or 'ri' will return the refractive index, a functionality provided by the more convenient function `get_refractive_index()` but otherwise identical to this func.
direction = string { 'fwd', 'bwd', 'total' }, case insensitive, optional
Which field propagation direction to plot. Defaults to 'total'.
Note that the propagation direction should match up with which side the input field was launched. Eg. for `set_input_field([1,0,0], side="left")` you'll want to use `direction="fwd"`, meaning propagating to the right (+z).
Synonyms for 'fwd' include 'forward', 'f', 'right', 'r', '+z'.
Synonyms for 'bwd' include 'backward', 'b', 'left', 'l', '-z'.
Synonyms for 'total' include 'tot' & 't'.
Defaults to 'total'.
xcut, ycut = float, optional
x & y coords at which to cut the Device along Z. Both default to 0.
zpoints = integer, optional
Number of points to acquire in the field. Defaults to 3000.
zmin, zmax = float, optional
min & max z-coorinates. Defaults to 0-->Device Length (plot entire Device).
calc = { True | False }
Tell FimmProp to calculate the fields? Only needs to be done once to store all field components & refractive indices (for a given `zpoints`, `xcut` etc.), so it is useful to prevent re-calculating after the first time. False by default.
cut = tuple of two floats - NOT IMPLEMENTED YET
Specify coordinate plane on which to plot fields. Default (0,0).
If dir='Z', then tuple is (x,y).
If dir='Y', then tuple is (x,z).
If dir='X', then tuple is (y,z).
warn : Boolean
Print wanring messages? True by default.
Returns
-------
List of complex values corresponding to field values, starting at z=0 and ending at specified `zmax`.
Examples
--------
Get the Total Ex field at x,y=(0,0) along Z, along the whole Device.
>>> field = Dev.fields('Ex')
Get the refractive index at x,y=(0,0) along Z, along the whole Device.
>>> field = Dev.fields('index')
'''
# 1st arg: Figure out which component string to send FimmWave:
component = component.lower().strip()
if component == 'Ex'.lower():
compstr='Ex'
elif component == 'Ey'.lower():
compstr='Ey'
elif component == 'Ez'.lower():
compstr='Ez'
elif component == 'Hx'.lower():
compstr='Hx'
elif component == 'Hy'.lower():
compstr='Hy'
elif component == 'Hz'.lower():
compstr='Hz'
elif component == 'I'.lower():
compstr='Intensity'
elif component == 'px':
compstr='Pxx'
elif component == 'py':
compstr='Pyy'
elif component == 'pz':
compstr='Pzz'
elif component=='rix' or component=='index' or component=='ri':
compstr='RefZZ' # plots Z-to-Z component of RIX tensor only - assuming simple homogeneous material
else:
raise ValueError("Device.field(): Invalid field component requested: `"+str(component)+"`.")
if direction != 'Total':
direction = direction.lower().strip() # lower case & strip whitespace
if direction=='fwd' or direction=='forwards' or direction=='forward' or direction=='f' or direction=='right' or direction=='r' or direction=='+z':
dirstr = 'Fwg'
elif direction=='bwd' or direction=='backwards' or direction=='backward' or direction=='b' or direction=='left' or direction=='l' or direction=='-z':
if component=='i':
'''Due to Fimmwave typo bug: should be Title case. '''
dirstr = 'bwg' # fieldstr for bwd intensity is 'Intensitybwd'
else:
'''for every other component, it's "ExBwg" with TitleCase. '''
dirstr = 'Bwg'
elif direction=='total' or direction=='tot' or direction=='t':
dirstr = 'Total'
else:
ErrStr = "Device.get_field(): Unrecognized `direction` passed: `%s`."%(direction)
raise ValueError(ErrStr)
fieldstr = compstr + dirstr #attribute of FimmWave `zfieldcomp` object
if not zmax: zmax = self.get_length()
# Extract the field values:
NumPoints = zpoints # params for calczfield()
xpoint = xcut; ypoint=ycut
prj_num = self.parent.num
node_num = self.num
# Tell FimmProp to calculate the Z fields:
if not calc:
if not self.calculated:
if warn or WARN(): print "WARNING: Device.get_field(): Device `%s` was not calculated before extracting fields - may return [zeros]."%(self.name)
#print "Device.get_field(): Calculating the Device..."
#self.calc(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut)
else:
self.calc(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut)
#if calc: self.calc(zpoints=NumPoints, zmin=zmin, zmax=zmax, xcut=xpoint, ycut=ypoint)
#fimm.Exec("app.subnodes[{"+ str(prj_num) +"}].subnodes[{"+ str(node_num) +"}]."+"calczfield("+ str(NumPoints) +","+ str(zmin) +", "+ str(zmax) +","+ str(xpoint) +","+ str(ypoint) +",1)" +"\n")
# Extract the field values:
fpString = self.nodestring + "."+"zfieldcomp."+fieldstr+"\n"
zfield = fimm.Exec(fpString)
zfield = zfield[0][1:] # remove the first `None` entry & EOL char.
return zfield
#end field()
# Alias to same function:
field = get_field
def plot(self, component, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0, direction='total', refractive_index=False, return_handles=False, calc=False, title=None, warn=False):
    '''Plot the fields in this device along the Z (propagation) direction.
    Requires that the input field has been set with `set_input_field()`.
    Parameters
    ----------
    component = {'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' | 'Px' | 'Py' | 'Pz' | 'I' }, case-insensitive, required
        Plot the specified field component along a specified direction.
        'E' is electric field, 'H' is magnetic field, 'P' is the Poynting vector, 'I' is Intensity, and 'x/y/z' chooses the component of each vector to return.
        'index', 'rix' or 'ri' will plot the refractive index, a functionality also provided by the argument `refractive_index=True`.
    direction = string { 'fwd', 'bwd', 'total' }, case-insensitive, optional
        Which field propagation direction to plot.  Defaults to 'total'.
        Note that the propagation direction should match up with which side the input field was launched.  Eg. for `set_input_field([1,0,0], side="left")` you'll want to use `direction="fwd"`.
        Synonyms for 'fwd' include 'forward', 'f', 'right', 'r', '+z'.
        Synonyms for 'bwd' include 'backward', 'b', 'left', 'l', '-z'.
        Synonyms for 'total' include 'tot' & 't'.
    refractive_index = { True | False }, optional
        If True, will plot the refractive index of the structure on a second axis, with shared X-axis (so zooming etc. zooms both X axes).  Default is False.
    xcut, ycut = float, optional
        x & y coords at which to cut the Device along Z.  Both default to 0.
    zpoints = integer, optional
        Number of points to acquire in the field.  Defaults to 3000.
    zmin, zmax = float, optional
        min & max z-coordinates.  Defaults to 0-->Device Length.
    calc = { True | False }, optional
        Tell FimmProp to calculate the fields?  Only needs to be done once to store all field components & refractive indices (for a given `zpoints`, `xcut` etc.), so it is useful to prevent re-calculating after the first time.
    return_handles = { True | False }, optional
        If True, will return handles to the figure, axes, legends and lines.  False by default.
    title = str, optional
        Pre-pend some text to the plot title.
    warn : boolean, optional
        Print warning messages for unrecognized arguments etc.?  Defaults to False.
    Returns
    -------
    handles : tuple of (fig1, axes, lines)
        If `return_handles=True`, returns matplotlib handles to the plot's objects, as so:
        fig1 : main figure object
        axes : Each axis.  If `refractive_index=True` then axes = ( Field_Axis , RI_Axis ), otherwise just = Field_Axis handle.
        lines : Each curve plotted.  If `refractive_index=True` then lines = ( RI_line, Field_Line ), otherwise the RI_Line is omitted.
    Examples
    --------
    Plot Fields of the Device given some injected mode vector:
        >>> DeviceObj.set_input_field( [1,0,0] )    # launch 1st mode only, into left side.
        >>> DeviceObj.set_input_field( [0,0,0], side='right' )      # launch nothing into right side.
        >>> DeviceObj.mode( 0 ).plot('Ex')    # plot Ex propagating in +z direction
        >>> DeviceObj.mode( 'all' ).plot('Hy', direction='left')    # plot Hy for all modes on one plot, propagating in left (-z) direction.
        >>> DeviceObj.mode( 0 ).plot('Ex', refractive_index=True)   # plot Ex Total of Mode 0, with Refractive Index profile plotted on separate axis
        >>> fig, axis, line, leg = DeviceObj.mode( 0 ).plot('Ex', return_handles=True)   # plot Ex Total of Mode 0 and return matplotlib handles to the figure's elements
    '''
    RIplot = refractive_index
    # Component string for plot title:
    component = component.lower().strip()
    if component == 'Ex'.lower():
        compstr='Ex'
    elif component == 'Ey'.lower():
        compstr='Ey'
    elif component == 'Ez'.lower():
        compstr='Ez'
    elif component == 'Hx'.lower():
        compstr='Hx'
    elif component == 'Hy'.lower():
        compstr='Hy'
    elif component == 'Hz'.lower():
        compstr='Hz'
    elif component == 'I'.lower():
        compstr='Intensity'
    elif component=='rix' or component=='index' or component=='ri':
        compstr='Refr. Index'   # plots Z-to-Z component of RIX tensor only - assuming simple homogeneous material
    else:
        raise ValueError("Device.plot(): Invalid field component requested.")
    # Direction for plot title:
    if direction=='fwd' or direction=='forwards' or direction=='forward' or direction=='f' or direction=='right' or direction=='r' or direction=='+z':
        dirstr = 'Right (+z)'
    elif direction=='bwd' or direction=='backwards' or direction=='backward' or direction=='b' or direction=='left' or direction=='l' or direction=='-z':
        dirstr = 'Left (-z)'
    elif direction=='total' or direction=='tot' or direction=='t' or direction=='Total':
        dirstr = 'Total'
    else:
        # Unrecognized direction: warn (non-fatal here) and pass it through verbatim;
        # `get_field()` below will raise if FimmWave can't handle it either.
        ErrStr = "Device.plot(): Unrecognized `direction` passed: `%s`."%(direction)
        #raise ValueError(ErrStr)
        if warn or WARN(): print "WARNING: Unrecognized `direction` passed: `%s`."%(direction)
        dirstr=direction
    if not calc:
        if not self.calculated:
            # Device has never been calculated; force a calculation pass:
            print "Device.plot(): Calculating the Device..."
            calc=True
    zfield = self.get_field(component, zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction=direction, calc=calc)
    # plot the field values versus Z:
    zfield = np.array(zfield)
    TotalLength = self.get_length()
    z = np.linspace( 0, TotalLength, num=len(zfield) )    # Z-coord
    if DEBUG(): print "Device.plot(): len(zfield)=%i"%(len(zfield) )
    if DEBUG(): print "np.shape(zfield)=", np.shape(zfield)
    if DEBUG(): print "z(%i) = "%len(z), z
    if RIplot:
        # Two stacked axes sharing X: field on top, refractive index below.
        # RI uses calc=False since get_field() above already calculated everything.
        rix = self.get_refractive_index(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, calc=False)
        fig1, (ax1,ax2) = plt.subplots(2, sharex=True)    # 2 axes
        # Reduce axis width to 80% to accommodate legend:
        #box = ax2.get_position()
        #ax2.set_position([ box.x0, box.y0, box.width * 0.8, box.height])
        l2 = [ ax2.plot(z, np.array(rix).real, 'g-', label="Refractive Index" ) ]   # plot RIX on 2nd subplot
    else:
        fig1, ax1 = plt.subplots(1, 1)    # 1 axis
        # Reduce axis width to 80% to accommodate legend:
        #box = ax1.get_position()
        #ax1.set_position([ box.x0, box.y0, box.width * 0.8, box.height])
    l1 = [];
    #l1 = []; l2 = []; leg1 = []; leg2=[]
    # Only the real part of the (complex) field is plotted:
    l1.append( ax1.plot(z, np.real(zfield), '-' ) )
    #leg1.append("Real")
    #end for(modenum)
    ax1.set_ylabel( "Field %s"%(compstr) )
    titlestr = "%s: %s %s vs. Z"%(self.name, compstr,dirstr)
    if title: titlestr = title + ": " + titlestr
    ax1.set_title( titlestr )
    ax1.grid(axis='both')
    #plt.legend()
    if RIplot:
        ax2.set_ylabel('Refractive Index')
        ax2.set_xlabel(r"Z, ($\mu{}m$)")
        ax2.grid(axis='both')
    else:
        ax1.set_xlabel(r"Z, ($\mu{}m$)")
    #leg = plt.legend()
    #leg = ax1.legend( loc='upper left', bbox_to_anchor=(1, 1) , fontsize='small'  )
    #leg2 = ax2.legend( loc='upper left', bbox_to_anchor=(1, 1) , fontsize='small'  )
    fig1.canvas.draw(); fig1.show()
    # return some figure handles
    if return_handles:
        if RIplot:
            return fig1, (ax1, ax2), (l1, l2)
        else:
            return fig1, ax1, l1
#end plot()
################################################################
#### ####
#### Node Builders ####
#### ####
################################################################
def buildNode(self, name=None, parent=None, overwrite=False, warn=False):
    '''Build the Fimmwave node of this Device.
    Assembles one large FimmProp command string (one Exec call is much faster
    than many) that creates the FPdeviceNode, inserts each waveguide/taper/lens
    element, sets each element length, and inserts a simple joint between
    consecutive elements.  Records element/joint positions in
    `self.elementpos` / `self.jointpos` and sets `self.built=True` on success.
    Parameters
    ----------
    name : string, optional
        Provide a name for this waveguide node.  Will overwrite a previously specified existing name.
    parent : Node object, optional
        Provide the parent (Project/Device) Node object for this waveguide.  If specified previously by Device.parent=<parentNode>, this will overwrite that setting.
    overwrite : { True | False }, optional
        Overwrite existing node of same name?  Defaults to False, which will rename the node if it has the same name as an existing node.
    warn : { True | False }, optional
        Print notification if overwriting a node?  False by default.
    Raises
    ------
    UserWarning : if this Device was already built.
    RuntimeError : if a constituent element fails to build itself.
    To Do:
    ------
    Add optional argument `build_elements = True`, which will build all passed WG objects while adding them to the Device.
    '''
    if self.built: raise UserWarning(  'Device "%s".buildNode(): Device is already built in FimmWave!  Aborting.'%(self.name) )
    if name: self.name = name
    if parent: self.set_parent(parent)
    #parent.children.append(self)
    # Superseded name-check approach, kept for reference:
    '''
    nodestring="app.subnodes["+str(self.parent.num)+"]"
    self._checkNodeName(nodestring, overwrite=overwrite, warn=warn)   # will alter the node name if needed
    '''
    #nodestring = parent.nodestring
    check_node_name(self.name, self.parent.nodestring, overwrite=overwrite, warn=warn)
    self.jointpos = []      # eltlist[] position of simple joints
    self.elementpos = []    # eltlist[] position of each waveguide element
    # New node is appended after the parent's existing subnodes (1-based):
    #N_nodes = fimm.Exec("app.subnodes["+str(self.parent.num)+"].numsubnodes()")
    N_nodes = fimm.Exec( self.parent.nodestring+".numsubnodes()")
    node_num = int(N_nodes)+1
    self.num = node_num
    prj_num = self.parent.num
    node_name = self.name
    if DEBUG(): print "Device.buildNode(): ",len(self.elements), " elements."
    # create new FimmProp Device
    fimm.Exec(self.parent.nodestring + ".addsubnode(FPdeviceNode,"+str(node_name)+")"+"\n")
    self.nodestring = self.parent.nodestring + ".subnodes[%i]"%(node_num)
    elnum = 0   # element number in the Device - 1st/left-most is 1, next is 2, next is 3.
    fpString = ""
    # set device wavelength:
    fpString += self.nodestring + ".lambda = " + str(self.get_wavelength()) + "   \n"
    if get_material_database():
        fpString += self.nodestring + ".setmaterbase(" + get_material_database() + ")  \n"
    # newwgsect options:
    num2 = 1    # 0 = use Device parameters, 1 = use WG parameters
    jtype_warning = True    # warning flag for joint-type override
    for ii,el in enumerate(self.elements):
        elnum = elnum+1
        if isinstance( el, Taper ):
            '''I am not testing the Taper at all - not sure if this actually works.
            But keeping it here just in case it does.'''
            if DEBUG(): print "Device.buildNode(): type = Taper"
            fpString += self.__BuildTaperNode( el, elnum )
            el.built = True
            self.elementpos.append(elnum)
            # Set the WG length:
            fpString += self.nodestring + ".cdev.eltlist["+str(elnum)+"].length="+str(self.lengths[ii]) + "  \n"
        elif isinstance( el, Lens ):
            '''The Lens object will be a Waveguide Lens element.'''
            if DEBUG(): print "Device.buildNode(): type = Lens"
            fpString += self.__BuildLensElement( el, elnum )
            el.built = True
            self.elementpos.append(elnum)
        else:
            '''For all waveguide elements, add the previously built WG Node to this Device:'''
            if el.built != True:
                '''If the WG was not previously built, tell it to build itself.  '''
                try:
                    print self.name + ".buildNode(): Attempting to build the unbuilt element:", el.name
                    el.buildNode()  # tell the element to build itself
                except:
                    # Element failed to build - report its name (or repr) and abort:
                    try:
                        elname = el.name
                    except AttributeError:
                        elname=el.__repr__()
                    errstr = "Error while building Device Node `"+self.name+"`: \nA constituent element `" +elname+ "` could not be built.  Perhaps try building all waveguide nodes via `WGobj.buildNode()` before building the Device."
                    raise RuntimeError(errstr)
            if DEBUG(): print "Device.buildNode(): %i: type(el)=%s, name=%s"%(ii, str(type(el)), el.name)
            # Add the waveguide node into this Device:
            # (assumes WG Node is in the root-level of this FimmWave Project)
            fpString += self.nodestring + ".cdev.newwgsect("+str(elnum)+","+"../"+el.name+","+str(num2)+")  \n"
            self.elementpos.append(elnum)   # save the element number (elt) of this WG element.
            # Set the WG length:
            fpString += self.nodestring + ".cdev.eltlist["+str(elnum)+"].length="+str(self.lengths[ii]) + "  \n"
        #end if(is Taper/Lens/etc.)
        if ii != len(self.elements)-1:
            '''Add a simple joint between waveguides.'''
            elnum = elnum+1
            fpString += self.nodestring + ".cdev.newsjoint("+str(elnum)+")"+"\n"
            # Set the Joint method : 0="complete" 1=normal Fresnel, 2=oblique Fresnel, 3=special complete
            # get joint types: Device-level setting takes precedence over the element's own.
            if self.get_joint_type() == None:
                jtype = el.get_joint_type(True)     # Element-level joint-type
            else:
                jtype = self.get_joint_type(True)   # Device-level joint-type
            if jtype != el.get_joint_type(True) and jtype_warning:
                print "Warning: " + self.name + ".buildNode(): settings for Device joint type do not match those of element #" + str(elnum-1) + " (of type " + str(type(el)) + ").  The Device setting will override the element's setting.  This warning will be suppressed for the rest of the build."
                jtype_warning = False   # suppress this warning from now on
            #end if(joint type)
            fpString += self.nodestring + ".cdev.eltlist["+str(elnum)+"].method="+str( jtype )+"\n"
            self.jointpos.append(elnum)     # add position of this joint to the joints list
    #end for(ii,elements)
    # Set wavelength:
    fpString += self.nodestring + ".lambda = " + str( self.get_wavelength() ) + "   \n"
    fimm.Exec(fpString)     # it is MUCH faster to send one giant string, rather than Exec'ing many times.
    self.built=True
#end buildNode()
################################################################
## Tapers
################################################################
def __BuildLensElement(self, el, elnum ):
'''FimmProp commands to build a Waveguide Lens node. Most of the commands will come from the Lens object itself.'''
if DEBUG(): print "__BuildLensElement(): base WG = %s"%(el.wgbase.name)
node_num = self.num
prj_num = self.parent.num
#node_name = el.lhs.name
fpString=""
fpString += "app.subnodes[{"+str(prj_num)+"}].subnodes[{"+str(node_num) + "}].cdev.newwglens({"+str(elnum)+"},../"+str(el.wgbase.name) + ")"+"\n" # add the WGLens element
nodestring = "app.subnodes[{"+str(prj_num)+"}].subnodes[{"+str(node_num) + "}].cdev.eltlist[{"+str(elnum)+"}]"
#fpString += nodestring + ".length={"+str(el.length)+"}"+"\n"
fpString += el.get_buildNode_str(nodestring) # get the rest of the solver params build from the object itself
'''TO DO:
set el.length, by calculating from Radius.'''
return fpString
#end __BuildLensElement
def __BuildTaperNode(self, el, elnum):
'''FimmProp commands to build a Taper Node.
NOT TESTED YET
'''
if DEBUG(): print "__BuildTaperNode():"
node_num = self.num
prj_num = self.parent.num
node_name = self.name
fpString=""
fpString += self.nodestring + ".cdev.newtaper({"+str(2*ii+1)+"},../"+str(el.lhs)+",../"+str(el.rhs)+")"+"\n"
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].length={"+str(el.length)+"}"+"\n"
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].shape_type=0"+"\n"
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].itpfunc.string=\""+str()+"\""+"\n"
if el.method == 'full':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].int_method=0"+"\n"
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].int_method=1"+"\n"
if mode_solver() == 'vectorial FDM real' or mode_solver() == 'semivecTE FDM real' or mode_solver() == 'semivecTM FDM real' or mode_solver() == 'vectorial FDM complex' or mode_solver() == 'semivecTE FDM complex' or mode_solver() == 'semivecTM FDM complex':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].enableevscan=0"+"\n"
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].enableevscan=1"+"\n"
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.autorun=1"+"\n"
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.speed=0"+"\n"
if horizontal_symmetry() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.hsymmetry=0"+"\n"
else:
if horizontal_symmetry() == 'none':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.hsymmetry=0"+"\n"
elif horizontal_symmetry() == 'ExSymm':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.hsymmetry=1"+"\n"
elif horizontal_symmetry() == 'EySymm':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.hsymmetry=2"+"\n"
else:
print self.name + '.buildNode(): Invalid horizontal_symmetry. Please use: none, ExSymm, or EySymm'
if vertical_symmetry() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.vsymmetry=0"+"\n"
else:
if vertical_symmetry() == 'none':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.vsymmetry=0"+"\n"
elif vertical_symmetry() == 'ExSymm':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.vsymmetry=1"+"\n"
elif vertical_symmetry() == 'EySymm':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.vsymmetry=2"+"\n"
else:
print self.name + '.buildNode(): Invalid horizontal_symmetry. Please use: none, ExSymm, or EySymm'
if N() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.maxnmodes={10}"+"\n"
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.maxnmodes={"+str(N())+"}"+"\n"
if NX() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.nx={60}"+"\n"
nx_svp = 60
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.nx={"+str(NX())+"}"+"\n"
nx_svp = NX()
if NY() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.ny={60}"+"\n"
ny_svp = 60
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.ny={"+str(NY())+"}"+"\n"
ny_svp = NY()
if min_TE_frac() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.mintefrac={0}"+"\n"
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.mintefrac={"+str(min_TE_frac())+"}"+"\n"
if max_TE_frac() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.maxtefrac={100}"+"\n"
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.maxtefrac={"+str(max_TE_frac())+"}"+"\n"
if min_EV() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.evend={-1e+050}"+"\n"
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.evend={"+str(min_EV())+"}"+"\n"
if max_EV() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.evstart={1e+050}"+"\n"
else:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].mlp.evend={"+str(max_EV())+"}"+"\n"
if RIX_tol() is None:
rix_svp = 0.010000
else:
rix_svp = RIX_tol()
if N_1d() is None:
n1d_svp = 30
else:
n1d_svp = N_1d()
if mmatch() is None:
mmatch_svp = 0
else:
mmatch_svp = mmatch()
if mode_solver() is None:
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=71"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
else:
if mode_solver() == 'vectorial FDM real':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=71"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif mode_solver() == 'semivecTE FDM real':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=23"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif mode_solver() == 'semivecTM FDM real':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=39"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif mode_solver() == 'vectorial FDM complex':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=79"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif mode_solver() == 'semivecTE FDM complex':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=31"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif mode_solver() == 'semivecTM FDM complex':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=47"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif mode_solver() == 'vectorial FMM real':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=65"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif mode_solver() == 'semivecTE FMM real':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=17"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif mode_solver() == 'semivecTM FMM real':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=33"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif mode_solver() == 'vectorial FMM complex':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=73"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif mode_solver() == 'semivecTE FMM complex':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=25"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif mode_solver() == 'semivecTM FMM complex':
fpString += self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.solvid=41"+"\n"
solverString = self.nodestring + ".cdev.eltlist[{"+str(2*ii+1)+"}].svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
else:
print self.name + '.buildNode(): Invalid Mode Solver. Please use: '
print ' vectorial FDM real, semivecTE FDM real,semivecTM FDM real, '
print ' vectorial FDM complex, semivecTE FDM complex , semivecTM FDM complex, '
print ' vectorial FMM real, semivecTE FMM real, semivecTM FMM real, '
print ' vectorial FMM complex, semivecTE FMM complex, or semivecTM FMM complex'
fpString += solverString
return fpString
#end __BuildTaperNode()
####################################
####################################
#### Junk Funcs ####
####################################
####################################
'''Functions that are not used anymore, but were a huge achievement when they were first made,
so they are kept here for nostalgic purposes only. -- Demis '''
    def __buildNode2(self, name=None, parentNode=None):
        '''Build the Fimmwave node of this Device.
        NOTE: This function is deprecated - replaced by buildNode, which re-uses existing waveguide nodes in the fimmwave top-level.
        This function instead re-builds new WG nodes below the Device node and references those.
        Parameters
        ----------
        name : string, optional
            Provide a name for this waveguide node.
        parentNode : Node object, optional
            provide the parent (Project/Device) Node object for this waveguide.'''
        if name: self.name = name
        if parentNode: self.parent = parentNode
        self.jointpos = [] # eltlist[] position of simple joints
        self.elementpos = [] # eltlist[] position of each waveguide element
        # This Device becomes the next subnode of the parent Project:
        N_nodes = fimm.Exec("app.subnodes["+str(self.parent.num)+"].numsubnodes()")
        node_num = int(N_nodes)+1
        self.num = node_num
        prj_num = self.parent.num
        node_name = self.name
        # build FimmProp device
        fimm.Exec("app.subnodes[{"+str(prj_num)+"}].addsubnode(FPdeviceNode,"+str(node_name)+")"+"\n")
        elnum = 0 # element number in the Device
        fpString = ""
        # NOTE(review): this debug print assumes at least two elements; it
        # would raise IndexError on a single-element Device.
        if DEBUG(): print "Device.buildNode(): ",len(self.elements), self.elements[0], self.elements[1]
        for ii,el in enumerate(self.elements):
            elnum = elnum+1
            if DEBUG(): print "Device.buildNode(): %i: type(el)="%(ii), type(el)
            # Dispatch on the element type; each __Build*Node helper creates a
            # WG subnode under the Device and returns the FimmProp commands
            # that register it as element `elnum`:
            if isinstance( el, Waveguide ):
                if DEBUG(): print "Device.buildNode(): __BuildWaveguideNode()"
                print "WARNING: programming of waveguide concatenation in a device is not complete."
                fpString += self.__BuildWaveguideNode( el, elnum )
                self.elementpos.append(elnum)
            elif isinstance( el, Taper ):
                if DEBUG(): print "Device.buildNode(): __BuildTaperNode()"
                fpString += self.__BuildTaperNode( el, elnum )
                self.elementpos.append(elnum)
            elif isinstance( el, Circ ):
                if DEBUG(): print "Device.buildNode(): __BuildCylNode()"
                fpString += self.__BuildCylNode( el, elnum )
                self.elementpos.append(elnum)
            else:
                raise TypeError("Device.buildNode(): Waveguide Type `" + str( type(el) ) + "` not supported.")
            #end if(el.type)
            # Make the new waveguide node
            fimm.Exec(fpString); fpString=""
            if ii != len(self.elements)-1:
                '''Add a simple joint between waveguides.'''
                elnum = elnum+1
                fpString += self.nodestring + ".cdev.newsjoint("+str(elnum)+")"+"\n"
                # could choose method for the Simple Joint here:
                # (method 0 = "complete" joint)
                fpString += self.nodestring + ".cdev.eltlist["+str(elnum)+"].method=0"+"\n"
                self.jointpos.append(elnum) # add position of this joint to the joints list
                # Make the joint
                fimm.Exec(fpString); fpString=""
        #end for(ii,elements)
        #fimm.Exec(fpString)
        self.built=True
    #end buildNode2()
################################################################
## Cylindrical
################################################################
def __BuildCylNode(self, el, elnum):
'''Send the FimmProp commands to build a Fiber WG (Cylindrical) Node.
NOTE: Deprecated - we just reference a previously built WG now instead of building a whole new one under the Device node.
'''
if DEBUG(): print "__BuildCylNode():"
node_num = self.num # the Device node number
prj_num = self.parent.num # the Project node number
node_name = el.name # name of Device
wgtypestr = "fwguideNode"
subnode_num = fimm.Exec( self.nodestring + ".numsubnodes() " );
subnode_num = int(subnode_num) + 1 # the WG node number under the Device Node
el.subnodenum = subnode_num # Which subnode the WG is built under; not sure if we'll use this later, but setting it anyway
el.elnum = elnum # which element in the Device this WG is used for.
if DEBUG(): print "element info: ", el, el.name, el.subnodenum
fpString=""
'''Create WG Node under dev. node.'''
if DEBUG(): print "subnode_num=", subnode_num
fpString += self.nodestring + ".addsubnode("+wgtypestr+","+str(el.name)+") \n"
fimm.Exec(fpString); fpString=''
NodeStr = self.nodestring + ".subnodes[{"+str(subnode_num)+"}]"
self.nodestr = NodeStr
fimm.Exec( el.get_buildNode_str(NodeStr ) ) # build the Node using object's own get_buildNodeStr()
'''Add this waveguide to the device.'''
# one of these is dev length, the other is whether to use device or wg params
num1 = elnum # position - 1st/left-most is 1, next is 2, next is 3.
num2 = 1 # 0 = use Device parameters, 1 = use WG parameters
#####
fpString += self.nodestring + ".cdev.newwgsect("+str(num1)+","+el.name+","+str(num2)+") \n"
#if DEBUG(): print "__BuildCylNode(): fpString=\n", fpString
return fpString
#end __BuildCylNode2()
################################################################
## Waveguide
################################################################
def __BuildWaveguideNode(self, el, elnum):
'''FimmProp commands to build a waveguide Node
NOTE: Deprecated - we now just reference a previously built WG now instead of building a whole new one under the Device node.
'''
if DEBUG(): print "__BuildWaveguideNode():"
node_num = self.num
prj_num = self.parent.num
node_name = el.name
wgtypestr = "rwguideNode"
subnode_num = fimm.Exec( self.nodestring + ".numsubnodes() " );
subnode_num = int(subnode_num) + 1 # the WG node number under the Device Node
el.subnodenum = subnode_num # Which subnode the WG is built under; not sure if we'll use this later, but setting it anyway
el.elnum = elnum # which element in the Device this WG is used for.
if DEBUG(): print "element info: ", el, el.name, el.subnodenum
fpString=""
'''Create WG Node under dev. node.'''
if DEBUG(): print "subnode_num=", subnode_num
fpString += self.nodestring + ".addsubnode("+wgtypestr+","+str(el.name)+") \n"
fimm.Exec(fpString); fpString=''
NodeStr = self.nodestring + ".subnodes[{"+str(subnode_num)+"}]"
self.nodestr = NodeStr
fimm.Exec( el.get_buildNode_str(NodeStr ) ) # build the Node using object's own get_buildNodeStr()
'''Add this waveguide to the device.'''
# one of these is dev length, the other is whether to use device or wg params
num1 = elnum # position - 1st/left-most is 1, next is 2, next is 3.
num2 = 1 # 0 = use Device parameters, 1 = use WG parameters
#####
fpString += self.nodestring + ".cdev.newwgsect("+str(num1)+","+el.name+","+str(num2)+") \n"
# Set the WG length:
fpString += self.nodestring + ".cdev.eltlist["+str(elnum)+"].length="+str(el.length) + " \n"
return fpString
#end __BuildWaveguideNode()
#end class(Device)
# Create new Device objects by importing from another Project:
def _import_device( obj='device', project=None, fimmpath=None, name=None, overwrite=False, warn=False ):
'''This function allows you to use the FimmProp GUI for Device construction, and then interact with those Devices via pyFIMM (acquiring fields, saving plots etc.).
The Device's parent Project should have been created in pyFIMM beforehand. To grab a Device from a file, use `newprj = pyFIMM.import_Project()` to generate the Project from a file, and then call `newprj.import_Device()`.
If this function is called as a method of a pyFIMM Project object (`ProjectObj.import_device()`) then the target FimmProp Device will be copied into the calling pyFIMM Project's corresponding FimmProp project, and the device returned will point to that.
To ensure the imported Device can reference the needed Waveguides/Slabs from the original Project, it is easiest if the required waveguide/slab nodes are subnodes of the original device node - they will then be copied automatically into the new Project. If this is not possible, first use the function `Project.import_Node()` to copy the required FimmProp Nodes into the calling Project.
import_device() will not inspect the elements and waveguides used in the Device's construction. This is to enable the use of the many complex element types available in FimmProp that aren't supported by pyFIMM - for example etch/grow paths, various types of joints etc. These specialized elements/joints won't be inspected by pyFIMM, but you can still insert your Device into other Devices, launch/retrieve fields etc. via pyFIMM.
Device.get_origin() will return 'fimm' for this new Device, indicating that it was constructed in FimmWave and the elements it contains will not correspond to pyFIMM waveguide objects.
Parameters
----------
target : { 'device' | Project object }, optional
If this func is called from within a Project object, this argument is set to the parent Project object, ie. `self`. The function will then attempt to copy the FimmProp Device into the calling FimmProp Project.
If the string 'device' is passed, the function will return a new Device object without copying the FimmWave nodes - leaving the Device in it's original FimmProp Project.
project : pyFIMM Project object, required
Specify the pyFIMM Project from which to acquire the Device.
fimmpath : string, required
The FimmProp path to the Device, within the specified project. This takes the form of something like "DevName" if the device named "DevName" is at the top-level of the FimmProp Project, or "NodeName/SubDevName" if SubDevName is under another Node.
name : string, optional
Optionally provide a name for the new Device node in Fimmwave. If omitted, the name found in the Project will be used.
overwrite : { True | False }, optional
If True, will overwrite an existing Fimmwave Device if Fimmwave reports a name-conflict. If False, will append random digits to the to Device's name. False by default.
warn : { True | False }, optional
Print or suppress warnings when nodes will be overwritten etc. True by default.
Parameters of the returned Device are a bit different from one generated entirely by pyFIMM, as detialed below:
Please type `dir(DeviceObj)` or `help(DeviceObj)` to see all the attributes and methods available.
Attributes
----------
The returned Device object will most of the same attributes as a standard pyFIMM Device object, with the following exceptions:
DevObj.origin : { 'fimmwave' }
This indicates that this Device was Not constructed by pyFIMM, and so has a slightly lacking set of attributes (detailed further in this section). A normally-constructed pyFIMM Device has the value 'pyfimm'.
DevObj.num : nonexistent
Obsoleted. Instead, use the attribute `DevObj.nodestring` to reference the device object in FimmWave.
DevObj.elements : (empty list)
To allow for all the various Element construction methods available in FimmWave (eg. etch/grow paths etc.), pyFIMM will not populate the elements list of the imported Device.
However, `.elementpos` and `.jointpos` will be populated properly so that you can differentiate between joints and waveguide elements. Note that Free-Space joints will be added to `*.elementpos` despite being in the "joints" section of the FimmProp GUI, because they have a length and are thus more appropriately treated as finite-length elements.
DevObj.elementpos : list
List of element positions (used in FimmProp's `DevNode.cdev.eltlist[%i]`) for referencing a particular element. Elements that are references will have an entry corresponding to the original element, which might be a string for Nodes inserted into the device as references.
In-progress: DevObj.referencepos : list
For referenced elements, contains either the location of the original element, or path to the original Node.
DevObj.lengths : list
The length, in microns, of each element that can have a length (these elements are referenced in `DevObj.elementpos`). Unsupported elements, such as the WGLens (which don't have a simple calculation of length) will have a `None` entry in the list.
DevObj.jointpos : list
List of positions (used in FimmProp's `DevNode.cdev.eltlist[%i]`) of joints that have no length, eg. Simple-Joints, IO-Sections.
Examples
--------
To open a Device from a file, import the project file first:
>> prj = pyfimm.import_project( 'C:\pyFIMM Simulations\example4 - WG Device 1.prj' )
Create a new pyFIMM Device pointing to the FimmProp Device in the imported Project:
>>> DevObj = pyfimm.import_device( prj, "Name Of My Device In The Project" )
The string "Name Of..." as actually a FimmWave path, so could reference subnodes like "ParentDev/TheDeviceIWant".
Or copy the Device into a new pyFIMM Project:
>>> prj2 = pyfimm.Project( 'New PyFIMM Project', build=True )
>>> DevObj = prj2.import_device( prj, "Name Of My Device In The Project" )
If the Device relies on other waveguides & slabs, it's easiest if those WGs/slabs are stored as SubNodes of the Device to copy, such that they are copied along with the Device. If they aren't stored as SubNodes, then you'll want to import those dependency nodes individually via `Project.import_node()`.
'''
'''Note that `obj` will be a Project object, if this function is called from the Project object's methods'''
if (project is None) or (fimmpath is None):
ErrStr = "import_device(): The `project` and `fimmpath` arguments are required! Please specify these parameters."
raise ValueError( ErrStr )
if DEBUG(): print "import_device( project.name='%s', fimmpath='%s' )"%(project.name, fimmpath)
dev = Device() # new, empty, pyFIMM Device object
dev.elements = None
dev.num = None
dev.set_parent( project )
dev.origin = 'fimmwave' # Device was constructed in FimmProp, not pyFIMM
dev.name = fimmpath.split('/')[-1] # get the last part of the path
devname = "Device_%i" %( get_next_refnum() ) # generate dev reference name
# create fimmwave reference to the Device:
fpStr = 'Ref& %s = %s'%(devname,project.nodestring) + '.findnode("%s")'%(fimmpath)
if DEBUG(): print fpStr
ret = fimm.Exec( fpStr )
ret = strip_txt( ret )
if DEBUG(): print "\tReturned:\n%s"%(ret)
if ret.startswith("ERROR") or (ret.find("could not find node") != -1):
ErrStr = "import_device(): Error locating fimmprop node '%s'."%(fimmpath)
ErrStr += " FimmProp returned the message:\n\t%s"%(ret)
raise ValueError(ErrStr)
dev.nodestring = devname # use this to reference the device in Fimmwave
# Identify the type of element:
ret = strip_txt( dev.Exec( 'objtype' , check_built=False) )
if ret != 'FPDeviceNode':
ErrStr = "The referenced node `%s` is not a FimmProp Device or couldn't be found!\n\t"%(fimmpath) + "FimmWave returned object type:\n\t`%s`."%(ret)
raise ValueError(ErrStr)
if isinstance( obj, Project):
'''This Function was called as a method of the Project object'''
# copy the Device into this project:
fimm.Exec( dev.nodestring + ".copy()" ) # copy to system clipboard
# update device's references:
dev.set_parent(obj)
N_nodes = fimm.Exec(obj.nodestring+".numsubnodes()")
dev.num = int(N_nodes)+1
dev.nodestring = obj.nodestring + ".subnodes[%i]"%(dev.num)
# check node name, overwrite existing/modify dev's name if needed:
dev.name, samenodenum = check_node_name( dev.name, nodestring=obj.nodestring, overwrite=overwrite, warn=warn )
fimm.Exec( obj.nodestring + '.paste( "%s" )'%(dev.name) ) # paste into this project
dev.built = True
# Populate device parameters:
dev.__wavelength = dev.parent.checkvar( dev.Exec( "lambda" ) )
if DEBUG(): print dev.name + ".__wavelength = ", dev.__wavelength, str(type(dev.__wavelength))
dev.elements = []
els = dev.Exec( "cdev.eltlist" ) # get list of elements
if isinstance(els, str): els=[els] # if only one element, pdApp.Exec de-lists the array so it's just a string. must re-array it here.
if DEBUG(): print "els =", els
for i, el in enumerate(els):
elnum=i+1 # 1-indexing in FP
objtype = dev.Exec( "cdev.eltlist[%i].objtype"%(elnum) ).strip()
dev.elements.append(objtype)
if objtype=='FPsimpleJoint' or objtype == 'FPioSection':
'''SimpleJoints,IOports have no length,
don't add them as regular elements'''
if DEBUG(): print "Element %i is Joint: %s"%(elnum, objtype)
dev.jointpos.append(elnum)
elif objtype.lower().endswith('section') or objtype.strip() == 'FPtaper' or objtype.strip() == 'FPfspaceJoint' or objtype.strip() == 'FPbend':
''' Regular Section with a `*.length` attribute, including regular WG/Planar Sections'''
if objtype == 'FPRefSection':
NodeRef = False
''' This element references another element:
resolve the reference & get the properties'''
refpos = fimm.Exec( dev.nodestring + ".cdev.eltlist[%i].getrefid()"%(elnum) )
try:
refpos = int( refpos )
'''element is a reference to another element in this same device'''
if DEBUG(): print "Element %i is reference --> Element %i."%(elnum, refpos)
elnum = refpos # point to the original element
dev.elementpos.append(elnum)
dev.lengths.append( dev.parent.checkvar( dev.Exec( "cdev.eltlist[%i].length"%(elnum) ) ) )
if DEBUG(): print "Element %i: Length = "%(elnum) , dev.lengths[-1]
except ValueError:
'''element references another node entirely - refpos is probably a string'''
NodeRef=True
elnum = -1 # -1 indicates element is ref to another node
TempDev = "Device_%i" %( get_next_refnum() ) # generate dev reference name
fimm.Exec( 'Ref& ' + TempDev + ' = ' + dev.parent.nodestring + '.findnode("' + refpos + '")' )
dev.elementpos.append( (elnum,TempDev) ) # str indicates element is another node
# use the above to locate the device and get the length!
dev.lengths.append( None ) # <--- should resolve the reference and get the length! important for plotting!
#dev.lengths.append( dev.parent.checkvar( dev.Exec( "cdev.eltlist[%i].length"%(refpos) ) ) )
#dev.lengths.append( dev.Exec( "cdev.eltlist[%i].length"%(refpos) ) )
#if DEBUG(): print "Element %i: Length = "%(elnum) , dev.lengths[-1]
else:
if DEBUG(): print "Element %i is Section of type: %s"%(elnum, objtype)
dev.elementpos.append(elnum)
dev.lengths.append( dev.parent.checkvar( dev.Exec( "cdev.eltlist[%i].length"%(elnum) ) ) )
if DEBUG(): print "Element %i: Length = "%(elnum) , dev.lengths[-1]
#end if(FPRefSection)
else:
'''Eg. Lens = FPWGLens; can't get the length simply'''
print "WARNING: Element %i: "%(elnum) + "Unsupported Element Type:", objtype
dev.elementpos.append(elnum)
dev.lengths.append( None )
#if DEBUG(): print "%i: elementpos = ", dev.elementpos, " & jointpos = ", dev.jointpos
#end for(elements)
return dev
#end import_device()
# Attach `_import_device` as a method of the Project class, so that
# `ProjectObj.import_device(...)` passes the Project instance as `obj`,
# causing the imported Device to be copied into that Project:
Project.import_device = _import_device
def import_device(project, fimmpath, name=None, overwrite=False, warn=False ):
    '''Return a new pyFIMM Device object pointing at a Device that already
    exists in an imported Project (ie. one created in FimmProp & loaded from
    a file, rather than built via pyFIMM).  This allows you to use the
    FimmProp GUI for Device construction, and then interact with those
    Devices via pyFIMM (acquiring fields, saving plots etc.).

    Please see `help(pyfimm._import_device)` for complete help; only partial
    documentation is given here.

    Parameters
    ----------
    project : pyFIMM Project object, required
        Specify the pyFIMM Project from which to acquire the Device.
    fimmpath : string, required
        The FimmProp path to the Device, within the specified project. This
        takes the form of something like "DevName" if the device named
        "DevName" is at the top-level of the FimmProp Project, or
        "NodeName/SubDevName" if SubDevName is under another Node.
    name : string, optional
        Optionally provide a name for the new Device node in Fimmwave. If
        omitted, the name found in the Project will be used.

    Returns
    -------
    pyFIMM Device object, referencing the fpDevice.
    '''
    # Delegate to the shared implementation; the 'device' sentinel means
    # "do not copy the node into another Project":
    return _import_device( 'device', project, fimmpath, name=name, overwrite=overwrite, warn=warn )
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,425
|
demisjohn/pyFIMM
|
refs/heads/master
|
/example2 - Rect Device with material db.py
|
'''
##########################################################################
Simple FimmProp Device example.
Creates a rectangular WG (RWG) with AlGaAs core using the default material database, `refbase.mat`
Solves for modes & plots the fundamental mode.
Then makes an identical waveguide that is wider, and creates a Device with the two different waveguide stuck together.
##########################################################################
'''
''' Get help on commands and objects by typing things into the console, like:
>>> help(pyfimm) or after the import below, >>> help(pf)
>>> help(pyfimm.set_mode_solver)
>>> help(pyfimm.Waveguide)
>>> help( pyfimm.Mode ) # the Mode class, for selecting a mode to work with
>>> help(pyfimm.Waveguide.buildNode)
or even easier, while building your script try:
>>> help(core) # will show help on the Material object
>>> help(strip) # will show help on the Waveguide object
>>> help(strip.buildNode) # shows options for Circ.buildNode()
>>> dir( strip.mode(0) ) # shows all the available functions that can be performed on modes, which are actually Mode objects.
>>> help( strip.mode(0).plot ) # help on the mode plotting function
For more verbose output, while programming the libraries for example, set the pyfimm DEBUG flag as so:
>>> pyFIMM.set_DEBUG()
This will enable various levels of extra output, that aids in finding out where a calculation or bug is occurring.
'''
import pyfimm as pf # Every script must begin with this line
#pf.set_DEBUG() # Enable Debugging output
pf.connect() # this connects to the FimmWave application. The FimmWave program should already be open (pdPythonLib.StartApplication() is not supported yet)
# Set Parameters (Your copy of FIMMWAVE has default values for these. You can change more than shown here. See __jaredwave.py
import sys, os
ScriptPath, ScriptFile = os.path.split( os.path.realpath(__file__) ) # Get directory of this script
pf.set_working_directory(ScriptPath) # Set this directory to the location of your script
# NOTE(review): the next line repeats the call above -- harmless but redundant.
pf.set_working_directory(ScriptPath) # Set FimmWave directory to the location of your script (needed to capture output files)
pf.set_eval_type('n_eff') # FIMMWAVE will label modes by the effective index (options: n_eff or beta)
pf.set_mode_finder_type('stable') # options: stable or fast
pf.set_mode_solver('vectorial FMM real') # Three words, any permuation of: 'vectorial/semivecTE/semivecTM FDM/FMM real/complex' for RWG.
pf.set_wavelength(1.55) # The unit of space is always 1 micrometer
pf.set_N_1d(100) # # of 1D modes found in each slice (FMM solver only)
pf.set_NX(100) # # of horiz. grid points for plotting & FDM
pf.set_NY(100) # # of vertical grid points for plotting & FDM
pf.set_N(3) # # of modes to solve for
pf.set_material_database('Materials/refbase.mat') # Use the material database provided by PhotonDesign. Only one matDB can be used at a time - to use multiple, set up your matDB to `include` other files.
# Project Node - You must build a project node at the beginning of every script
wg_prj = pf.Project() # Construct a Project object, pass a project name to the constructor (optional).
wg_prj.buildNode('Example 2 - Waveguide Device', overwrite=True)
# the buildNode() method makes FIMMWAVE build the objects.
# Here we've also set it to overwrite any existing project of the same name.
# Start constructing the Waveguide Node
t_clad = 6.0 # cladding thickness
t_core = 0.1 # core thickness
clad = pf.Material(1.4456) # Construct a Material python object, pass a refractive index as the argument
core=pf.Material('AlGaAs', 0.98) # AlGaAs with 98% Aluminum: defined in material database
# See `help(core)` or `help(pf.Material)` to see more info on Material objects & options to make them!
center = pf.Slice( clad(t_clad) + core(t_core, cfseg=True) + clad(t_clad) )
# The Core material here is also set as the Confinement Factor Segment.
side = pf.Slice( clad(2*t_clad+t_core) )
w_side = 6.0 # cladding width
w_core = 2.8 # width
strip = pf.Waveguide( side(w_side) + center(w_core) + side(w_side) )
# You can pass the Slice width to the Slice object with ()s
#strip.set_material_database('Materials/refbase.mat') # can set waveguide-specific material database - not recommended, as Device does not support this.
print "Printing `strip`:"
print strip # you can print your python objects to check them
#strip.set_parent(wg_prj) # You have to tell python which project node to build the waveguide node under
#strip.name = 'strip' # Name the node
#strip.buildNode()
strip.buildNode(name='strip', parent=wg_prj) # You can also set the parent & name while building.
#You must always build the node! This sends the actual Fimmwave commands to generate this waveguide in Fimmwave.
print "Calculating 'strip'..."
strip.calc() # Tell FIMMWAVE to solve for the modes!
# More sophisticated mode plotting: plot the Ex's of two selected modes & return the handles so that we can manipulate the plots with matplotlib:
fig, axes, images = strip.mode( [0,2] ).plot('Ex', return_handles=True)
# add the propagation constant of each mode to the plots:
# position text in axis-scale, not data-scale (`transform=...`)
PlotString = r"kz = %0.3f um^-1" % ( strip.mode(0).get_kz().real ) # insert the propagation const. into the %0.3f
axes[0].text( 0.05, 0.05, \
PlotString, \
transform=axes[0].transAxes, horizontalalignment='left', color='green', fontsize=14, fontweight='bold')
# Do some TeX formatting (sub/superscripts) with a 'raw' (r"...") string.
PlotString = r"$k_z = %0.3f \mu{}m^{-1}$" % ( strip.mode(2).get_kz().real )
axes[1].text( 0.05, 0.05, \
PlotString, \
transform=axes[1].transAxes, horizontalalignment='left', color='green', fontsize=14, fontweight='bold')
# Save the modified figure as so:
fig.savefig('Example 2 - Two Modes with Prop Const.png')
# Create a second waveguide that is identical but with 6.5um wider core:
strip2 = pf.Waveguide( side(w_side) + center(w_core+6.5) + side(w_side) )
#strip2.name='strip 2'
#strip2.set_parent(wg_prj)
strip2.buildNode(name='strip2', parent=wg_prj) # Two waveguides under the one project.
# Create a FimmProp Device with these two Waveguides concatenated (to propagate through multiple waveguides). Pass the lengths of each WG as arguments.
dev = pf.Device( strip(10.0) + strip2(15.0) )
#dev.set_parent(wg_prj)
#dev.name = 'WG Device'
#dev.buildNode()
dev.buildNode(name='WG Device', parent=wg_prj) # same as the above three lines
# You should now see the Device called "WG Device" in FimmProp!
# See `help(dev)` or `dir(dev)` to see what further funcionality is available via pyfimm.
# View fields in the device
dev.set_input( [1,0,0] ) # Set to launch Mode #0 only
dev.plot('I') # plot the intensity versus Z.
dev.plot('Ex', direction='-z', title='Reflected (-z) field') # Plot reflected wave only
#wg_prj.savetofile('rectdev with mat db') # save the project to a file. '.prj' will be appended.
#wg_prj.delete() # Delete the whole project!
#pyfimm.disconnect() # close TCP connection to application.
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,426
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__Mode.py
|
'''Mode class, part of pyFIMM.'''
from __globals import * # import global vars & FimmWave connection object
# also contains AMF_FolderStr(), DEBUG(), numpy as np & pyplot as plt
from pylab import cm # color maps
import math
import os # for filepath manipulations (os.path.join/os.mkdir/os.path.isdir)
from __pyfimm import get_N, get_wavelength
#from pylab import * # no more global namespace imports
#from numpy import *
#import pylab as pl # use numpy instead (imported as np)
#import matplotlib.pyplot as plt # now imported in Globals.py
#import numpy as np
#AMF_FileStr = 'pyFIMM_temp'
class Mode:
'''Mode( WGobj, modenum, modestring )
Class for interacting with calculated Modes. Includes extracting field values and mode plotting.
Note that a Mode object is rarely instantiated directly - it instead is created when a waveguide/circ's `mode()` method is used - mode() returns a Mode object. This allows behaviour like:
WGobj.mode(0).plot()
Where `WGobj.mode(0)` returns a Mode object instantiated with modenum=0, and `.plot()` is a moethod of that Mode object.
Parameters
----------
WGobj : Waveguide or Circ object
The waveguide to extract modes from.
modenum : int
Choose which mode number to manipulate.
To DO: support int, list of ints, or the string 'all'
modestring : string
The fimmwave string to reference the modes of the waveguide node. See Circ.mode() or Waveguide.mode() to see how this string is set.
Methods
-------
This is a partial list - see `dir(WG.mode(0))` to see all methods.
Please see help on a specific function via `help(Mode.theFunc)` for detailed up-to-date info on accepted arguments etc.
get_n_eff()
return the effective index of this mode
get_n_g()
return the group index of this mode
get_kx()
return the propagation constant of this mode
get_percent_TE()
Return the "TEfrac" - or percentage of the mode that is transverse-electric.
get_confinement()
Return the confinement factor.
get_confinement_ey()
Return the confinement factor for the Ey field.
get_dispersion()
Return the modal dispersion.
get_effective_area()
Return the effective mode area.
get_fill_factor()
Return the fill factor.
get_state()
Return the Fimmwave state of this mode.
activate()
Set fimmwave state of this mode to 1
deactivate()
Set fimmwave state of this mode to 0
field(component, include_pml=True)
Get the value of a particular electromagnetic field from this Mode.
Returns the field component of the whole mode profile.
See help on this function for parameters.
P():
GUESS: Return the total power of this mode?
plot( component )
Plot the mode specified.
See help on this function for more info.
save_plot( component, prefix, Title )
Save a plot of this mode.
See help on this function for more info.
Attributes
----------
modenum : integer or list
Which modenumbers are being operated on
list_num : integer or list
Fimmwave index to reference the desired mode: integer or list
modeString : str
fimmwave string to access desired modes or desired node.
eg. 'app.subnodes[{3}].subnodes[{1}].evlist.'
'''
# Note: Had to remove `__` from start of these local class variables, in order to allow the ./proprietary/UCSB.py file to access them directly
def __init__(self,*args):
if len(args) == 0:
self.obj = None
self.modenum = None
self.list_num = None
self.modeString = None
elif len(args) == 3:
'''Waveguide.mode(n) & Circ.mode(n) always call this case'''
self.obj = args[0] # the waveguide object
num = args[1] # mode number(s) requested
#self.list_num = args[1] + 1 # add one to ModeNum
self.modeString = args[2] # fimmwave string to access the mode, including trailing `.`
else:
print 'Invalid number of input arguments to Mode()'
# Check if requested 'all' modes:
if isinstance(num, str):
if num.lower() == 'all':
#num = -1 # plot all modes
self.modenum = range(0, get_N() ) # list of each modenumber calc'd
self.list_num = range(1, get_N()+1) # add one to ModeNum
else:
ErrStr = 'CavityMode: Mode Number must be an integer, list of integers, or the string "all".'
raise ValueError(ErrStr)
elif isinstance(num, int):
self.modenum = [num] # put num into a list
self.list_num = [num+1]
else:
try:
self.modenum = [int(x) for x in num] # check that we're able to create a list of integers
self.list_num = [x+1 for x in self.modenum] # add one to ModeNum
except:
ErrStr = 'Mode: Mode Number must be an integer, list of integers, or the string "all".'
raise ValueError(ErrStr)
#end if(num)
if np.max(self.list_num) > get_N():
ErrStr = "Mode: Requested Mode number %i is too high: `set_N()` currently only calculates %i modes (which start at Mode #0)." %(np.max(self.modenum), get_N() )
raise ValueError(ErrStr)
if DEBUG(): print self.obj.name + ".Mode: modenum = ", self.modenum, "; list_num = ", self.list_num
#end __init__()
def __str__(self):
    '''Return a human-readable summary of every selected mode (used by `print`).'''
    parts = []
    if self.obj.name:
        parts.append( "Waveguide Name: '" + self.obj.name + "'\n" )
    for n, num in enumerate(self.list_num):
        parts.append( "Mode (%i):\n"%num )
        parts.append( "\tModal Index (n_eff) = %0.5f \n"%(self.get_n_eff(as_list=True)[n].real) )
        parts.append( "\tGroup Index (n_g) = %0.5f \n"%(self.get_n_g(as_list=True)[n].real) )
        parts.append( "\tPercent of the mode in TE direction = %0.1f %% \n"%(self.get_percent_TE(as_list=True)[n]) )
        parts.append( "\tConfinement Factor (overlap with cfseg) = %0.1f \n"%(self.get_confinement(as_list=True)[n]) )
        parts.append( "\tEffective Area = %0.3f um^2 \n"%(self.get_effective_area(as_list=True)[n]) )
        parts.append( "\tAttenuation = %0.3f 1/cm \n"%(self.get_attenuation(as_list=True)[n]) )
        parts.append( "\tPropagation Constant = %0.3f + j*%0.3f 1/um \n"%(self.get_kz(as_list=True)[n].real, self.get_kz(as_list=True)[n].imag) )
    return "".join(parts)
def get_n_eff(self, as_list=False):
    '''Return the modal (effective) index of each selected mode.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list, which is convenient when
        iterating over multiple modes.'''
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].neff()" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
#end get_n_eff()
def n_eff(self):
    '''Deprecated alias kept for backwards compatibility only.
    Use get_n_eff() instead; prints a deprecation notice then delegates.'''
    print "n_eff(): DeprecationWarning: Use get_n_eff() instead."
    return self.get_n_eff()
def get_n_g(self, as_list=False):
    '''Return the group index of each selected mode.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.'''
    # Ask fimmwave to refresh the full mode-data set before reading neffg.
    fimm.Exec( self.modeString + "list[{" + str(self.list_num[0]) + "}].modedata.update(1)" + "\n" )
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.neffg" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
#end get_n_g()
def n_g(self):
    '''Deprecated alias kept for backwards compatibility only.
    Use get_n_g() instead; prints a deprecation notice then delegates.'''
    print "n_g(): DeprecationWarning: Use get_n_g() instead."
    return self.get_n_g()
def get_kz(self, as_list=False):
    '''Return the propagation constant (beta) of each selected mode.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.'''
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].beta()" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def kz(self):
    '''Deprecated alias kept for backwards compatibility only.
    Use get_kz() instead; prints a deprecation notice then delegates.'''
    print "kz(): DeprecationWarning: Use get_kz() instead."
    return self.get_kz()
def get_percent_TE(self, as_list=False):
    '''Return the fraction of power that is TE polarized for each selected
    mode.  Fimmwave reports -99 when the value has not been calculated;
    that sentinel is translated to `None`.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.'''
    vals = []
    for m in self.list_num:
        tefrac = fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.tefrac" )
        vals.append( None if tefrac == -99 else tefrac )
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def percent_TE(self):
    '''Deprecated alias kept for backwards compatibility only.
    Use get_percent_TE() instead; prints a deprecation notice then delegates.'''
    print "percent_TE(): DeprecationWarning: Use get_percent_TE() instead."
    return self.get_percent_TE()
def get_confinement(self, as_list=False):
    '''Return the confinement factor of each selected mode: the mode's
    overlap with the waveguide segments flagged "cfseg" (see FimmWave
    Manual Sec.4.7).

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : fractional confinement factor (0-->1)
    '''
    # Refresh the (basic) mode data before reading gammaE.
    fimm.Exec(self.modeString+"list[{"+str(self.list_num[0])+"}].modedata.update(0)")
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.gammaE" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_confinement_ey(self, as_list=False):
    '''Return a confinement-factor estimate using only the Ey field
    component, over the region flagged by cfseg (see FimmWave Manual
    Sec.4.7).

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : fractional confinement factor (0-->1)
    '''
    # Refresh the (basic) mode data before reading gammaEy.
    fimm.Exec(self.modeString+"list[{"+str(self.list_num[0])+"}].modedata.update(0)")
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.gammaEy" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_fill_factor(self, as_list=False):
    '''Return the fill factor of each selected mode: the fraction of the
    mode power flux within the region flagged by cfseg (see FimmWave
    Manual Sec.4.7).

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : fractional fill factor (0-->1)
    '''
    # Refresh the (basic) mode data before reading fillFac.
    fimm.Exec(self.modeString+"list[{"+str(self.list_num[0])+"}].modedata.update(0)")
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.fillFac" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_dispersion(self, as_list=False):
    '''Return the modal dispersion (ps/nm/km) of each selected mode - see
    Fimmwave Manual Sec. 13.2.8 for the definition.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : mode dispersion (ps/nm/km)
    '''
    # Dispersion needs the full ('all') mode-data update, hence update(1).
    fimm.Exec(self.modeString+"list[{"+str(self.list_num[0])+"}].modedata.update(1)")
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.dispersion" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_attenuation(self, as_list=False):
    '''Return the mode attenuation (1/cm) of each selected mode, derived
    from the imaginary part of the effective index.
    Corresponds to `ModeLossEV` (complex attenuation), so only meaningful
    with complex solvers.

        alpha [1/cm] = Im(n_eff) * 4*pi / (wavelength [cm])

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : mode attenuation (1/cm)
    '''
    neff_imag = np.imag( self.get_n_eff(as_list=True) )   # numpy array of Im(n_eff)
    # get_wavelength() is in um; 1e-4 converts to cm.
    vals = ( neff_imag * 4*math.pi / (get_wavelength()*1e-4 ) ).tolist()
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_material_loss(self, as_list=False):
    '''Return the loss due to material absorption (1/cm), based on the mode
    overlap with absorbing materials.  Corresponds to `ModeLossOV` in the GUI.

    With a complex solver modeLossOV is exactly the material loss (and should
    match modeLossEV absent absorbing boundaries, given sufficient nx/ny).
    With a real solver it is an overlap-based approximation.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : mode attenuation (1/cm)
    '''
    fimm.Exec(self.modeString + "list[{" + str(self.list_num[0]) + "}].modedata.update(0)")
    # fimmwave reports alpha per um; * 1e4 converts to 1/cm.
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.alpha" ) * 1e4
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_effective_area(self, as_list=False):
    '''Return the effective core area (um^2) of each selected mode.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : effective core area (um^2)
    '''
    # Refresh the (basic) mode data before reading a_eff.
    fimm.Exec(self.modeString+"list[{"+str(self.list_num[0])+"}].modedata.update(0)")
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.a_eff" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_side_loss(self, as_list=False):
    '''Return the side power loss of each selected mode.
    NOTE(review): units are documented here as 1/um but the fimmwave popup
    says 1/cm -- confirm against the FimmWave manual.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.

    Returns
    -------
    float : side power loss
    '''
    # Refresh the (basic) mode data before reading sideploss.
    fimm.Exec(self.modeString+"list[{"+str(self.list_num[0])+"}].modedata.update(0)")
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].modedata.sideploss" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def get_state(self, as_list=False):
    '''Return the fimmwave state of each selected mode as an integer:
    0=INACTIVE, 1=ACTIVE, 2=BAD or INCONSISTENT.

    Parameters
    ----------
    as_list : boolean, optional
        When False (the default) a single-mode result is de-listed to a bare
        value; pass True to always receive a list.'''
    vals = [ fimm.Exec( self.modeString + "list[{" + str(m) + "}].state" )
             for m in self.list_num ]
    if len(self.list_num) == 1 and as_list == False:
        return vals[0]
    return vals
def state(self):
    '''Deprecated alias kept for backwards compatibility only.
    Use get_state() instead; prints a deprecation notice then delegates.'''
    print "state(): DeprecationWarning: Use get_state() instead."
    return self.get_state()
def activate(self):
    '''Set the fimmwave state of every selected mode to Active (1).'''
    for m in self.list_num:
        fimm.Exec( self.modeString + "setstate({" + str(m) + "},1)" )
def deactivate(self):
    '''Set the fimmwave state of every selected mode to Inactive (0).'''
    for m in self.list_num:
        fimm.Exec( self.modeString + "setstate({" + str(m) + "},0)" )
def get_field(self, component, include_pml=True, as_list=False):
'''field(component [, include_pml])
Get the value of a particular electromagnetic field from this Mode.
Returns the field component of the whole mode profile.
Parameters
----------
component : string, { 'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' | 'I' }, case insensitive
Choose which field component to return. I is intensity.
include_pml : { True | False }
Whether to include perfectly-matched layer boundary conditons. True by default.
as_list : boolean, optional
If a single-mode is returned, by default it's de-listed (just a singel array). If `as_list=True`, then it is returned as a single-element list - useful when iterating multiple modes. False by default.
Returns
-------
fieldarray : [Nx x Ny] list of all the field values.
Nx and Ny are set by `pyfimm.set_Nx()` & `.set_Ny()`.
It is recommended that you convert this to an array for performing math, eg. `numpy.array( fieldarray )`
If multiple modes were selected (eg. `WG.mode([0,1,2])`), then a list is returned containing the numpy field array for each mode, eg. `fieldarray = [ Mode0[Nx x Ny], Mode1[Nx x Ny], Mode2[Nx x Ny] ]`
'''
if include_pml:
if DEBUG(): print "Mode.field(): include_pml"
pml = '1'
else:
pml='0'
component = component.lower().strip() # to lower case & strip whitespace
#if len(component) == 1:
if component == 'Ex'.lower():
comp='1'
elif component == 'Ey'.lower():
comp='2'
elif component == 'Ez'.lower():
comp='3'
elif component == 'Hx'.lower():
comp='4'
elif component == 'Hy'.lower():
comp='5'
elif component == 'Hz'.lower():
comp='6'
elif component == 'I'.lower():
comp='7'
else:
raise ValueError("Mode.field(): Invalid field component requested.")
if DEBUG(): print "Mode.field(): f = " + self.modeString + \
"list["+str(self.list_num)+"].profile.data.getfieldarray("+comp+","+pml+") \n\t f.fieldarray"
# Check if modes have been calc()'d:
a = fimm.Exec(self.modeString+"list["+str(self.list_num[0])+"].profile.update()")
# Check if modes have been calc()'d:
if DEBUG(): print "field(): #",a[:-2].strip(),'#\n'
if a[:-2].strip() != '':
WarningString = "FimmWave error: please check if the modes have been calculated via WG.calc().\n\tFimmWave returned: `%s`"%a[:-2].strip()
raise UserWarning(WarningString)
#fimm.Exec("Set f = " + self.modeString + "list[" + str(self.list_num) + "].profile.data.getfieldarray(" + comp + "," + pml + ") \n" )
#field = fimm.Exec("f.fieldarray")
out=[]
for num in self.list_num:
fimm.Exec("Set f = " + self.modeString + "list[" + str(num) + "].profile.data.getfieldarray(" + comp + "," + pml + ") \n" ) # must set this as a variable to avoid memory error
out.append( fimm.Exec("f.fieldarray") ) # grab the array (as list)
if len(self.list_num) == 1 and as_list==False:
out = out[0]
return out
#if DEBUG(): print "Mode.field(): \n", field, "\n--------------"
#return np.array(field)
#end get_field()
# Alias for this function
field = get_field
def P(self):
    '''Return the Power Density - I think in J/um

    Computes the z-directed Poynting vector Sz = (Ex*Hy' - Ey*Hx')/2 and
    integrates it over the mode cross-section.  The field profile is
    exported from fimmwave to an AMF text file in the working directory,
    split into per-component temp files, and re-loaded with numpy.
    NOTE(review): the "J/um" unit in the summary above is the original
    author's guess - confirm before relying on absolute values.

    Raises
    ------
    NotImplementedError : if more than one mode number was selected.
    UserWarning : if fimmwave reports an error (e.g. modes not calc()'d).
    '''
    if len(self.list_num) > 1:
        ErrStr = "Mode.P(): Only supports a single mode number being passed."
        raise NotImplementedError(ErrStr)
    else:
        num = self.list_num[0]
    # Update the profile; any text fimmwave returns indicates an error.
    a = fimm.Exec(self.modeString+"list["+str(num)+"].profile.update()"+"\n")
    # Check if modes have been calc()'d:
    if DEBUG(): print "P(): #",a[:-2].strip(),'#\n'
    if a[:-2].strip() != '':
        ErrStr = "FimmWave error: please check if the modes have been calculated via WG.calc().\n\tFimmWave returned: `%s`"%a[:-2].strip()
        raise UserWarning(ErrStr)
    # Export the full mode profile to an AMF text file.
    fimm.Exec(self.modeString+"list["+str(num)+"].profile.data.writeamf("+\
        "mode"+str(num)+"_pyFIMM.amf,%10.9f)" )
    ## AMF File Clean-up
    fin = open("mode"+str(num)+"_pyFIMM.amf", "r")
    data_list = fin.readlines()
    fin.close()
    # Delete File Header
    nxy_data = data_list[1]    # grid dimensions line (nx, ny)
    xy_data = data_list[2]     # physical extents line (xStart, xEnd, yStart, yEnd)
    slvr_data = data_list[6]   # solver flag line (real/complex)
    del data_list[0:9]
    # Round-trip each header line through a temp file so np.loadtxt can parse it
    # (the AMF format uses '//' comments).
    fout = open("nxy"+str(num)+"_pyFIMM.txt", "w")
    fout.writelines(nxy_data)
    fout.close()
    nxy = np.loadtxt("nxy"+str(num)+"_pyFIMM.txt", comments='//')
    nx = int(nxy[0])
    ny = int(nxy[1])
    fout = open("xy"+str(num)+"_pyFIMM.txt", "w")
    fout.writelines(xy_data)
    fout.close()
    xy = np.loadtxt("xy"+str(num)+"_pyFIMM.txt", comments='//')
    fout = open("slvr"+str(num)+"_pyFIMM.txt", "w")
    fout.writelines(slvr_data)
    fout.close()
    iscomplex = np.loadtxt("slvr"+str(num)+"_pyFIMM.txt", comments='//')   # NOTE(review): currently unused
    # Resave Files
    # Each field component appears to occupy (nx+2) lines of the AMF body.
    # NOTE(review): the slices below skip the blocks at 2*(nx+2) and 5*(nx+2)
    # (presumably Ez and Hz, which Sz does not need) - confirm against the
    # AMF file layout before modifying these indices.
    fout = open("Ex"+str(num)+"_pyFIMM.txt", "w")
    fout.writelines(data_list[1:nx+2])
    fout.close()
    fout = open("Ey"+str(num)+"_pyFIMM.txt", "w")
    fout.writelines(data_list[(nx+2)+1:2*(nx+2)])
    fout.close()
    fout = open("Hx"+str(num)+"_pyFIMM.txt", "w")
    fout.writelines(data_list[3*(nx+2)+1:4*(nx+2)])
    fout.close()
    fout = open("Hy"+str(num)+"_pyFIMM.txt", "w")
    fout.writelines(data_list[4*(nx+2)+1:5*(nx+2)])
    fout.close()
    del data_list
    # Get Data
    Ex = np.loadtxt("Ex"+str(num)+"_pyFIMM.txt")
    Ey = np.loadtxt("Ey"+str(num)+"_pyFIMM.txt")
    Hx = np.loadtxt("Hx"+str(num)+"_pyFIMM.txt")
    Hy = np.loadtxt("Hy"+str(num)+"_pyFIMM.txt")
    Ex = np.array(Ex)
    Ey = np.array(Ey)
    Hx = np.array(Hx)
    Hy = np.array(Hy)
    # Time-averaged z-component of the Poynting vector:
    Sz = (Ex*Hy.conjugate() - Ey*Hx.conjugate()) / 2.0
    xStart = xy[0]
    xEnd = xy[1]
    dx = (xEnd - xStart)/nx
    yStart = xy[2]
    yEnd = xy[3]
    dy = (yEnd - yStart)/ny
    dA = dx*dy*1e-12   # cell area; 1e-12 presumably converts um^2 -> m^2 - TODO confirm
    return sum(Sz)*dA
#end P()
def plot(self, *args, **kwargs ):
#, include_pml=True):
'''plot( [ component, title='str', return_handles=False ] )
Plot the mode fields with matplotlib. If multiple modes are specified (eg. `WG.mode([0,1,2]).plot()` ) then each mode will be plotted in a 2-column subplot on one figure.
Parameters
----------
component : { 'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' }, case insensitive, optional
Choose which field component to return.
If omitted, will choose the Ex or Ey component depending on which has a higher fraction of the field (TEfrac). For plots of multiple modes, this check of TEfrac will be performed for each specified mode.
title : string, optional
Will prepend this text to the output filename, and do the same to the Plot Title.
If not provided, the name of the passed Waveguide component, Mode Number & Field Component will be used to construct the filename & plot title.
annotations : boolean, optional
If true, the effective index, mode number and field component will written on each mode plot. True by default.
return_handles : { True | False }, optional
If True, will return handles to the figure, axes and images. False by default.
Returns
-------
fig, axes, imgs
The matplotlib figure, axis and image (`pyplot.imshow()` ) handles. Only returned if `return_handles=True`
`fig` is the handle to the whole figure, allowing you to, for example, save the figure yourself (instead of using `Mode.save_plot()` ) via `fig.savefig(pat/to/fig.png)`.
`ax` is a list of the possibly multiple axes created by a call to maplotlib.pyplot.subplots(). Note the non-matlab-like behviour of the returned axes array: they take the form of the actual subplots layout.
For example, for a single axis created by
>>> fig, axes, imgs = strip.mode( 0 ).plot( return_handles=True)
axes is a single axis handle.
For two axes (eg. `mode( [0,1] ).plot()`, `axes` is a two-valued array: [ax0, ax1]
However, for more than 2 modes, `axes` takes the form of the subplots layout, like so:
>>> fig, axes, imgs = strip.mode( [0,1,2,3,4,5] ).plot( return_handles=True)
> axes = [ [ax0, ax1],
[ax2, ax3],
[1x4, ax5] ]
So be careful when indexing into a plot of numerous modes, due to the weirdness of `pyplot.subplots()`.
Examples
--------
>>> stripWG.mode(0).calc() # calculate the modes of the waveguide
>>> stripWG.mode(0).plot()
Plot the E-field component with the maximum field by default (eg. Ex for TE, and Ey for TM)
>>> stripWG.mode(0).plot('Hy')
plot the Hy component instead
>>> stripWG.mode(0).plot(title='My Mode')
plot Ex component with plot title "My Mode - mode 0.png"
>>> stripWG.mode('all').plot()
Will plot the Ex or Ey component (whichever is the major comp.) of all calc'd modes (as specified by `set_N()` ).
>>> stripWG.mode( [0,2] ).plot('Ey', title='Ey of Modes 0 and 2')
Plot the Ey components of Modes 0 & 2 on one figure with custom figure title.
>>> fig1, ax1, im = stripWG.mode(0).plot(return_handles = True)
Return the matplotlib figure, axis and image handles, for future manipulation.
For example, this allows you to add other annotations to a plot, and then save the figure:
>>> fig1, ax1, im = stripWG.mode(0).plot(return_handles = True)
>>> ax1.text( 0.05, 0.05, \
>>> r"$\alpha = %0.3f cm^{-1}$" %( stripWG.mode(0).get_attenuation() ), \
>>> transform=axis.transAxes, horizontalalignment='left', color='green', fontsize=9, fontweight='bold')
>>> fig1.savefig('Mode with attenuation.png')
'''
import os, sys
if len(args) == 0:
field_cpt_in = None
'''
tepercent = fimm.Exec(self.modeString+"list[{"+str(self.list_num)+"}].modedata.tefrac")
if tepercent > 50:
field_cpt = 'Ex'.lower()
else:
field_cpt = 'Ey'.lower()
'''
elif len(args) == 1:
field_cpt_in = args[0]
if isinstance(field_cpt_in,str) or field_cpt_in==None :
if field_cpt_in != None:
'''args[0] is a string: '''
field_cpt = field_cpt_in.lower().strip()
#else args[0] = None !
else:
ErrStr = "Mode.plot(): Unrecognized field component requested: `" + str(args[0]) + "`. See `help(<pyfimm>.Mode.plot)` for more info."
raise ValueError(ErrStr)
else:
ErrStr = "Mode.plot(): Invalid number of arguments. See `help(<pyfimm>.Mode.plot)` for more info."
raise ValueError(ErrStr)
return_handles = kwargs.pop('return_handles', False)
annotations = kwargs.pop('annotations', True)
ptitle = kwargs.pop('title',None)
if ptitle:
plot_title = ptitle + " - Mode " + str(self.modenum)
else:
plot_title = '"'+self.obj.name+'":' + " Mode " + str(self.modenum)
'''Unused kwargs returned at end of this function'''
# Check if modes have been calc()'d:
a = fimm.Exec(self.modeString+"list["+str(self.list_num[0])+"].profile.update()")
# Check if modes have been calc()'d:
if DEBUG(): print "plot(): #",a[:-2].strip(),'#\n'
if a[:-2].strip() != '':
ErrStr = "FimmWave error: please check if the modes have been calculated via `WG.calc()`.\n\tFimmWave returned: `%s`"%a[:-2].strip()
raise UserWarning(ErrStr)
# get effective indices of each mode (add to plot):
nmodes = self.get_n_eff(as_list=True)
if DEBUG(): print "mode.plot(): nmodes =", nmodes
# create the required number of axes:
# Options for the subplots:
sbkw = {'axisbg': (0.15,0.15,0.15)} # grey plot background
if len(self.list_num) == 1:
fig1, axs = plt.subplots(nrows=1, ncols=1, subplot_kw=sbkw)
else:
Rows = int( math.ceil( len(self.list_num)/2. ) )
fig1, axs = plt.subplots( nrows=Rows , ncols=2, sharex=True, sharey=True, subplot_kw=sbkw)
if len(self.list_num) % 2 == 1:
'''If odd# of modes, Delete the last (empty) axis'''
fig1.delaxes( axs[ len(axs)-1, 1] )
#axs = axs[:-1] # remove del'd axis from list
fig1.suptitle(plot_title) # figure title
fig1.canvas.draw() # update the figure
ims = []
for n, num in enumerate(self.list_num):
# Which axis to draw on:
if len(self.list_num) == 1:
'''only single plot'''
axis = axs
elif len(np.shape(axs)) == 1:
'''only one row, so axs = [ax1, ax2]'''
axis = axs[ n ]
else:
'''multiple rows, so axs=[ [ax1,ax2], [ax3,ax4]...]'''
axis = axs[ math.floor( n/2. ), n%2. ]
# write an AMF file with all the field components.
mode_FileStr = "mode"+str(num)+"_pyFIMM.amf" # name of files
# SubFolder to hold temp files:
if not os.path.isdir(str( AMF_FolderStr() )):
os.mkdir(str( AMF_FolderStr() )) # Create the new folder
mode_FileStr = os.path.join( AMF_FolderStr(), mode_FileStr )
if DEBUG(): print "Mode.plot(): " + self.modeString+"list[" + str(num) + "].profile.data.writeamf("+mode_FileStr+",%10.6f)"
fimm.Exec(self.modeString+"list[" + str(num) + "].profile.data.writeamf("+mode_FileStr+",%10.6f)")
## AMF File Clean-up
#import os.path, sys # moved to the top
fin = open(mode_FileStr, "r")
if not fin: raise IOError("Could not open '"+ mode_FileStr + "' in " + sys.path[0] + ", Type: " + str(fin))
data_list = fin.readlines()
fin.close()
# Delete File Header
nxy_data = data_list[1]
xy_data = data_list[2]
slvr_data = data_list[6]
del data_list[0:9]
# strip the comment lines from the nxy file:
nxyFile = os.path.join( AMF_FolderStr(), "mode" + str(num) + "_pyFIMM_nxy.txt")
fout = open(nxyFile, "w")
fout.writelines(nxy_data)
fout.close()
nxy = np.loadtxt(nxyFile, comments='//')
nx = int(nxy[0])
ny = int(nxy[1])
xyFile = os.path.join( AMF_FolderStr(), "mode" + str(num) + "_pyFIMM_xy.txt")
fout = open(xyFile, "w")
fout.writelines(xy_data)
fout.close()
xy = np.loadtxt(xyFile, comments='//')
slvrFile = os.path.join( AMF_FolderStr(), "mode" + str(num) + "_pyFIMM_slvr.txt")
fout = open(slvrFile, "w")
fout.writelines(slvr_data)
fout.close()
iscomplex = np.loadtxt(slvrFile, comments='//')
# Find Field Component
if field_cpt_in == None:
'''If unspecified, use the component with higher field frac.'''
tepercent = fimm.Exec(self.modeString + "list[{" + str(num) + "}].modedata.tefrac")
if tepercent > 50:
field_cpt = 'Ex'.lower()
else:
field_cpt = 'Ey'.lower()
#end if(field_cpt_in)
if field_cpt == 'Ex'.lower():
data = data_list[1:nx+2]
elif field_cpt == 'Ey'.lower():
data = data_list[(nx+2)+1:2*(nx+2)]
elif field_cpt == 'Ez'.lower():
data = data_list[2*(nx+2)+1:3*(nx+2)]
elif field_cpt == 'Hx'.lower():
data = data_list[3*(nx+2)+1:4*(nx+2)]
elif field_cpt == 'Hy'.lower():
data = data_list[4*(nx+2)+1:5*(nx+2)]
elif field_cpt == 'Hz'.lower():
data = data_list[5*(nx+2)+1:6*(nx+2)]
else:
ErrStr = 'Invalid Field component requested: ' + str(field_cpt)
raise ValueError(ErrStr)
del data_list
# Resave Files
mode_FileStr = mode_FileStr+"_"+field_cpt.strip().lower()
fout = open(mode_FileStr, "w")
fout.writelines(data)
fout.close()
# Get Data
if iscomplex == 1:
field_real = np.loadtxt(mode_FileStr, usecols=tuple([i for i in range(0,2*ny+1) if i%2==0]))
field_imag = np.loadtxt(mode_FileStr, usecols=tuple([i for i in range(0,2*ny+2) if i%2!=0]))
else:
field_real = np.loadtxt(mode_FileStr)
'''field_real = np.real(field)'''
# Plot Data
xStart = xy[0]
xEnd = xy[1]
yStart = xy[2]
yEnd = xy[3]
im = axis.imshow(np.rot90(abs(field_real),1), cmap=cm.hot, aspect='auto', extent=(xStart,xEnd,yStart,yEnd))
im.set_interpolation('bilinear')
ims.append(im)
#axis.set_xlabel('x ($\mu$m)')
#axis.set_ylabel('y ($\mu$m)')
if annotations:
titlestr = "Mode(" + str(num-1) + "): " + field_cpt.title()
#axis.set_title( titlestr )
axis.text( 0.95, 0.9, titlestr, transform=axis.transAxes, horizontalalignment='right', color='green', fontsize=9, fontweight='bold')
n_str = "$\mathregular{n_{eff} =}$ %0.5f"%(nmodes[n].real)
axis.text( 0.05, 0.9, n_str, transform=axis.transAxes, horizontalalignment='left', color='green', fontsize=9, fontweight='bold')
fig1.canvas.window().raise_() # bring plot window to front
fig1.canvas.draw() # update the figure
#end for(list_num)
'''
ax1.set_xlabel('x ($\mu$m)')
ax1.set_ylabel('y ($\mu$m)')
ax1.set_title( self.obj.name + ": Mode(" + str(self.modenum) + "): " + field_cpt.title() )
'''
#fig1.canvas.window().raise_() # bring plot window to front
#fig1.canvas.draw()
fig1.show()
if kwargs:
'''If there are unused key-word arguments'''
ErrStr = "WARNING: Mode.plot(): Unrecognized keywords provided: {"
for k in kwargs.iterkeys():
ErrStr += "'" + k + "', "
ErrStr += "}. Continuing..."
print ErrStr
if return_handles: return fig1, axs, ims
#end plot(Waveguide/Circ)
#end plot()
def save_plot(self,*args, **kwargs):
'''save_plot( [ component, title='str', path=None ] )
Save the mode profile to a file. Actually just calls Mode.plot(component, title) & saves the resulting figure.
Parameters
----------
component : { 'Ex' | 'Ey' | 'Ez' | 'Hx' | 'Hy' | 'Hz' }, case insensitive, optional
Choose which field component to return.
If omitted, will choose the Ex or Ey component depending on which has a higher fraction of the field (TEfrac).
title : string, optional
Will prepend this text to the output filename, and do the same to the Plot Title. If `path` is not provided, the filename will also have this text prepended.
If not provided, the name of the passed Waveguide component, Mode Number & Field Component will be used to construct the filename & plot title.
return_handles : { True | False }, optional
If True, will return handles to the figure, axes, legends and lines. False by default.
path : string, optional
Path to save file to, including base filename. File extension will be automatically appended.
closefigure : boolean, optional
If `True`, will close the figure window after the file has been saved. Useful for large for() loops.
Extra keyword-arguments are passed to Mode.plot()
Examples
--------
>>> stripWG.mode(0).calc() # calculate the modes of the waveguide
>>> stripWG.mode(0).save_plot()
saves the Ex component to file "mode 1 - Ex.png"
>>> stripWG.mode(0).save_plot('Hy')
save the Hy component instead
>> stripWG.mode(0).save_plot(title='My Mode')
saves Ex component to file "My Mode - mode 1 - Ex.png"
>> stripWG.mode(0).save_plot('I', title='My Mode')
saves Intensity to file "My Mode - mode 1 - Ex.png"
>> fig1, ax1, im = stripWG.mode(0).save_plot(return_handles = True)
Return the matplotlib figure, axis and image handles, for future manipulation.
Returns
-------
fig1, ax1, im
The matplotlib figure, axis and image (imshow) handles, returned only if `return_handles = True`.
'''
import os.path
if len(args) == 0:
field_cpt = None
'''
tepercent = fimm.Exec(self.modeString+"list[{"+str(self.list_num)+"}].modedata.tefrac")
if tepercent > 50:
field_cpt = 'Ex'.lower()
else:
field_cpt = 'Ey'.lower()
'''
elif len(args) == 1:
field_cpt = args[0].lower().strip()
else:
ErrStr = "Mode.plot(): Invalid number of arguments. See `help(<pyfimm>.Mode.plot)` for more info."
raise ValueError(ErrStr)
returnhandles = kwargs.pop('return_handles', False)
path = kwargs.pop('path', None)
closefigure = kwargs.pop('closefigure', False)
ptitle = kwargs.pop('title',None)
if ptitle:
plot_title = ptitle + " - Mode " + str(self.modenum)
else:
plot_title = self.obj.name + " - Mode " + str(self.modenum)
# plot the mode:
handles = self.plot(field_cpt, title=ptitle, return_handles=True, **kwargs)
fig1 = handles[0]
if path:
savepath = path + '.png'
else:
savepath = plot_title + '.png'
print "Saving Plot to:", savepath
fig1.savefig( savepath ) # save the figure
if closefigure: plt.close(fig1)
if kwargs:
'''If there are unused key-word arguments'''
ErrStr = "WARNING: Mode.save_plot(): Unrecognized keywords provided: {"
for k in kwargs.iterkeys():
ErrStr += "'" + k + "', "
ErrStr += "}. Continuing..."
print ErrStr
if returnhandles: return handles
#end save_plot()
#end class Mode
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,427
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/PhotonDesignLib/pdAppclient.py
|
#pdAppClient (PYTHON version)
from pdPythonLib import *
import sys
from string import *
if len(sys.argv)<3:
print "pdAppClient (PYTHON Version) Syntax:"
print "pdAppClient <portNo> <hostname>"
print "<portNo> = the port number on which the application is serving"
print "<hostname> = the name (or IP address) where application is serving"
else:
_portNo = atoi(sys.argv[1])
f = pdApp()
retmsg = f.ConnectToApp(sys.argv[2],_portNo)
if retmsg!="":
print retmsg
else:
print "Connected to Application"
print "Enter your commands or enter exit to finish"
isDone = 0
while isDone==0:
comm = raw_input("COMMAND: ")
if comm[0:4]=="exit":
isDone = 1
else:
rec = f.Exec(comm)
print rec
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,428
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__Classes.py
|
'''Various smaller Classes, part of pyFIMM.
This module is imported by pyfimm.py
Included here are the following classes:
Node (Inherited by all objects that are actual fwNodes)
Project & import_project()
Material
Layer (Waveguide/Circ)
Slice (Waveguide)
Section (Device)
Also some Node-specific functions such as strip_txt(), check_node_name() etc.
'''
from __globals import * # import global vars & FimmWave connection object
# DEBUG() variable is also set in __globals, & numpy as np & pyplot as plt
#from __pyfimm import * # import the main module (should already be imported)
# NOTE: shouldn't have to duplicate the entire pyfimm file here! Should just import the funcs we need...
import os.path # for path manipulation
import datetime as dt # for date/time strings
import random # random number generators
####################################################
# Node-Specific Functions
####################################################
def strip_txt(FimmString):
    '''Return `FimmString` with FimmWave end-of-line characters and surrounding whitespace removed.

    Non-string inputs skip the EOL removal but are still `.strip()`ed.'''
    _junk = '\n\x00'    # FimmWave EOL/EOF characters
    if isinstance(FimmString, str) and FimmString.endswith(_junk):
        FimmString = FimmString.strip(_junk)    # remove FimmWave EOL/EOF chars from both ends
    return FimmString.strip()   # remove surrounding whitespace

# Aliases for the same function:
strip_text = striptxt = strip_txt
def strip_array_old(FimmArray):
    '''DEPRECATED: Remove EOL & 'None' elements of a returned list or array.
    This version only corrects a 1-D array.

    Returns the list with a leading `None` element removed; any other input is returned unchanged.'''
    # Guard against empty lists: indexing FimmArray[0] would raise IndexError.
    if isinstance( FimmArray, list ) and len(FimmArray) > 0:
        if FimmArray[0] is None: FimmArray = FimmArray[1:]  # omit 1st 'None' element
    return FimmArray
def strip_array( FimmArray ):
    '''Remove erroneous 'None' elements of a returned list or array.

    Strips a leading `None` from the outer list, then strips a leading `None`
    from each remaining row.  The (possibly modified) input is returned.

    NOTE(review): assumes `FimmArray` is non-empty and that, after the first
    element, every entry is itself an indexable row (2-D list) — an empty list
    or a 1-D list of scalars would raise here; confirm against callers.'''
    if DEBUG(): print "strip_array_test(): Type=", type(FimmArray)
    if isinstance( FimmArray, list ):
        if DEBUG(): print( "\tOrig = "+str(FimmArray) )
        if FimmArray[0] is None:
            # FimmWave often returns a spurious leading `None`:
            if DEBUG(): print("\tFimmArray[0]==None; stripping...")
            FimmArray = FimmArray[1:] # omit 1st 'None' element
            if DEBUG(): print( "\t"+str(FimmArray) )
        # Repeat the fix for each row of the (assumed 2-D) array:
        for row in range(len(FimmArray)):
            if FimmArray[row][0] is None:
                if DEBUG(): print( "\tFimmArray[%i][0]==None; stripping..."%(row) )
                FimmArray[row] = FimmArray[row][1:]
                if DEBUG(): print( "\t"+str(FimmArray[row]) )
    return FimmArray
def eval_string(fpStr):
    '''Return the numeric value of `fpStr` if the string is purely numeric (via `eval()`);
    otherwise return the original string unchanged.

    This guards the security issue of running `eval()` on arbitrary strings returned
    by Fimmprop: only strings that are entirely numeric are ever evaluated.'''
    # Only unicode strings expose .isnumeric() (Python 2); coerce before testing.
    if not unicode(fpStr).isnumeric():
        return fpStr
    return eval(fpStr)  # safe here: isnumeric() guarantees digit-only content
#end eval_string()
def check_node_name( name, nodestring="app", overwrite=False, warn=False ):
''' See if the node name already exists in FimmWave, and return a modified project name (with random numbers appended) if it exists.
Parameters
----------
name : string
The name to check. `name` will be checked against all the node-names at the specified level.
nodestring : string, optional
Specifies the node to check for an existing node name. Defaults to "app.", which means you're checking top-level Project names. If, instead, `nodestring = app.subnodes[1].` then you're checking node names within the 1st project in FimmWave.
warn : { True | False }, optional
Print a warning if the node name exists? Defaults to False, but still prints if the global pyFIMM.set_WARN() is True, which it is by default. Use set_WARN()/unset_WARN() to alter.
overwrite : { True | False | 'reuse' }, optional
If True, will try to delete an already-loaded Fimmwave project that has the same name in Fimmwave. Will only delete the node if it is the last in the node list (This prevents breaking pyFIMM references to FimmWave Projects). Otherwise, the new FimmWave node will have it's name changed. If False, will append random digits to supplied project name and return it in `nodename`.
If 'reuse', then the node won't be deleted, so the existing Node can be referenced.
False by default.
Returns
-------
nodename : str
New name for the node. If the original `name` existed in the specified node list, `nodename` will have random digits appended to the name. Otherwise, it will be left untouched, and be identical to the provided `name`. Thus, if `nodename != name` then the node `name` already exists in the FimmWave node list. The modified name will have the form `OrigNodeName.123456`.
sameprojnum : int
Node Number of the offending identically-named node.
Thus the FimmWave command `nodestring + ".subnodes[ nodenum ].delete` will delete the existing node with the same name.
Examples
--------
Get modified nodename & nodenum of same-named Proj, delete/rename existing node if needed.
>>> nodestring = "app"
>>> newprjname, samenodenum = check_node_name( prjname, nodestring=nodestring, overwrite=False, warn=True )
Create the new node with returned name, which was modified if needed:
>>> fimm.Exec( "app.addsubnode(fimmwave_prj," + str( newprjname ) + ")" )
Do the same, but with `overwrite=True`, ensuring that the name we specify will be used.
>>> prjname = "My New Project"
>>> check_node_name( prjname, nodestring="app", overwrite=True )
>>> fimm.Exec( "app.addsubnode(fimmwave_prj," + str( prjname ) + ")" )
'''
N_nodes = int( fimm.Exec(nodestring+".numsubnodes()") )
SNnames = [] #subnode names
for i in range(N_nodes):
SNnames.append( strip_txt( fimm.Exec(nodestring+r".subnodes["+str(i+1)+"].nodename()") ) )
# trim whitespace via string's strip(), strip the two EOL chars '\n\x00' from end via indexing [:-2]
# check if node name is in the node list:
sameprojidx = np.where( np.array(SNnames) == np.array([name]) )[0]
#if DEBUG(): print "Node._checkNodeName(): [sameprojname] = ", sameprojname, "\nSNnames= ", SNnames
if len( sameprojidx ) > 0:
'''if identically-named node was found'''
if warn or WARN(): print "WARNING: Node name `" + name + "` already exists; using option `overwrite = %s`"%(overwrite)
if DEBUG(): print warn, WARN()
sameprojname = SNnames[sameprojidx]
sameprojidx = sameprojidx[0]+1 # FimmWave index to the offending node
if overwrite == 'reuse':
overwrite=False
reuse=True
if overwrite:
if sameprojidx == N_nodes:
'''It is the last node entry, so delete the offending identically-named node'''
if warn or WARN(): print "node '%s'.buildNode(): Deleting existing Node # %s"%(name,str(sameprojidx)) + ", `%s`."%(sameprojname)
fimm.Exec( nodestring + ".subnodes[%i].delete()"%(sameprojidx) )
else:
'''It is not the last entry in the node list, so we can't delete it without breaking other pyFIMM references.'''
# change the name of offending node:
newname = name + "." +str( get_next_refnum() )
if warn or WARN(): print "node '%s'.buildNode(): Renaming existing Node #"%(name) + str(sameprojidx) + ", `%s` --> `%s`."%(sameprojname, newname)
fimm.Exec( nodestring + ".subnodes[%i].rename( "%(sameprojidx) + newname + " )" )
else:
if not reuse:
'''change the name of this new node'''
name += "." +str( get_next_refnum() ) #dt.datetime.now().strftime('.%f') # add current microsecond to the name
if warn or WARN(): print "\tNew Node name changed to: ", name
else:
if DEBUG(): print "Node name `%s` is unique." % name
pass
return name, sameprojidx
#end checknodename()
def get_next_refnum():
    '''Return a 6-digit random number for naming new FimmWave references/nodes.

    A duplicate is never returned: all previously-issued values are kept in the
    pyFIMM module-global list `global_refnums`.'''
    global global_refnums
    try:
        global_refnums
    except NameError:
        global_refnums = []     # first use: initialize empty history
    keep_looping, attempt = 1, 1
    while keep_looping == 1:
        '''Draw candidates until one is not already in the global list.'''
        candidate = random.randint(100000, 999999)  # 6-digit random number
        matches = np.where( np.array(global_refnums) == np.array([candidate]) )[0]
        if len(matches) == 0:
            '''Candidate is unused -- stop the loop.'''
            keep_looping = 0
        # Safety valve, in case the user has somehow exhausted the number space:
        attempt = attempt + 1
        if attempt > 1000:
            keep_looping = 0
            raise UserWarning("Could not generate a random number after 1000 iterations! Aborting...")
    # end while(keep_looping)
    global_refnums.append( candidate )
    return global_refnums[-1]   # return the number just issued
#end get_next_refnum()
####################################################
# Classes
####################################################
class Node(object):
    """class Node: creates an internal representation of a Fimmwave node

    Node() - Creates TimeStamped Node Name, Number 0, No Parent or Children
    Node('NameOfNode')
    Node('NameOfNode', NodeNumber)
    Node('NameOfNode', NodeNumber, ParentNodeObject)
    Node('NameOfNode', NodeNumber, ParentNodeObject, Children)

    If 'NameOfNode' already exists, the name will be modified by adding a random number to the end as ".123456".
    The modified name can be found in the variable: `Node.name`
    If the keyword argument `overwrite=True` is provided, then an existing Node with the same name would be deleted upon building."""
    def __init__(self,*args, **kwargs):
        # Defaults: a time-stamped name, node number 0, no relations, no FimmWave state.
        if len(args) >= 0:
            self.name = 'Fimmwave Node ' + dt.datetime.now().strftime("%Y-%m-%d %H.%M.%S")
            self.num = 0            # node number within the parent's subnode list
            self.parent = None
            self.children = []
            self.type = None        # node type tag (set by subclasses)
            self.savepath = None    # file path, once saved
            self.nodestring = None  # eg. "app.subnodes[1]", once built
        # Positional args progressively override the defaults: (name, num, parent, children).
        if len(args) == 1:
            self.name = args[0]
        elif len(args) == 2:
            self.name = args[0]
            self.num = args[1]
        elif len(args) == 3:
            self.name = args[0]
            self.num = args[1]
            self.parent = args[2]
        elif len(args) == 4:
            self.name = args[0]
            self.num = args[1]
            self.parent = args[2]
            self.children = args[3]
        elif len(args) >= 5:
            print 'Invalid number of input arguments to Node()'
        #overwrite = kwargs.pop('overwrite', False) # to overwrite existing project of same name
        #warn = kwargs.pop('warn', True) # display warning is overwriting?
        # The following bare string is deprecated dead code kept by the original author:
        """
        ## Check if top-level node name conflicts with one already in use:
        #AppSubnodes = fimm.Exec("app.subnodes")   # The pdPythonLib didn't properly handle the case where there is only one list entry to return.  Although we could now use this function, instead we manually get each subnode's name:
        N_nodes = int( fimm.Exec("app.numsubnodes()") )
        SNnames = []
        for i in range(N_nodes):
            SNnames.append( fimm.Exec(r"app.subnodes["+str(i+1)+"].nodename()").strip()[:-2] )
            # trim whitespace with string's strip(), strip the EOL chars '\n\x00' from end with indexing [:-2]
        # check if node name is in the node list:
        sameprojname = np.where( np.array(SNnames) == np.array([self.name]) )[0]
        if DEBUG(): print "Node.buildNode(): [sameprojname] = ", sameprojname, "\nSNnames= ", SNnames
        if len( sameprojname ) > 0:
            '''if identically-named node was found'''
            if overwrite:
                '''delete the offending identically-named node'''
                if warn or WARN(): print "Deleting Node #" + str(sameprojname) + " `" + SNnames[sameprojname] + "`."
                sameprojname = sameprojname[0]+1
                fimm.Exec("app.subnodes["+str(sameprojname)+"].delete()")
            else:
                '''change the name of this new node'''
                if warn or WARN(): print "WARNING: Node name `" + self.name + "` already exists;"
                self.name += "." +str( get_next_refnum() ) #dt.datetime.now().strftime('.%f') # add current microsecond to the name
                print "\tNode name changed to: ", self.name
            #end if(overwrite)
        else:
            if DEBUG(): print "Node name is unique."
        #end if(self.name already exists aka. len(sameprojname)
        """
        if kwargs:
            '''If there are unused key-word arguments'''
            ErrStr = "WARNING: Node(): Unrecognized keywords provided: {"
            for k in kwargs.iterkeys():
                ErrStr += "'" + k + "', "
            ErrStr += "}.    Continuing..."
            print ErrStr
    #end __init__()

    def _checkNodeName(self, nodestring, overwrite=False, warn=False):
        '''Check for duplicate node name, overwrite if desired.

        nodestring : string
            string to reference the FimmWave node, omitting trailing period. eg.
                app.subnodes[1].subnodes[3]
        overwrite : { True | False }, optional
            Delete the offending identically-named node?  False by default.
        warn : { True | False }, optional
            Print warning?  Defaults to False, but still prints if the global pyFIMM.set_WARN() is True, which it is by default.  Use set_WARN()/unset_WARN() to alter.
        '''
        ## Check if top-level node name conflicts with one already in use:
        #AppSubnodes = fimm.Exec("app.subnodes") # The pdPythonLib didn't properly handle the case where there is only one list entry to return. Although we could now use this function, instead we manually get each subnode's name:
        N_nodes = int( fimm.Exec(nodestring+".numsubnodes()") )
        SNnames = [] #subnode names
        for i in range(N_nodes):
            SNnames.append( fimm.Exec(nodestring+r".subnodes["+str(i+1)+"].nodename()").strip()[:-2] )
            # trim whitespace via string's strip(), strip the two EOL chars '\n\x00' from end via indexing [:-2]
        # check if node name is in the node list:
        sameprojname = np.where( np.array(SNnames) == np.array([self.name]) )[0]
        #if DEBUG(): print "Node._checkNodeName(): [sameprojname] = ", sameprojname, "\nSNnames= ", SNnames
        if len( sameprojname ) > 0:
            '''if identically-named node was found'''
            if overwrite:
                '''delete the offending identically-named node'''
                # NOTE(review): `sameprojname` is a numpy index-array here; using it to
                # index the Python list `SNnames` may raise TypeError — confirm.
                if warn or WARN(): print "Overwriting existing Node #" + str(sameprojname) + ", `" + SNnames[sameprojname] + "`."
                sameprojname = sameprojname[0]+1
                fimm.Exec(nodestring+".subnodes["+str(sameprojname)+"].delete()")
            else:
                '''change the name of this new node'''
                if warn or WARN(): print "WARNING: Node name `" + self.name + "` already exists;"
                self.name += "." +str( get_next_refnum() ) # add numbers to the name
                print "\tNode name changed to: ", self.name
            #end if(overwrite)
        else:
            #if DEBUG(): print "Node name is unique."
            pass
        #end if(self.name already exists aka. len(sameprojname) )

    def set_parent(self, parent_node):
        '''Set `parent_node` as this node's parent, and register this node in the parent's `children` list.'''
        self.parent = parent_node
        parent_node.children.append(self)

    def delete(self):
        '''Delete the corresponding node in FimmWave.'''
        fimm.Exec( "%s.delete()"%(self.nodestring) )

    def Exec(self, fpstring, check_built=True, vars=[]):
        '''Send raw command referencing this Node.
        For example:
            MyWaveGuide.Exec( "findorcreateview()" )   # to make FimmWave show the Waveguide window
        Note the initial period `.` is not needed.

        Internally, this can replace the older syntax of
            fimm.Exec( self.nodestring + '.findorcreateview()' )
            fimm.Exec( '%s.findorcreateview()'%(self.nodestring) )
        with the simpler
            self.Exec( 'findorcreateview()' )
        See `help(pyfimm.Exec)` for additional info.

        Parameters
        ----------
        fpstring : str
            FimmProp command to send to this Node.  Omit initial period.
        check_built: { True | False }, optional
            If True, will raise an error if the Node does not have it's `built` flag set.  Otherwise will ignore the `built` flag.
        vars : list, optional
            Similar to pyfimm.Exec(), a list of arguments to pass.
            NOTE(review): mutable default argument — it is only passed through
            here, never mutated, so this appears benign; confirm.

        Returns
        -------
        If anything is returned by the FimmProp commandline, the output will be sanitized and returned.
        Lists will have the `None` elements removed, and Strings will have the EOF character removed.
        '''
        if check_built:
            if not self.built:
                raise UserWarning( "Node is not built yet, can't reference this Node yet!  Please run `MyNode.Build()` first." )
        out = fimm.Exec( self.nodestring + "." + fpstring, vars)
        # sanitize FimmWave's raw output before returning it:
        if isinstance(out, list): out = strip_array(out)
        if isinstance(out, str): out = strip_text(out)
        return out
#end class Node
class Project(Node):
"""Return a new Fimmwave Project.
Project inherits from the Node class.
DEPRECATED: Arguments are passed to the Node class constructor - type help('pyFIMM.Node') for available arguments.
The Project node is only built in FimmWave when you call `ProjectObj.buildNode()`.
Please type `dir(ProjectObj)` or `help(ProjectObj)` to see all the attributes and methods available.
Parameters
----------
name : string
Set the fimmwave name for this node.
buildNode : { True | False }, optional
build the project node right away? Requires than a name is passed.
overwrite : { True | False }, optional
Only valid if `buildNode=True`. If True, will delete a project already open in FimmWave with the same name if it's the last project in the FimmWave list, otherwise will rename the offending Project (retaining desired name of this new Project). If False, and a similarly-named Project exists in FimmWave, will modify the supplied project name.
The modified name is created by adding a random number to the end, such as "NewNodeName.123456", and can be found in the variable: `ProjectObj.name`.
Attributes
----------
Once ProjectObj.buildNode() has been called, the following attributes are available (they are set to `None` beforehand):
name : string, name of the FimMWave Node
num : int, number of this node in FimmWave
nodestring : string, to access this node in FimmWave. Eg. `app.subnodes[5]`, omitting trailing period `.`.
savepath : string, the path to file for the project.
origin : { 'pyfimm' | 'fimmwave' }
Indicates whether this Device was built using pyFIMM, or was constructed in FimmWave & imported via `import_device()`.
"""
def __init__(self, name=None, buildNode=False, overwrite=False, warn=False , *args, **kwargs):
#build = kwargs.pop('buildNode', False) # to buildNode or not to buildNode?
#overwrite = kwargs.pop('overwrite', False) # to overwrite existing project of same name
super(Project, self).__init__(name) # call Node() constructor, passing extra args
## Node('NameOfNode', NodeNumber, ParentNodeObject, Children)
self.built = False
self.num = self.nodestring = self.savepath = None
self.variablesnode = None
if name: self.name = name
#kwargs.pop('overwrite', False) # remove kwarg's which were popped by Node()
#kwargs.pop('warn', False)
if buildNode: self.buildNode(overwrite=overwrite, warn=warn ) # Hopefully Node `pops` out any kwargs it uses.
if kwargs:
'''If there are unused key-word arguments'''
ErrStr = "WARNING: Project(): Unrecognized keywords provided: {"
for k in kwargs.iterkeys():
ErrStr += "'" + k + "', "
ErrStr += "}. Continuing..."
print ErrStr
def buildNode(self, name=None, overwrite=False, warn=False):
'''Build the Fimmwave node of this Project.
Parameters
----------
name : string, optional
Provide a name for this waveguide node.
If `name` is not provided as an argument here, it should be pset via `MyProj.name = "NewName"` before calling `buildNode()`.
overwrite : { True | False }, optional
If True, will delete a project already open in FimmWave with the same name if it's the last project in the FimmWave list, otherwise will rename the offending Project (retaining desired name of this new Project). If False, and a similarly-named Project exists in FimmWave, will modify the supplied project name.
The modified name is created by adding a random number to the end, such as "NewNodeName.123456", and can be found in the variable: `ProjectObj.name`.
'''
if DEBUG(): print "Project.buildNode():"
if name: self.name = name
self.type = 'project' # unused!
""" Deprecated - using check_node_name() instead.
## Check if top-level (project) node name conflicts with one already in use:
#AppSubnodes = fimm.Exec("app.subnodes") # The pdPythonLib didn't properly handle the case where there is only one list entry to return. Although we could now use this function, instead we manually get each subnode's name:
N_nodes = int( fimm.Exec("app.numsubnodes()") )
SNnames = [] #subnode names
for i in range(N_nodes):
SNnames.append( fimm.Exec(r"app.subnodes["+str(i+1)+"].nodename()").strip()[:-2] )
# trim whitespace via string's strip(), strip the two EOL chars '\n\x00' from end via indexing [:-2]
# check if node name is in the node list:
sameprojidx = np.where( np.array(SNnames) == np.array([self.name]) )[0]
if DEBUG(): print "Node '%s'.buildNode(): [sameprojname] = " % self.name, sameprojidx, "\nSNnames= ", SNnames
if len( sameprojidx ) > 0:
'''if identically-named node was found'''
if overwrite:
'''delete the offending identically-named node'''
print self.name + ".buildNode(): Overwriting existing Node #" + str(sameprojidx) + ", `" + SNnames[sameprojidx] + "`."
sameprojidx = sameprojidx[0]+1
fimm.Exec("app.subnodes["+str(sameprojidx)+"].delete()")
else:
'''change the name of this new node'''
print self.name + ".buildNode(): WARNING: Node name `" + self.name + "` already exists;"
self.name += "." +str( get_next_refnum() ) #dt.datetime.now().strftime('.%f') # add current microsecond to the name
print "\tNode name changed to: ", self.name
#end if(overwrite)
else:
#if DEBUG(): print "Node name is unique."
pass
#end if(self.name already exists) aka. len(sameprojname)
"""
nodestring = "app" # the top-level
self.name, samenodenum = check_node_name( self.name, nodestring=nodestring, overwrite=overwrite, warn=warn ) # get modified nodename & nodenum of same-named Proj, delete/rename existing node if needed.
'''Create the new node: '''
N_nodes = fimm.Exec("app.numsubnodes()")
node_num = int(N_nodes)+1
fimm.Exec("app.addsubnode(fimmwave_prj,"+str(self.name)+")")
self.num = node_num
self.nodestring = "app.subnodes[%i]" % self.num
self.savepath = None
self.built = True
#end buildNode()
    def save_to_file(self, path=None, overwrite=False):
        '''save_to_file(path):
        Save the Project to a file. Path is subsequently stored in `Project.savepath`.
        Parameters
        ----------
        path : string, optional
            Relative (or absolute?) path to file. ".prj" will be appended if it's not already present.
            If not provided, will assume the Project has been saved before, and will save to the same path (you should set `overwrite=True` in this case).
        overwrite : { True | False }, optional
            Overwrite existing file? False by default. Will error with "FileExistsError" if this is False & file already exists.
        Raises
        ------
        ValueError : if no `path` was given and the Project has never been saved (no `savepath` set).
        IOError : if the target file already exists and `overwrite` is False.
        '''
        # Fall back to the previously-used save path when none is given:
        if path == None:
            if self.savepath:
                path = self.savepath
            else:
                ErrStr = self.name + '.savetofile(): path not provided, and project does not have `savepath` set (has never been saved before). Please provide a path to save file.'
                raise ValueError(ErrStr)
        if not path.endswith('.prj'): path = path + '.prj' # append '.prj' if needed
        if os.path.exists(path) and overwrite:
            # Target exists but caller asked to overwrite: warn, then save over it.
            print self.name + ".savetofile(): WARNING: File `" + os.path.abspath(path) + "` will be overwritten."
            # NOTE(review): this branch addresses the node as "app.subnodes[{N}]" (with braces),
            # while the branch below uses `self.nodestring` ("app.subnodes[N]") -- confirm both
            # forms are accepted by the FimmWave command parser.
            fimm.Exec("app.subnodes[{"+str(self.num)+"}].savetofile(" + path + ")")
            self.savepath = os.path.abspath(path)   # remember where we saved, for future calls
            print self.name + ".savetofile(): Project `" + self.name + "` saved to file at: ", os.path.abspath(self.savepath)
        elif os.path.exists(path) and not overwrite:
            raise IOError(self.name + ".savetofile(): File `" + os.path.abspath(path) + "` exists. Use parameter `overwrite=True` to overwrite the file.")
        else:
            # File doesn't exist yet: save via the node's reference string.
            fimm.Exec( "%s.savetofile"%(self.nodestring) + "(%s)"%(path) )
            self.savepath = os.path.abspath(path)   # remember where we saved, for future calls
            print self.name + ".savetofile(): Project `" + self.name + "` saved to file at: ", os.path.abspath(self.savepath)
        #end if(file exists/overwrite)
    #end savetofile()
    def set_variables_node(self, fimmpath, warn=False):
        '''Set the Variables Node to use for all nodes in this Project. pyFIMM only supports the use of a single Variables node, even though FimmWave allows you to have numerous variables. Local variables (within a Waveguide or Device node) are not supported.
        Use MyProj.set_variable() / get_variable() to set/get variable values.
        Parameters
        ----------
        fimmpath : string, required
            The FimmProp path to the Variable node, within this project. This takes the form of something like "My Variables" if the Variables node named "My Variables" is at the top-level of the FimmProp Project, or "NodeName/My Variables" if the Variables node is under another Node.
        warn : { True | False }, optional
            Accepted but currently unused.  # NOTE(review): `warn` is not forwarded to Variables() -- confirm intent.
        '''
        # Store the single Variables-node reference on this Project; subsequent
        # checkvar() calls evaluate strings against it.
        self.variablesnode = Variables( self, fimmpath )
def checkvar(self, var):
'''If `var` is a string, check if it can be evaluated using the Project's variables node. If `var` is numeric, it is returned as-is.'''
if isinstance(var, str):
if self.variablesnode == None:
WarnStr = "Project(%s).checkvar: "%(self.name) + "String `%s` unable to be evaluated - no variables node found in the project. "%(var) + "(Use `MyProj.set_variables_node()` to identify the variables node.)"
if warn or WARN(): print WarnStr
out = var # return unchanged
else:
try:
out = self.variablesnode.get_var( var )
except ValueError:
'''Variable wasn't found in FW'''
out = var
#end try
#end if(variablesnode)
else:
out=var
#end if(str)
return out
#end checkvar
#end class(Project)
# Note! Project.import_device() is added in the file __Device.py, to avoid cyclic imports!
def import_project(filepath, name=None, overwrite=False, warn=False):
'''Import a Project from a file.
filepath : string
Path (absolute or relative?) to the FimmWave .prj file to import.
name : string, optional
Optionally provide a name for the new Project node in Fimmwave. If omitted, the Project name saved in the file will be used.
overwrite : { True | False | 'reuse' }, optional
If True, will overwrite an already-open Fimmwave project that has the same name in Fimmwave. If False, will append timestamp (ms only) to supplied project name.
If 'reuse', then a Project that is already open in FimmWave will simply be pointed to by the new object, but not altered in any way.
False by default.
warn : { True | False }, optional
Print or suppress warnings when nodes will be overwritten etc. False by default, but still prints if the global pyFIMM.WARN() is True, which it is by default. Use set_WARN()/unset_WARN() to alter.
'''
'''For ImportDevice: Path should be path (string) to the FimmWave node, eg. 'Dev1' if Device withthat name is in the top-level of the project, or 'Dev1/SubDev' if the target Device is underneath another Device node.'''
# Create Project object. Set the "savepath", 'num', 'name' attributes of the project.
# return a project object
if DEBUG(): print "importProject():"
if os.path.isfile(filepath):
savepath = os.path.abspath(filepath)
else:
ErrStr = "FimmProp Project file does not exist at the specified path `%s`" %(filepath)
raise IOError(ErrStr)
# Open the project file, and
# make sure the project name isn't already in the FimmWave node list (will pop a FimmWave error)
if name is None:
# Get name from the Project file we're opening
prjf = open(filepath)
prjtxt = prjf.read() # load the entire file
prjf.close()
import re # regex matching
''' In the file:
begin <fimmwave_prj(1.0)> "My Project Name"
'''
prjname_pattern = re.compile( r'.*begin \<fimmwave_prj\(\d\.\d\)\> "(.*)".*' )
# perform the search:
m = prjname_pattern.search( prjtxt ) # use regex pattern to extract project name
# m will contain any 'groups' () defined in the RegEx pattern.
if m:
prjname = m.group(1) # grab 1st group from RegEx
if DEBUG(): print 'Project Name found:', m.groups(), ' --> ', prjname
#groups() prints all captured groups
else:
prjname = name
nodestring = "app"
# get modified nodename & nodenum of same-named Proj, delete/rename existing node if needed.
newprjname, samenodenum = check_node_name( prjname, nodestring=nodestring, overwrite=overwrite, warn=warn )
if DEBUG(): print "import_project(overwrite=%s): "%overwrite + "newprjname, samenodenum = ", newprjname, " , ", samenodenum
if overwrite=='reuse' and samenodenum:
# if want to reuse already-open node, and there is a node with the same name
# populate the object properties:
prj = Project(prjname) # new Project obj
prj.type = 'project' # unused!
prj.num = samenodenum # existing node number
prj.built = True
prj.nodestring = "app.subnodes[%i]"%(prj.num)
prj.name = prj.Exec( 'nodename()' )
prj.savepath = prj.Exec( 'filename()' )
prj.origin = 'fimmwave'
else:
'''Create the new node: '''
N_nodes = fimm.Exec("app.numsubnodes()")
node_num = int(N_nodes)+1
if DEBUG(): print "import_project(): app.subnodes ", N_nodes, ", node_num = ", node_num
'''app.openproject: FUNCTION - ( filename[, nodename] ): open the specified project with the specified node name'''
fimm.Exec("app.openproject(" + str(filepath) + ', "'+ newprjname + '" )' ) # open the .prj file
# populate the object properties:
prj = Project(prjname) # new Project obj
prj.type = 'project' # unused!
prj.num = node_num
prj.savepath = savepath
prj.built = True
prj.nodestring = "app.subnodes[%i]"%(prj.num)
prj.name = strip_txt( fimm.Exec( "%s.nodename() "%(prj.nodestring) ) )
prj.origin = 'fimmwave'
return prj
#end ImportProject()
# Alias to the same function:
import_Project = import_project
'''
## FimmWave commands for opening a project file:
app.openproject(T:\MZI Encoder\MZI Encoder v8.prj,"") <-- see if 2nd arg is NodeName, if so, could obviate issue with re-opening a project (name already exists)
app.subnodes[1].nodename
MZI Encoder
app.subnodes[1].findnode(/SiN Slab)
could not find node "/SiN Slab"
app.subnodes[1].findnode(SiN Slab)
app.subnodes[1].findnode(SiN Slab)
app.subnodes[1].filename
T:\MZI Encoder\MZI Encoder v8.prj
app.openproject(T:\MZI Encoder\MZI Encoder v8.prj,"")
app.subnodes[1].delete()
app.openproject(T:\MZI Encoder\MZI Encoder v8.prj,"")
app.subnodes[1].writeblock()
begin <fimmwave_prj(1.0)> "MZI Encoder"
begin <pdVariablesNode(1.0)> "Variables 1"
tCore = 0.06
wCore = 1.25
tUClad = 1
...
...
...
'''
class Variables(Node):
    '''Variables( project, fimmpath )
    A class to reference a FimmProp Variables node.
    Used as a child to a Project object.
    The Variable's parent Project should have been created in pyFIMM beforehand. To grab a Variable node from a file, use `newprj = pyFIMM.import_project()` to generate the Project from a file, and then call `newprj.set_variables_node()`.
    Parameters
    ----------
    project : pyFIMM Project object, required
        Specify the pyFIMM Project from which to acquire the Device.
    fimmpath : string, required
        The FimmProp path to the Variable node, within this project. This takes the form of something like "My Variables" if the Variables node named "My Variables" is at the top-level of the FimmProp Project, or "NodeName/My Variables" if the Variables node is under another Node.
    Please use `dir(VarObj)` or `help(VarObj)` to see all the attributes and methods available. A partial list is shown here:
    Attributes
    ----------
    VarObj.origin : { 'fimmwave' }
        This indicates that this Node was Not constructed by pyFIMM, and so has a slightly lacking set of attributes (detailed further in this section). A python-constructed pyFIMM object has the value 'pyfimm'.
    Methods
    -------
    VarObj.get_all(): Return a Dictionary of all variables in the node.
    VarObj.add_var( 'VarName', value=VarValue ): Add a new variable and optionally set it's value.
    VarObj.get_var( 'VarName' ): Return the value of a specific variable.
    VarObj.set_var( 'VarName', Value ): Set the value of a variable in the FimmWave node.
    '''
    def __init__(self, *args):
        '''If no args, return empty object;
        if two args, assume they are (projectobj, fimmpath).'''
        if len(args) == 0:
            '''no args provided'''
            # Empty placeholder object; attributes are expected to be filled in later.
            self.parent=None
            self.origin=None
            self.name=None
            self.num=None
            self.nodestring=None
            self.built=None
        elif len(args) == 2:
            '''2 args: ProjectObj, fimmpath
            This is the standard usage'''
            project = args[0]
            if not isinstance(project, Project): raise ValueError("1st argument should be a pyFIMM Project object!")
            fimmpath = str( args[1] )
            self.parent = project
            self.origin = 'fimmwave'    # node exists in FimmWave; not built by pyFIMM
            self.name = fimmpath.split('/')[-1] # get the last part of the path
            self.num = None     # no subnode index: the node is addressed via a named Ref (below)
            varname = "Vars_%i" %( get_next_refnum() ) # generate dev reference name
            # create fimmwave reference to the Device:
            fpStr = "Ref& %s = "%(varname) + project.nodestring + '.findnode("%s")'%(fimmpath)
            if DEBUG(): print fpStr
            ret = fimm.Exec( fpStr )
            ret = strip_txt( ret )
            if DEBUG(): print "\tReturned:\n%s"%(ret)
            self.nodestring = varname # use this to reference the node in Fimmwave
            # Sanity-check that the referenced node really is a Variables node:
            ret = strip_txt( fimm.Exec( '%s.objtype'%(self.nodestring) ) )
            if ret != 'pdVariablesNode':
                ErrStr = "The referenced node `%s` is not a FimmProp Variables node or couldn't be found!\n\t"%(fimmpath) + "FimmWave returned object type of:\n\t`%s`."%(ret)
                raise ValueError(ErrStr)
            self.built=True
        else:
            ErrStr = "Invalid number of arguments to Variables.__init__(). Got:\n\t%s"%(args)
            raise ValueError( ErrStr )
        #end if( number of args )
    #end Variables.init()
    def __str__(self):
        '''How to `print()` this object.'''
        vars = self.get_all()
        string = "Variables Node '%s' in Project '%s'\n"%(self.name, self.parent.name)
        string += "%i variables\n"%(len(vars))
        for s in vars.iteritems():
            string += "%s : %s"%(s[0], s[1])
            seval = self.get_var(s[0])
            if s[1] != seval:
                '''If statement can be evaluated further'''
                # Raw value is a formula/reference: also show its evaluated result.
                string += " = %s"%(seval)
            string += '\n'
        return string
    def add_var(self, varname, value=None):
        '''Add a variable to the Variables Node.
        varname : str, required
            The name for this variable.
        value : will be converted to string, optional
            If provided, will subsequently set the variable value with `VarObj.set_var( )`.
        '''
        self.Exec( 'addvariable("%s")'%(varname) )
        # NOTE(review): set_var() is called even when `value` is None -- the variable
        # is then set to the string "None"; confirm this is intended.
        self.set_var( varname, value )
        if DEBUG(): print( "VarNode '%s': "%self.name + "Added variable %s"%varname )
    def set_var(self, varname, value):
        '''Set the value of a fimmwave variable.
        varname : str, required
            The name for this variable.
        value : str or numeric, required
            Set the variable value.
        '''
        self.Exec( 'setvariable("%s","%s")'%(varname, value) )
        if DEBUG(): print( "VarNode '%s': "%self.name + "Set variable %s = %s"%(varname, value) )
    def get_var(self, varname):
        '''Return the value of a single variable as evaluated by FimmWave.
        If the variable is a formula, fimmwave will return the final value resulting from evaluating the formula. All results are converted to a numeric type, unless the variable contains a statement that FimmWave is unable to evaluate, in which case the statement is returned as a string.
        Raises ValueError if the variable is not found in the node.'''
        fpStr = self.Exec( 'getvariable("%s")'%(varname) )
        fpStr = eval_string( fpStr )    # convert numeric strings to numbers
        if fpStr == '':
            # FimmWave returns an empty string for an unknown variable name.
            ErrStr = "Variable `%s` not found in Project('%s').VariablesNode('%s')."%(varname, self.parent.name, self.name)
            raise ValueError( ErrStr )
        return fpStr
    def get_all(self):
        '''Return all available variables as a dictionary. This will interrogate FimmWave to get all currently defined variables in the node.
        A dictionary will be returned, with all numeric variables being converted to numbers, while references/formulae will be returned as strings (unevaluated by FimmWave - use `get_var()` to have FimmWave calculate the values).'''
        fpStr = self.Exec( 'writeblock()' )
        # Drop the first & last lines (the begin/end wrapper) and strip whitespace:
        fpStr = [ x.strip() for x in fpStr.splitlines()[1:-1] ]
        if DEBUG(): print "Variables in '%s':\n\t%s"%(self.name, fpStr )
        out={} # dictionary to output
        for line in fpStr:
            # Each remaining line looks like `name = value`:
            key = line.split(' = ')[0]
            val = line.split(' = ')[-1]
            out[key] = eval_string( val )
        return out
"""
## FimmWave code for Variables Nodes:
app.subnodes[4].addsubnode(pdVariablesNode,"Variables 1")
app.subnodes[4].subnodes[2].findorcreateview()
app.subnodes[4].subnodes[2].addvariable(a)
app.subnodes[4].subnodes[2].setvariable(a,"999")
app.subnodes[4].subnodes[2].getvariable("a")
999
"""
#end class(Variables)
class Material(object):
    """Create a new pyFimm Material with refractive index & k (loss coefficient):
    To produce a simple refractive-index type of material, pass the refractive index (float) as the first argument:
        >>> Silicon = pyfimm.Material( 3.569 )
    this assumes loss, k=0. Pass a non-zero imaginary/absorption component (k) if desired:
        >>> Silicon = pyfimm.Material( 3.569 , 0.0012 )
    To utilize a wavelength-dependent model (without having to rebuild the structure each time), you must provide a fimmwave Material Database via
        >>> pyfimm.set_material_database('C:\FimmWave\matref.mat')
    and then pass a string as the first argument, like so:
        >>> Silicon = pyfimm.Material( 'Silicon' )
    choose the mole ratio in the second argument,
        >>> Al20_Ga80_As = pyfimm.Material( 'AlGaAs', 0.20 ) # 20% Aluminum
    or, for quaternary material, mole-ratio x & y (aka. mx & my):
        >>> In_Ga51_As49_P = pyfimm.Material( 'InGaAsP', 0.51, 0.49 ) #51% Ga, 49% As
    mx & my can also be set with keyworded args for clarity, as so:
        >>> InGa51As49P = pyfimm.Material( 'InGaAsP', mx=0.51, my=0.49 )
    You will have to open the fimmwave material database file for details on the materials available & definition of the mole-ratio parameters (in one case they are actually wavelengths...).
    Called with no arguments, `Material()` returns a material like air with n=1.0, k=0.
    Material objects are subsequently used to create waveguides with these materials, like so:
        >>> Silicon = Material(3.4) # set refractive index of Silicon material object
        >>> core = Silicon( 1.50 ) # call the Material object to return a Layer of given thickness
    Here, `core` is a Layer object with thickness 1.50 um & refractive index from the Silicon object, of 3.4
    Can also set the layer as the Confinement Factor area (cfseg), as so:
        >>> core = Silicon( 1.50, cfseg=True) # sets this layer's `cfseg` flag
    """
    def __init__(self, *args, **kwargs):
        '''Accepts (), (n), (n, k), (matname), (matname, mx) or (matname, mx, my).
        `mx`/`my` may also be given as keyword arguments.'''
        # Defaults: air-like refractive-index material.
        self.type = 'rix'   # 'rix' = plain refractive index; 'mat' = material-database entry
        self.n = 1.0        # refractive index (type='rix' only)
        self.k = 0.0        # absorption/loss coefficient (type='rix' only)
        self.mat = None     # material-database name (type='mat' only)
        self.mx = None      # mole ratio x
        self.my = None      # mole ratio y
        if len(args) >= 4:
            raise ValueError('Invalid number of input arguments to Material Constructor')
        if len(args) >= 1:
            if isinstance(args[0], str):
                self.type = 'mat'       # use material database
                self.mat = args[0]      # material name
                self.n = None           # n & k come from the database instead
                self.k = None
            else:
                self.n = args[0]        # RIX index
        if len(args) >= 2:
            if self.type == 'mat':
                self.mx = args[1]       # mole ratio x
            else:
                self.k = args[1]        # RIX loss - k coeff.
        if len(args) == 3:
            if self.type == 'mat':
                self.my = args[2]       # mole ratio y
            else:
                raise ValueError("Invalid number of arguments for Refractive Index-type of material.")
        # Allow mx/my to be set by keyword args, if not already set positionally.
        # (Fix: compare against None rather than truthiness, so an explicit mx=0.0 is honored.)
        if self.mx is None: self.mx = kwargs.pop('mx', None)
        if self.my is None: self.my = kwargs.pop('my', None)
        if kwargs:
            '''If there are unused key-word arguments'''
            ErrStr = "WARNING: Material(): Unrecognized keywords provided: {"
            for k in kwargs:    # (was kwargs.iterkeys(); plain iteration works on Py2 & Py3)
                ErrStr += "'" + k + "', "
            ErrStr += "}. Continuing..."
            print(ErrStr)   # non-fatal: warn and continue
    #end __init__
    def __str__(self):
        '''How to `print` this object'''
        if self.type == 'rix':
            return 'n = %1.4f' % self.n + '\n' + 'k = %1.4f' % self.k
        else:
            return 'Material = "%s" \n' %(self.mat) + "with mx=%s & my=%s." %(self.mx, self.my)
    def __call__(self, length, cfseg=False):
        '''Calling a Material object with one argument creates a Layer of passed thickness and refr.index of Material, and returns a list containing this new Layer. For example:
            >>> Silicon = Material(3.4)
            >>> core = Silicon( 1.50 )
        Here, core is a list containing one Layer object with thickness 1.50 um & refractive index from the Silicon object, of 3.4
        Can also set the layer as the Confinement Factor area (cfseg), as so:
            >>> core = Silicon( 1.50, cfseg=True) # sets this layer as the cfseg
        '''
        # Always call Layer with 3 args, but CFseg is False by default.
        out = [ Layer( self, length, cfseg ) ] # include cfseg
        return out
    #end __call__
#end class(Material)
class Layer:
    """Layer( mat, thick, CFseg )
    A 1-D slab: one Material at a given thickness, optionally flagged as a
    confinement-factor segment (FimmWave's `cfseg` bit).
    Usually not created manually -- calling a Material object with a thickness
    returns one, e.g.:
        >>> Silicon = Material( 3.44 )
        >>> lyr = Silicon( 0.150 )
    and such Layers are normally passed straight to the Slice constructor:
        >>> CoreSlice = Slice( SiO(5.0) + Silicon(0.150) + SiO(5.0) )
    Direct construction is also possible:
        >>> Layer()                               # empty
        >>> Layer( Material )
        >>> Layer( Material, Thickness )
        >>> Layer( Material, Thickness, CFseg )
    Parameters
    ----------
    mat : Material object
        Material object, provides n & k info.
    thick : float
        Thickness of this layer.
    CFseg : { True | False }
        Include this layer in confinement factor (CF) calculations?
    """
    def __init__(self, *args):
        '''Accepts zero to three positional args: (material, thickness, cfseg).'''
        if len(args) > 3:
            raise ValueError( 'Invalid number of input arguments to Layer Constructor' )
        # Unpack the optional positional args, falling back to defaults:
        self.material = args[0] if len(args) >= 1 else []
        self.thickness = args[1] if len(args) >= 2 else 0
        self.cfseg = args[2] if len(args) >= 3 else False
    def __str__(self):
        '''How to `print` this object'''
        parts = [
            '%s' % self.material,
            'thickness = %7.4f microns' % self.thickness,
            'cfseg = %s' % self.cfseg,
        ]
        return '\n'.join(parts)
    def __add__(self, other):
        '''Addition returns a list containing new Layer appended to this Layer'''
        return [self, other]
    def get_n(self):
        '''Return refractive index of Material in this Layer'''
        return self.material.n
    n = get_n   # alias for this function
    def get_k(self):
        '''Return imaginary refractive index (loss) of Material in this Layer'''
        return self.material.k
    k = get_k   # alias for this function
    def set_cfseg(self):
        '''Set this Layer as a cfseg - area to include in confinement factor calculations.'''
        self.cfseg = True
    def unset_cfseg(self):
        '''UnSet this Layer as a cfseg - area won't be included in confinement factor calculations.'''
        self.cfseg = False
    def get_cfseg(self):
        '''Return cfseg status of this Layer as { True | False }.'''
        return self.cfseg
#end class(Layer)
class Slice:
    """
    Slice( [BunchOfLayers, WidthOfSlice, EtchDepth ] )
    pyFimm Slice object, a concatenation of multiple Layer objects (Materials of some thickness).
    Can accomodate an arbitrary number of Layers. This converts the 1-D Layers into a 2-D Slice.
    Parameters
    ----------
    BunchOfLayers : list, optional
        A List containing all the Layers to be put into this Slice (bottom to top).
        Material objects return a Layer when called with a thickness, and adding Layers
        produces a list, so this is usually written as:
            >>> SlabCore = Slice( Material1(15.0) + Material2(0.75) + Material1(10.0) )
    WidthOfSlice : float, optional
        The width of this slice, in perpendicular direction as the Layer thicknesses.
        Usually set later by calling the Slice with a width argument while making a full
        rectangular Waveguide object:
            >>> WG = Waveguide( SlabClad(10.0) + SlabCore(2.0) + SlabClad(15.0) )
    EtchDepth : float, optional
        For rectangular waveguides, apply an etch depth to the layer, such that the
        removed ("etched") portion will be filled in with the material above it.
        For this reason it is often useful to add a 0-thickness Layer at the end of
        your BunchOfLayers, eg. air=Layer(1.0, 0.0).
    Raises
    ------
    ValueError : if more than three positional arguments are passed.
    """
    def __init__(self, *args):
        '''Accepts zero to three positional args: (layers, width, etch).'''
        if len(args) > 3:
            # Fix: previously this case only printed a message and left the object
            # half-constructed (no attributes); raise instead, consistent with the
            # Layer and Section constructors.
            raise ValueError('Invalid number of input arguments to Slice Constructor')
        self.layers = list(args[0]) if len(args) >= 1 else []   # Layers, bottom-to-top
        self.width = args[1] if len(args) >= 2 else 0.0
        self.etch = args[2] if len(args) >= 3 else 0.0
    def __str__(self):
        '''How to `print` this object'''
        out = 'width = %7.4f \n' % self.width
        out += 'etch = %7.4f \n' % self.etch
        for i, lyr in enumerate(self.layers):
            if i == 0:
                out += 3*'*' + ' Bottom Layer: ' + 3*'*' + '\n%r' % (lyr) + '\n'
            elif i == (len(self)-1):
                out += 3*'*' + ' Top Layer: ' + 3*'*' + '\n%r' % (lyr) + '\n'
            else:
                out += 3*'*' + ' Middle Layer %i: ' % i + 3*'*' + '\n%r' % lyr + '\n'
        return out
    def __call__(self, width):
        '''Calling ThisSlice(Width) sets the Width of this Slice, and returns a list containing this Slice.'''
        self.width = width
        return [self]
    def __add__(self, other):
        '''Addition returns a list containing each Slice'''
        return [self, other]
    def __len__(self):
        '''len(ThisSlice) returns the number of Layers in ThisSlice'''
        return len(self.layers)
    def thickness(self):
        '''Return summed thickness of all Layers in this Slice'''
        return sum(lyr.thickness for lyr in self.layers)
    def layer_thicknesses(self):
        '''Return list of thicknesses of each Layer in this Slice'''
        return [lyr.thickness for lyr in self.layers]
#end class Slice
class Section:
    '''Section( WGobject, length)
    Section class applies a Length to a Waveguide object. This object is only used when creating a new pyFIMM Device object, and is usually invisible to the end-user.
    This is so that a Device can reference the same WG multiple times, but with a different length each time.
    Usually not created manually, but instead returned when user passes a length to a WG (Waveguide or Circ) object.
    Parameters
    ----------
    WGobject : Waveguide, Circ or Device object
        Waveguide, Circ or Device object, previously built.
    length : float
        length of this WG, when inserted into Device. Required for Waveguide or Circ objects, not required for Device objects (which supply their own via `get_length()`).
    Examples
    --------
    Typically created by calling a WG (Waveguide or Circ) object while creating a Device:
        >>> Device1 = Device( WG1(10.5) + WG2(2.5) + WG3(10.5) )
    '''
    def __init__(self, *args):
        # Fix: the zero-arg case was a separate `if`, so `Section()` fell through
        # to the final `else` and raised ValueError instead of returning the
        # documented empty object. Chained as if/elif now.
        if len(args) == 0:
            '''return empty object'''
            self.WG = None
            self.length = None
        elif len(args) == 1:
            '''Only Waveguide/Device passed. Device.__call__ uses this case'''
            self.WG = args[0]
            try:
                # Devices know their own length:
                self.length = self.WG.get_length()
            except AttributeError:
                ErrStr = "Section.__init__(): The specified Waveguide/Device has no method `get_length()`. Please pass a Waveguide, Circ or Device (or similar, eg. Lens) object that has a length.\n\tGot args = " + str(args)
                raise AttributeError(ErrStr)
        elif len(args) == 2:
            '''WG & length passed. Waveguide/Circ.__call__ use this case.'''
            self.WG = args[0]
            self.length = args[1]
        else:
            raise ValueError( "Invalid number of arguments to Section(). args=" + str(args) )
    def __str__(self):
        '''How to `print` this object.'''
        string='Section object (of pyFIMM module).'
        string += '\nlength = %7.4f \n' % self.length
        string += 'WG type ' + str(type(self.WG)) + ' and structure:\n' + str(self.WG)
        return string
    def __add__(self, other):
        '''Addition returns a list containing new Section appended to this Section'''
        if DEBUG(): print( "Section__Add__: \n%s" % [self, other] )   # (print-statement made Py3-parseable)
        return [self, other]
    def get_length(self):
        '''Return the length of this Section.'''
        return self.length
#end class Section
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,429
|
demisjohn/pyFIMM
|
refs/heads/master
|
/example4 - open Device from File v1.py
|
'''
##########################################################################
Example 4:
Import a Project from File, and insert a Device from File into a new Project
##########################################################################
'''
import pyfimm as pf # Every script must begin with this line
pf.connect()    # establish the TCP link to the running FimmWave instance
import sys, os
ScriptPath, ScriptFile = os.path.split( os.path.realpath(__file__) ) # Get directory of this script
''' Since we're loading an existing Project, we might not need any of these global parameters. Haven't tested that yet. '''
pf.set_working_directory(ScriptPath) # Set FimmWave directory to the location of your script (needed to capture output files)
pf.set_eval_type('n_eff') # FIMMWAVE will label modes by the effective index (options: n_eff or beta)
pf.set_mode_finder_type('stable') # options: stable or fast
pf.set_mode_solver('vectorial FMM real') # Three words, any permutation of: 'vectorial/semivecTE/semivecTM FDM/FMM real/complex' for RWG.
pf.set_wavelength(1.55) # The unit of space is always 1 micrometer
pf.set_N_1d(100) # # of 1D modes found in each slice (FMM solver only)
pf.set_NX(100) # # of horiz. grid points for plotting & FDM
pf.set_NY(100) # # of vertical grid points for plotting & FDM
pf.set_N(3) # # of modes to solve for
pf.set_material_database('Materials/refbase.mat')   # path is relative to the working directory set above
# 1st Make our own project, as usual:
myprj = pf.Project()
myprj.buildNode('Example 4 - Import Device', overwrite=True)
#####################################################
# Import a Device from a saved FimmWave project file
#
# First open the Project file
# Then copy the Device into our own project
#####################################################
#pf.set_DEBUG() # Turn on Debugging verbose output.
# Open a saved Project file:
openedprj = pf.import_Project('T:\Python Work\pyFIMM Simulations\example4 - WG Device 1.prj')
# If the project is already loaded, try `overwrite='reuse'` to prevent reloading it. `overwrite=True` will delete the opened project before loading the file.
'''
`openedprj` now refers to the opened Project file, which contains the Device we want to add to our own Project
You can optionally provide a name to use in FimMWave, along with the usual `overwrite` and `warn` options.
'''
# Copy the Device 'SlabDev' into our own project, myprj:
dev2 = myprj.import_device(project=openedprj, fimmpath='SlabDev')
'''
We just imported a Device into our own Project, myprj. We told it to import it from the opened Project, `openedprj`, and grab the FimMWave node named `SlabDev`.
`dev2` now refers to this new Device, in our own Project. In FimmWave, you will see that the Device has been copied into our own Project, 'Example 4 - Import Device'.
Since the Device was made in FimmWave, not pyFIMM, the object `dev2` does not have knowledge about the device's internal workings (for example, paths and complex layouts). Most Device methods (such as calculating, plotting, getting Smat's) should still work though.
'''
# Do something with the new Device:
print dev2.name + ": Total Device Length = %f um" %( dev2.get_length() )
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,430
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__init__.py
|
'''pyFIMM Documentation:
pyFIMM provides a python interface to Photon Design's FIMMWAVE/FIMMPROP simulation tools.
The interface is set up like Peter Beinstman's CAMFR (CAvity Modelling FRamework) system, in which 1-D Slices are concatenated to produce arbitrary 2-D index profiles (waveguides), which can be further concatenated to produce full 3-D photonic integrated circuits.
Photon Design's pdPythonLib is included in the module.
Originally created by Jared Bauters at the University of California Santa Barbara in 2011.
Updated by Demis D. John, 2015.
Examples
--------
Example of rectangular waveguide construction syntax: We will create a rectangular waveguide of SiO2 cladding and SiN core, calculate the fundamental mode & plot it. `pyfimm` should be replaced with whatever name you imported the pyFIMM module as - for example, if you imported it like so:
>>> import pyfimm as pf
then replace `pyfimm` with `pf` in the following examples.
First, create some Materials with some refractive index:
>>> SiO = pyfimm.Material(1.45) # refractive index of SiO2
>>> SiN = pyfimm.Material(2.01) # refractive index of Si3N4
Then, create some 1-D slabs, by calling those Materials with a thickness value, and adding them together from top to bottom in a Slice:
clad = pyfimm.Slice( SiO(15.75) ) # Thicknesses in microns
core = pyfimm.Slice( SiO(10.0) + SiN(2.5) + SiO(5.0) )
This created an imaginary structure from bottom-to-top, for example `core` looks like:
top
--------------------
SiO
5.0 um thick
--------------------
SiN
2.50 um thick
--------------------
SiO
10.0 um thick
--------------------
bottom
Then make a 2-D structure by calling these Slices with a width value, and adding them together from left to right in a Waveguide:
>>> WG = pyfimm.Waveguide( clad(3.0) + core(1.0) + clad(4.0) ) # Widths in microns
Which creates this imaginary 2-D Waveguide structure from left-to-right:
top
---------------------------------------------------------
|<----- 3.0um------>|<-----1.0um------>|<---- 4.0um---->|
| | SiO | |
| | 5.0 um thick | |
| |------------------| |
| SiO | SiN | SiO |
| 15.75um | 0.750 um thick | 15.75um |
| thick |------------------| thick |
| | SiO | |
| | 10.0 um thick | |
---------------------------------------------------------
bottom
Then tell FimmWave to actually build these structures:
>>> WG.buildNode(name='Waveguide', parent=wg_prj) # Build the Fimmwave Node
Now the RWG waveguide node is available in the Fimmwave GUI. (Note you should have already made a Project node in fimmwave, which is referenced as the `parent` here. See Examples for full code.)
You can then calculate the modes as so:
>>> WG.calc()
And inspect the modes like so:
>>> WG.mode(0).plot() # plots the fundamental mode.
Or extract field values like so:
>>> Mode1_Ex = WG.mode(1).get_field('Ex') # Saves x-direction E-field for 2nd mode
See the Examples directory for full examples, as some details are missing in these.
Requires
--------
numpy,
matplotlib
FimmWave, setup with TCP port number access (see FimmWave manual section on Python usage).
Get help on commands and objects by typing things like:
(after you've created some objects, or run your script with 'interact with shell afterwards' enabled and then try these.)
>>> import pyFIMM as pf # import the module
>>> help( pf )
>>> dir( pf ) # lists all functions and variables provided by the module
>>> help( pf.set_mode_solver ) # help on one function
>>> help( pf.Waveguide ) # help on the Waveguide object
>>> dir ( pf.Waveguide ) # list all functions/variables in the Waveguide object
>>> help( pf.Waveguide.mode(0).plot ) # help on function 'plot' of the Waveguide object
>>> help( pf.Circ.buildNode ) # help on the `buildNode` function of the Circ object
or even easier, while building the script try:
>>> clad = pf.Material(1.4456)
>>> core = pf.Material(1.9835)
>>> help(clad) # Will show help on the Material object
>>> strip = pf.Waveguide( side(w_side) + center(w_core) + side(w_side) )
>>> dir(strip) # will show functions of the Waveguide object
>>> help(strip.buildNode) # show help on the Waveguide.buildNode() method
after strip.calc(), try
>>> dir( strip.mode(0) ) # list the functions of a Mode object
>>> help( strip.mode(0).plot ) # detailed help on the mode plotting function
'''
import __version as v # file with the version number.
version = v.versionnum      # numeric version, usable for comparisons
versiondate = v.versiondate # release-date string of this version
# Splash screen - printed once, on `import pyfimm`.
print
print "pyFIMM", v.version, ""
print "Python Interface to Photon Design's FIMMWave software package."
print "Based on Peter Beinstman's CAMFR (CAvity Modelling FRamework) interface."
print
print "Created by Jared Bauters University of California, Santa Barbara & updated by Demis D. John."
print
# Flatten every sub-module's public names into the top-level `pyfimm` namespace,
# so users can write e.g. `pf.Waveguide(...)` rather than reaching into sub-modules.
from __globals import * # import global vars & FimmWave connection object
from __pyfimm import * # import the main module, many global functions, base objects like Project, Material, Slice, Section and some rectangular waveguide functions.
from __Waveguide import * # contains the Waveguide class, including most of the Fimmwave commands for WG creation.
from __Circ import * # contains Circ class & all other functions for cylindrical geometries.
from __Device import * # contains the Device class, for constructing 3-D devices
from __Mode import * # contains the Mode class, for WGobj.mode(0).xyz operations
from __Tapers import * # contains all Taper classes, including WG_Lens
from __Cavity import * # Cavity object & calculations
from __CavityMode import * # contains the CavityMode class, for CavityOb.mode(0).xyz operations
####################################################################################
# Import Proprietary Modules
####################################################################################
from proprietary import * # the 'proprietary' folder contains modules/functions from other institutions.
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,431
|
demisjohn/pyFIMM
|
refs/heads/master
|
/example3 - Cyl DFB Cavity v4.py
|
'''
##########################################################################
Cylindrical waveguide & Device example - a distributed feed-back device
similar to a VCSEL cavity.
Based on the Photon Design web-example:
"Modelling a passive optical cavity (VCSEL, DFB)"
http://www.photond.com/products/fimmprop/fimmprop_applications_17.htm
Calculates the Cavity modes of a cylindrical GaAs/AlGaAs DFB using the
"Cavity Mode Calculator" code written by Vincent Brulis @ Photon Design, 2014
Requires pyFIMM v1.2.8 or greater
##########################################################################
'''
import numpy as np # Array math.functions. Used here for `argmax()`.
import pyfimm as pf # Import the pyFIMM module
''' Get help on commands and objects by typing things into the console, like:
>>> help(pyfimm) or after the above import, >>> help(pf)
>>> help(pyfimm.set_mode_solver)
>>> help(pyfimm.Waveguide)
>>> help( pyfimm.Mode ) # the Mode class, for selecting a mode to work with
>>> help(pyfimm.Circ.buildNode)
or even easier, while building your script try:
>>> AlOx = pyfimm.Material(1.60) # setting up some Materials
>>> AlGaAs = pyfimm.Material(3.25)
>>> help(AlOx) # will show help on the Material object
>>> CurrentAperture = pyfimm.Circ( AlGaAs(3.5) + AlOx(4.5) )
>>> help(CurrentAperture) # will show help on the Circ object
>>> help(CurrentAperture.buildNode) # shows options for Circ.buildNode()
>>> help( CurrentAperture.mode(0) ) # shows functions that can be performed on modes, which are actually Mode objects.
>>> help( CurrentAperture.mode(0).plot ) # help on the mode plotting function
For more verbose output, while programming the libraries for example, set the pyfimm DEBUG parameter like so:
pyfimm.set_DEBUG()
at the point you want debugging output turned on. This will enable various levels of extra output, that aids in finding out where a calculation or bug is occurring. `unset_DEBUG()` can be used to turn off this extra verbosity.
'''
# --- Connect to FimmWave & set global solver parameters ---
pf.connect() # this connects to the FimmWave application. The FimmWave program should already be open (pdPythonLib.StartApplication() is not supported yet)
wl = 1.100 # center wavelength in microns - sets the Bragg wavelength
# Set Parameters (Your copy of FIMMWAVE has default values for these. You can change more than shown here.)
import sys
ScriptPath = sys.path[0] # Get directory of this script
pf.set_working_directory(ScriptPath) # Set FimmWave directory to the location of your script (needed to capture output files)
pf.set_eval_type('n_eff') # FIMMWAVE will label modes by the effective index (options: n_eff or beta)
pf.set_mode_finder_type('stable') # options: stable or fast
pf.set_mode_solver('Vectorial GFS Real') # See `help(pyfimm.set_mode_solver)` for all options.
pf.set_wavelength( wl ) # The unit of space is always 1 micrometer
pf.set_N_1d(100) # Num. of 1D modes found in each slice (FMM solver only)
pf.set_N(3) # Num. of modes to solve for
pf.set_Nm(1) # theta mode order. Can accept start/stop values as list, eg. [1,5]. See `help(pf.set_Nm)`.
pf.set_Np(2) # polarization mode order, also can accept start/stop values as list. See `help(pf.set_Np)`.
# --- Create the FimmWave project node ---
dfbproj = pf.Project('Example 3 - DFB Cavity', buildNode=True, overwrite=True) # Create Proj & build the node in one line. `overwrite` will overwrite an existing project with the same name.
# Define materials.
## Refractive indices:
n_GaAs = 3.53
n_AlGaAs = 3.08
CoreHi = pf.Material(n_GaAs) # GaAs
CoreLo = pf.Material(n_AlGaAs) # AlGaAs
Clad = pf.Material(1.56)
rCore = 20/2. # core radius in microns (20 um diameter)
TotalDiam = 30 # full waveguide diameter, microns
rClad = TotalDiam/2-rCore # radial cladding thickness
pf.set_circ_pml(0) # thickness of perfectly matched layers for cylindrical (circ) objects
# Fiber waveguides (cylindrical: high-index core & low-index core variants):
Hi = pf.Circ( CoreHi(rCore) + Clad(rClad) )
Lo = pf.Circ( CoreLo(rCore) + Clad(rClad) )
#Hi.set_joint_type("special complete") # default is "complete". Set this before building the FimmProp Device node.
#Lo.set_joint_type("special complete")
# Build these waveguides in FimmWave. The Device will reference the pre-built waveguide nodes.
Hi.buildNode(name='Hi', parent=dfbproj)
Lo.buildNode(name='Lo', parent=dfbproj)
# Lengths: quarter-wave thicknesses for the Bragg stack
dHi = wl/4/n_GaAs #77.90368e-3
dLo = wl/4/n_AlGaAs #89.28571e-3
# Construct the device, split into two parts with same waveguide type at central split. This is important so that the modal basis set of each half of the cavity is the same.
Nperiods = 50
# Devices are built from left to right:
dfb_left = pf.Device( Lo(1.0) + Nperiods*( Hi(dHi) + Lo(dLo) ) + Hi(dHi/2) )
# DFB_Right has Hi waveguide cut in half at center & quarter-wave shift (Lo section with double length):
dfb_right = pf.Device( Hi(dHi/2) + Lo(dLo*2) + Hi(dHi) + Nperiods*( Lo(dLo) + Hi(dHi)) + Lo(1.0) )
dfb_left.set_joint_type('special complete')
dfb_right.set_joint_type('special complete')
# Build these Devices in FimmProp:
dfb_left.buildNode(name='DFBleft', parent=dfbproj)
dfb_right.buildNode(name='DFBright', parent=dfbproj)
# Show the devices in the FimmWave GUI:
pf.Exec(dfb_right.nodestring + '.findorcreateview()')
pf.Exec(dfb_left.nodestring + '.findorcreateview()')
""" calculate modes of half the cavity only - just for demonstrating Device functions.
This is not pertinent to the Cavity resonance calculation
"""
dfb_right.calc() # calc scattering matrix of this Device (only half the cavity)
dfb_right.plot_refractive_index() # Fig1: show the refractive index versus Z for this device.
dfb_left.set_input([1,0,0], side='right', normalize=True) # launch only 1st Mode from right side
dfb_left.plot('Ex', direction='left') # Fig2: Plot Ex field for this launch, for left-propagating field (since injected on right side)
#dfb_left.plot('Ey', refractive_index=True) # can also plot refractive index on same figure
""" --- Now Calculate the Cavity modes! --- """
#WLs = [1.080, 1.100, 1.120] # for fast example
#WLs = np.arange( 1.100-0.060, 1.100+0.060, 0.005 ) # coarse eigenmode calculation
#WLs = np.concatenate([ np.arange(1.100-0.060, 1.100-0.007, 0.005) , np.arange(1.100-0.007, 1.100+0.007, 0.0005) , np.arange(1.100+0.007, 1.100+0.060, 0.005) ]) # coarse away from resonance, fine at resonance
WLs = np.arange( wl-0.010, wl+0.010, 0.005 ) # refined calc @ resonance only
# Set up Cavity with Left & Right devices:
DFB = pf.Cavity(dfb_left, dfb_right)
DFB.plot_refractive_index() # Fig3: show the refractive index profile along Z, at (x,y)=(0,0)
DFB.calc(WLs) # Calculate the Cavity resonances etc.
# try `dir(DFB)` After calling calc() - you'll see that new variables are available, such as the eigenvectors & resonance wavelengths etc.
#DFB.mode(0).plot() # plot eigenvalues of 1st mode (plot defaults to 'EigV')
DFB.mode('all').plot('EigVals') # Fig4: plot eigenvalues of all modes
# plot resonance fields for 2 of the modes:
DFB.mode( [0,1] ).plot('Ex', 'resonance', refractive_index=True, title="DFB + 1/2-wave") # Fig5: plot Ex field for the resonance wavelengths of specified modes.
"""
To view the transverse cavity mode profile:
In FimmProp, on Either device, select
View > Input Field
And then select the appropriate tab (Left-Hand or Right-Hand input), and
click 'Update' in the Preview area, to see what the superposition of modes
according to the EigenVector looks like.
"""
#pyfimm.disconnect() # close TCP connection to application.
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,432
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__Cavity.py
|
'''
Cavity Calculation functions
Demis D. John, 2015, Praevium Research Inc.
Based on Peter Beinstman's CAMFR package's `Cavity` class,
and Vincent Brulis' CavityModeCalc.py example script.
'''
from __globals import * # import global vars & FimmWave connection object
# DEBUG() variable is also set in __globals
import numpy as np
import math
from __pyfimm import get_N, set_wavelength # get number of calculated modes
from __CavityMode import * # import the CavityMode class, for `Cavity.mode(0)...`
class Cavity(object):
'''Cavity class, for calculating cavity modes & fields.
Construct as so:
cav = <pyfimm>.Cavity( LHS_Dev, RHS_Dev )
Parameters
----------
LHS_Dev : Device object
Device representing the left-hand side of the cavity.
RHS_Dev : Device object
Device representing the right-hand side of the cavity.
IMPORTANT NOTE: Wherever you choose to split the cavity (arbitrary), the waveguide cross-section on either side of the split must be the same. For example, for whichever waveguide is near the desired splitting point, cut that waveguide in half, with half in the LHS_Dev & half in the RHS_Dev, so that the waveguide cross section on either side of the split is the same.
This is so that the modal basis set of each half of the cavity will be the same - ie. the eigenvectors calculated will be with respect to the modes of these central waveguides, and if each side's central waveguide had different modes (because they were different waveguide geometries), the eigenvector would not represent the same superposition of modes into each RHS & LHS device.
Attributes
----------
LHS_Dev : Device Object
Left Side Device. Should already have been built via LHS_Dev.buildNode() in FimmWave.
RHS_Dev : Device Object
Right Side Device. Should already have been built via RHS_Dev.buildNode() in FimmWave.
Methods
-------
This is a partial list - see `dir(CavityObj)` to see all methods.
Please see help on a specific function via `help(Cavity.theFunc)` for detailed up-to-date info on accepted arguments etc.
calc( WLs , Display=False)
Calculate the eigenmodes of the cavity at each wavelength. Based on Vincent Brulis' script from the PhotonDesign example "Modelling a passive optical cavity (VCSEL, DFB)".
WLs : array-like
List of wavelengths at which to calculate the cavity modes. This determines the wavelength-accuracy of the resonance wavelengths found - you will have to choose the wavelengths at which to calculate the modes.
See help(Cavity.calc) for more info.
plot() - DEPRECATED
Plot the Eigenvalues versus Wavelength for all modes.
This function has been deprecated - use `Cavity.mode('all').plot()` to do the same thing.
After CavityObj.calc() has been performed, more attributes are available:
Attributes
----------
wavelengths : numpy array
The wavelengths at which cavity eigenvalues were calculated.
eigenvalues, eigenvectors : numpy arrays
The eigenvalues & eigenvectors at each wavelength. There will be N eigenvalues at each wavelength, corresponding to each lateral optical mode of the central Waveguide making up the Devices (the WG at the split).
The eigenvalues are the (complex) magnitude & phase that would be applied to a field after a roundtrip in the cavity. Thus a negative magnitude means the field decays each roundtrip (radiation loss or something), and a Zero-phase means the field is in-phase with itself (resonant) and can constructively interfere with itself after a round-trip.
The eigenvectors are the magnitudes/coefficients of each mode in the basis set (the modes of the central-section WG) to get the above eigenvalues. You would launch the central-section modes at these magnitudes/phases to produce the optical fields corresponding to the eigenvalue (to get that round-trip amplitude & phase).
For eigenvalues & eigenvectors, indexing is like so:
>>> eigenvalues[Iwavelength][Imodenum]
Where `wavelengths[Iwavelength]` tells you which wavelength you're inspecting, and `Imodenum` tells you which mode number you're inspecting.
resWLs , resEigVals, resEigVects : list of complex floats
The Resonance wavelengths and corresponding EigenValues & EigenVectors (complex numbers).
Each list index corresponds to a cavity mode with unique lateral mode-profile, and there may be multiple resonances found for each mode. If no resonances were located, `None` is entered into the list for that mode.
Indexing is similar to `eigenvalues` & `eigenvectors`
pseudo-attributes:
mode(N) : select one or more cavity modes to extract data for, or pass the string 'all' to work with all modes. This actually (invisibly to the user) returns a `CavityMode` object, which can perform other actions on the selected mode.
See `help( CavityObj.mode('all') )` or `help(CavityMode)` for more info on the usage & attributes/methods available.
Examples
--------
Make the left & right hand side devices, with 20 periods of repeating waveguides. Note that the last waveguide in LHS is the same as the first waveguide in RHS. Location of the split and thickness on either side is arbitrary.
>>> LHS = <pyfimm>.Device( 20*( WG2(0.275) + WG3(0.125) ) + WG1(0.05) )
>>> RHS = <pyfimm>.Device( WG1(0.05) + 20*( WG2(0.275) + WG3(0.125) ) )
>>> Cav = <pyfimm>.Cavity( LHS, RHS ) # Define the cavity
>>> WLs = numpy.array( [1.490, 1.495, 1.500, 1.505, 1.510] )
>>> Cav.calc( WLs ) # Sweep the wavelength and calculate the eigenmodes
>>> Cav.mode(0).plot() # plot the eigenvalues for the first lateral mode
>>> Cav.mode([0,1,2]).plot() # plot the eigenvalues for the first three lateral modes
>>> Cav.mode('all').plot() # plot the eigenvalues for all modes
>>> Cav.mode(0).plot('Ex') # plot the Ex electric field vs. Z for resonance of lateral Mode #0.
>>> print Cav.get_resonance_wavelengths() # print the resonance wavelengths
'''
def __init__(self, *args, **kwargs):
'''Please see help(Cavity) for usage info.'''
#if DEBUG(): print "Cavity() connection test: " + str(fimm.Exec("app.numsubnodes()"))
if len(args) >= 2:
self.LHS_Dev = args[0]
self.RHS_Dev = args[1]
self.name = "Cavity(%s/%s)"%(self.LHS_Dev.name, self.RHS_Dev.name)
else:
raise ValueError("Invalid Number of arguments to Cavity constructor - expected exactly 2 Device objects.")
## Should check that LHS & RHS sections have same central cross-section
if kwargs:
'''If there are unused key-word arguments'''
ErrStr = "WARNING: Cavity(): Unrecognized keywords provided: {"
for k in kwargs.iterkeys():
ErrStr += "'" + k + "', "
ErrStr += "}. Continuing..."
print ErrStr
#end __init__
def __str__(self):
''' How to `print` this object.'''
string= 10*"-" + " Left-Hand Device " + 10*"-" + "\n"
string += str(LHS_Dev)
string= 10*"-" + " Right-Hand Device " + 10*"-" + "\n"
string += str(RHS_Dev)
return string
#end __str__
def buildNode(self, parent=None, overwrite=False, warn=True, build=True):
'''If either of the two constituent Devices passed haven't been built, they will now have their nodes built.
Parameters
----------
parent : Node object, optional
Provide the parent (Project/Device) Node object for this waveguide.
build : { True | False }, optional
If either of the constituent Devices aren't built, attempt to call their `buildNode` method.
overwrite : { True | False }, optional
Overwrite existing Device node of same name? Defaults to False, which will rename the node if it has the same name as an existing node.
warn : {True | False}, optional
Print notification if overwriting a node/building this Cavity? True by default.
'''
if warn: print "WARNING: Cavity.buildNode(): Cavity is not a FimmWave node, just a pyFimm virtual-object, so there is nothing to build in FimmWave for this Cavity. The constituent FimmWave Devices will now attempt to be built."
if parent: self.parent = parent
if not self.LHS_Dev.built: self.LHS_Dev.buildNode(name='LHS', parent=self.parent, overwrite=overwrite, warn=warn)
if not self.RHS_Dev.built: self.RHS_Dev.buildNode(name='RHS', parent=self.parent, overwrite=overwrite, warn=warn)
#end buildNode()
def calc(self, WLs, Display=False):
    '''Sweep wavelength & compute the cavity's round-trip eigenmodes.
    Based on Photon Design's example "Modelling a passive optical cavity
    (VCSEL, DFB)" & the accompanying Python script by Vincent Brulis at
    Photon Design, 2014.
    Parameters
    ----------
    WLs : list/array of floats
        Wavelengths at which to calculate the cavity eigenvalues. The density
        of this list sets the wavelength-accuracy of any resonances found.
    Display : { True | False }, optional
        If True, display the calculated eigenvalues during the wavelength
        sweep (handy for copy/paste). Defaults to False.
    Returns
    -------
    Nothing directly; instead the following attributes are populated:
    wavelengths : numpy array
        Copy of the `WLs` passed in.
    eigenvalues, eigenvectors : 2-D lists of complex floats
        Round-trip eigenvalues/eigenvectors at each wavelength, one entry per
        lateral mode (up to get_N() ). The imaginary part of an eigenvalue
        corresponds to the round-trip optical phase and the real part to the
        cavity loss; an eigenvector holds the modal amplitudes producing that
        eigenvalue, and can be fed directly to `Device.set_input( <vector> )`.
    resWLs, resEigVals, resEigVects, resLosses :
        Resonance wavelengths & corresponding eigenvalues/eigenvectors/losses
        per lateral mode; `None` is entered for modes with no resonance.
        Resonance is located where imag(eigenvalue) is closest to zero with
        real(eigenvalue) positive.
    Also adds `S_RHS_ll` & `S_LHS_rr` (the reflecting scattering matrices of
    each half, as viewed from the central split) and `S_RT` (their
    dot-product, the round-trip scattering matrix).
    Examples
    --------
    >>> CavityObject.calc( numpy.arange( 0.990, 1.200, 0.01 ) )
    >>> CavityObject.calc( [1.000, 1.050, 1.110] )
    >>> CavityObj.eigenvalues   # one list of eigenvalues per mode; use pyFIMM.set_N() to set the mode count
    '''
    # Record the scan axis first so later plots/lookups can use it.
    self.wavelengths = np.array(WLs)
    # Main sweep: builds round-trip scattering matrices & solves the eigenproblem.
    eigvals, eigvects = self.__CavityModeCalc(self.LHS_Dev, self.RHS_Dev, WLs, Display=Display)
    self.eigenvalues = eigvals
    self.eigenvectors = eigvects
    # Locate resonances for each of the get_N() lateral modes.
    self.resWLs, self.resEigVals, self.resEigVects, self.resLosses = self.__FindResonance(get_N())
#end calc()
def mode(self, num='all'):
    '''Select one or more lateral cavity modes to work on. Defaults to 'all' modes.
    Fix: the docstring always promised a default of 'all', but the original
    signature had no default, so `CavityObj.mode()` raised TypeError. The
    default `num='all'` is now provided (backward-compatible).
    Parameters
    ----------
    num : int, list of int's, or 'all', optional
        If an integer is passed, that lateral mode is selected. If the string
        "all" is passed (the default), functions will attempt to return data
        for all calculated modes, when applicable.
    Returns
    -------
    CavityMode object wrapping this Cavity & the selection - see
    `help(pyfimm.CavityMode)` or `help( CavityObj.mode(0) )` for its
    methods/attributes.
    '''
    return CavityMode(self, num) # return CavityMode object
def get_length(self):
'''Get the total length of this Cavity.'''
return self.LHS_Dev.get_length() + self.RHS_Dev.get_length()
def __ploteigs(self, ):
    '''DEPRECATED: Cavity.ploteigs() is replaced by `Cavity.mode('all').plot()`, so the code is now in the __CavityMode.py module.
    Plot the Eigenvalues for all modes at each wavelength.
    Real parts plotted with '-x' & imaginary parts plotted with '-o'.
    Fix: the legend-text lists were initialized as `leg1`/`leg2` but appended
    to (and read) as `leg1txt`/`leg2txt`, so the original raised NameError on
    first use; the lists are now initialized under the names actually used.
    A leftover per-mode debug print was also removed.
    Returns
    -------
    handles : tuple of (fig1, ax1, ax2, l1, l2, leg)
        fig1 : main figure object
        ax1 : primary axis, for the Real part of the Eigenvalues.
        ax2 : secondary (twin) axis, for the Imaginary part of the Eigenvalues.
        l1 : list of line objects for the Real parts.
        l2 : list of line objects for the Imaginary parts.
        leg : the legend object.
    '''
    print("WARNING: Cavity.ploteigs() is being deprecated - please use `Cavity.mode('all').plot()` instead")
    import matplotlib.pyplot as plt
    if len(self.eigenvalues) == 0: raise UserWarning("No Cavity modes found! Cavity modes not calculated yet? Please run Cavity.calc() to do so.")
    EigsArray = self.eigenvalues
    WLs = self.wavelengths
    fig1, ax1 = plt.subplots(1, 1)
    box = ax1.get_position()
    ax1.set_position([ box.x0, box.y0, box.width * 0.8, box.height]) # reduce axis width to 80%, to make space for legend
    ax2 = ax1.twinx()
    # one line + legend entry per mode, for real & imaginary parts:
    l1 = []; l2 = []
    leg1txt = []; leg2txt = []  # FIX: were mistakenly initialized as `leg1`/`leg2`
    for i in range( EigsArray.shape[1] ):
        l1.append( ax1.plot(WLs, EigsArray[:,i].real, '-x', label="Mode "+str(i)+": real") )
        leg1txt.append("Mode "+str(i)+": real")
        l2.append( ax2.plot(WLs, EigsArray[:,i].imag, '-o', label="Mode "+str(i)+": imag") )
        leg2txt.append("Mode "+str(i)+": imag")
    ax1.set_xlabel(r"Wavelength, ($\mu{}m$)")
    ax1.set_ylabel("Real")
    ax2.set_ylabel("Imaginary")
    ax1.set_title("Cavity Eigenvalues")
    leg = ax1.legend( (l1, l2), (leg1txt, leg2txt), loc='upper left', bbox_to_anchor=(1, 1) , fontsize='small' )
    fig1.canvas.draw(); fig1.show()
    return fig1, ax1, ax2, l1, l2, leg
#end ploteigs
def get_refractive_index(self, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0, calc=True):
'''Calls `Dev.get_field('index')` of each sub-Device to return the refractive index of the device, and then concatenates them appropriately. The `component` & `direction` options have been removed as compared with `get_field()`.
See `help(Device.field)` for info on the other options.
'''
zptsL=int(zpoints/2.); zptsR=np.round(zpoints/2.)
Lfield = self.LHS_Dev.get_field('rix', zpoints=zptsL, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction='total', calc=calc)
Rfield = self.RHS_Dev.get_field('rix', zpoints=zptsR, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, direction='total', calc=calc)
Lfield.extend(Rfield) # concatenate the L+R fields
return Lfield
#end get_refractive_index()
def plot_refractive_index(self, zpoints=3000, zmin=0.0, zmax=None, xcut=0.0, ycut=0.0, calc=True, return_handles=False, title=None):
    '''Plot the cavity's refractive index profile versus Z.
    Parameters
    ----------
    return_handles : { True | False }, optional
        If True, return handles to the figure, axis and line(s). False by default.
    title : str, optional
        Text to prepend to the plot title.
    The remaining options are forwarded to `get_refractive_index()` (and hence
    to `Dev.get_field()` of the two constituent Devices), so see
    `help(Device.field)` for their meaning.
    '''
    import matplotlib.pyplot as plt # local import: only needed when plotting
    index_profile = self.get_refractive_index(zpoints=zpoints, zmin=zmin, zmax=zmax, xcut=xcut, ycut=ycut, calc=calc)
    z_coords = np.linspace( 0, self.get_length(), num=len(index_profile) ) # Z-coordinate axis
    fig, ax_main = plt.subplots(1, 1)
    lines = [ ax_main.plot(z_coords, np.array(index_profile).real, 'g-', label="Refractive Index" ) ]
    ax_main.set_ylabel( "Refractive Index" )
    plot_title = self.name + ": Refractive Index vs. Z"
    if title:
        plot_title = title + ": " + plot_title
    ax_main.set_title( plot_title )
    ax_main.grid(axis='both')
    ax_main.set_xlabel(r"Z, ($\mu{}m$)")
    fig.canvas.draw()
    fig.show()
    if return_handles:
        return fig, ax_main, lines
#end plot_refractive_index()
def __CavityModeCalc(self, LHS, RHS, scan_wavelengths, OverlapThreshold=0.95, Display=False):
'''Cavity Mode Calculator
Based on PhotonDesign's Example "Modelling a passive optical cavity (VCSEL, DFB)"
Python script by Vincent Brulis at Photon Design, 2014; heavily modified by Demis D. John to incorporate into pyFIMM.
Parameters
----------
LHS : Left-hand Side Device object
RHS : Right-hand Side Device object
WL_range : array-like
Wavelengths to solve for as list, array or similar (any iterable).
OverlapThreshold : float, optional
If the overlap between the eigenvector and the mode is above this threshold, we will consider this eigenvector to represent this mode number. Default= 0.95. This is important when sorting the eigenvectors, as numpy sorts the eigenproblem's solutions by the eigenvalue, while we would prefer to sort them based on which waveguide mode they represent.
Display : { True | False }, optional
Print the calculated eigenvalues during wavelength sweep? Defaults to False. Useful for copy/pasting the data into a text file.
Returns
-------
(eigenvals, eigenvects)
eigenvals : numpy array
Calculated eigenvalues at each wavelength as a numpy.array with eigenvalues for each waveguide mode, with the form
[ [EigsMode0, EigsMode1, EigsMode2, ..... EigsModeN] <-- 1st wavelength in scan_wavelengths
[EigsMode0, EigsMode1, EigsMode2, ..... EigsModeN] <-- 2nd wavelength in scan_wavelengths
...
[EigsMode0, EigsMode1, EigsMode2, ..... EigsModeN] ] <-- last wavelength in scan_wavelengths
so len( CavityObj.eigenvalues ) == NumModes = pyFIMM.get_N()
eigenvects : numpy array
The calculated eigenvectors - amplitude/phase coefficients for each calc'd mode in the central section to achieve the above eigenvalues. Similar format as eigenvects. These can be launched via `DeviceObj.set_input()`.
Adds the following attributes to the Cavity object:
S_RHS_ll, S_LHS_rr : lists
Scattering matrices as viewed from the cavity split, for the RHS reflection (ll) and LHS reflection (rr).
S_RT : list
Scattering matrix for the round-trip, which is simply the dot-product of S_RHS_ll & S_LHS_rr.
'''
import sys # for progress bar
nWLs = len(scan_wavelengths) # Number of WLs.
#Nguided=0 #dimension of the truncated eigenmatrix, should be set to number of guided modes, please set to 0 to solve the eigenproblem for all the modes
#OverlapThreshold = 0.95 # if the overlap between the eigenvector and the mode is above this threshold, we will consider them identical
self.__FPList = []
self.__pathFPList = []
self.__projFPList = []
self.__ProjList = []
self.__pathProjList = []
self.__PDnames = []
self.__PDpath = []
self.__PDproj = []
self.__eigen_imag = []
self.__eigen_real = []
self.S_RHS_ll = []
self.S_LHS_rr = []
self.S_RT = []
fimm.Exec("Ref& parent = app")
n = len(self.__FPList)
fimm.Exec("Ref& fpLHS = " + LHS.nodestring)
fimm.Exec("Ref& fpRHS = " + RHS.nodestring)
# Retrieve the number of modes in the central section
N=fimm.Exec("fpRHS.cdev.eltlist[1].mlp.maxnmodes") # could replace with `self.RHS.element...`
while 1:
try:
N = int(N) # check if numerical value returned
break
except ValueError:
print self.name + ".calc:CavityModeCalc(): WARNING: Could not identify how many modes are calculated in the cavity, using get_N()"
N = get_N()
#if DEBUG(): print "CMC(): N={}".format(N)
# for printing our the eigenvectors/values:
Ndisplay = N # we want to display all the modes
labels = "lambda "
for i in range(0,Ndisplay,1):
labels = labels + "real_mode" + str(i+1) + " imag_mode" + str(i+1) + " "
#print labels
# mode 1: scan wavelength <-- This is the only mode this script currently runs in
# we will display all the modes, ranked by waveguide mode
EigVect = [] ## To save the EigenVectors vs. wavelength
EigVal = []
# Progress Bar setup:
ProgMax = 20 # number of dots in progress bar
if nWLs<ProgMax: ProgMax = nWLs
print "\n|" + ProgMax * "-" + "| Cavity.calc() progress"
sys.stdout.write('|'); sys.stdout.flush(); # print start of progress bar
nProg = 0 # progress bar - fraction of progress
for step,wavelength in enumerate(scan_wavelengths):
''' `step` goes from 0-->len(scan_wavelengths).
`wavelength` is the actual WL value. '''
'''scan_wavelengths is already array-like, no need to construct wavelength at each step '''
#wavelength = wavelength_min + step*(wavelength_max - wavelength_min)/wavelength_steps
fimm.Exec("fpRHS.lambda="+str(wavelength)) # set Device-specific wavelength
fimm.Exec("fpLHS.lambda="+str(wavelength))
# this reset is an attempt to prevent memory issues
fimm.Exec("fpRHS.reset1()")
fimm.Exec("fpLHS.reset1()")
fimm.Exec("fpRHS.update()") # calc the Scattering Matrix
RRHS = np.zeros( [N,N], dtype=complex )
SMAT = []
for i in range(1,N+1,1):
''' Get Left-to-Left (reflecting) scattering matrix for Right-hand-side of cavity, for each WG mode.'''
SMAT.append( fimm.Exec("fpRHS.cdev.smat.ll["+str(i)+"]") )
for i in range(1,N+1,1):
for k in range(1,N+1,1):
RRHS[i-1][k-1] = SMAT[i-1][k] # the index "k" is due to the fact that the first element of each line is "None"
#if DEBUG(): print "RRHS:" # temp
#if DEBUG(): print RRHS # temp
self.S_RHS_ll.append( RRHS ) # store the left-to-left scattering matrix
fimm.Exec("fpLHS.update()")
# update progress bar:
if ( step >= nProg*nWLs/ProgMax ):
sys.stdout.write('*'); sys.stdout.flush(); # print a small progress bar
nProg = nProg+1
if ( step >= nWLs-1 ):
sys.stdout.write('| done\n'); sys.stdout.flush(); # print end of progress bar
RLHS = np.zeros([N,N],dtype=complex)
SMAT = []
for i in range(1,N+1,1):
'''Get Right-to-Right (reflecting) scattering matrix for Left-hand-side of cavity, for each WG mode.'''
SMAT.append( fimm.Exec("fpLHS.cdev.smat.rr["+str(i)+"]") )
for i in range(1,N+1,1):
for k in range(1,N+1,1):
RLHS[i-1][k-1] = SMAT[i-1][k] # the index "k" is due to the fact that the first element of each line is "None"
self.S_LHS_rr.append( RLHS ) # store the right-to-right scattering matrix
''' Calculate the round-trip matrix R2, by multiplying reflecting Smat's of each side of cavity. '''
R2 = np.dot(RRHS,RLHS) # combined scattering matrix for cavity round-trip
self.S_RT.append( R2 ) # store round-trip scattering matrix at this wavelength
# solve eigenproblem
Eig = np.linalg.eig(R2) # returned in format: (array([e1, e2]), array([v1, v2])
# eigenvector (coefficient of each WG mode to produce scalar transformation) is in Eig[1]
# eigenvalue (amplitude & phase applied to EigVect upon roundtrip) is in Eig[0]
'''
Eig_reorg = [] # we want to achieve an easier format: ([e1,v1],[e2,v2])
for i in range(0,Nguided,1):
Eig_reorg.append([Eig[0][i],Eig[1][i]])
'''
# 'zip' the two arrays together to rearrange as [ [e1,[v1]], [e2,[v2]]...[eN,[vN]] ]
Eig_reorg = map(list, zip(Eig[0], Eig[1]) ) # also re-map the (tuples) that zip() returns to [lists], so [list].append() will work later
#if DEBUG(): print "Eig_reorg=" , Eig_reorg
# now we move on to processing and displaying the results
'''
# we will display all the modes, ranked by eigenvalue
Eig_ranked = []
# calculate magnitude of eigenvalue then rank Eigenvalues accordingly
#*** I think these loops can be replaced with more efficient Numpy functions
for i in range(0,Nguided,1):
magnitude = (Eig_reorg[i][0].real)**2+(Eig_reorg[i][0].imag)**2
if len(Eig_ranked)==0:
Eig_ranked.append(Eig_reorg[i]+[magnitude])
else:
found = 0
for j in range(0,len(Eig_ranked),1):
if magnitude > Eig_ranked[j][2]:
Eig_ranked_temp = Eig_ranked[:j]
Eig_ranked_temp.append(Eig_reorg[i]+[magnitude])
Eig_ranked = Eig_ranked_temp + Eig_ranked[j:]
found = 1
break
if found == 0:
Eig_ranked.append(Eig_reorg[i]+[magnitude])
'''
# Sorting by predominant mode number, instead of max eigenvalue.
'''
eg. sort eigenvalues according to which mode is largest in the eigenvector:
EigVect_Mode0 = [*0.9983*, 0.003, 3.543e-5]
EigVect_Mode1 = [5.05e-5, *0.9965*, 3.543e-5]
EigVect_Mode2 = [6.23e-5, 0.0041, *0.9912*]
'''
# sort the list of [EigVal, [EigVect]...] with built-in list sorting via sorted()
Eig_ranked = sorted( Eig_reorg, key= lambda x: np.argmax( np.abs( x[1] ) ) )
''' How the above ``sorted` function works:
The lambda function returns a `key` for sorting - where the key tells sorted() which position to put the element in the new list.
The argument passed to the lambda function, `x`, will be the current element in the list Eig_reorg as sorted() loops through it, which will look like x=[ EigVal, [EigVec0, EigVec1...EigVecN] ].
We then select only the EigenVector part with `x[1]`. Then the lambda function returns the index to whichever EigVect element has the maximum amplitude (`np.abs()`), generated by `numpy.argmax()` -- the index to that element will be the `key` used for sorting - ie. the vector that has the 1st element as max. ampl. will be sorted to the top of the resulting list.
'''
if DEBUG(): print "Eig_ranked=" , Eig_ranked
## To save EigenVector/EigenValue at this wavelength:
EigVect_n = []
EigVal_n = []
# display eigenvalues + save eigvals for each WG mode:
outputstr = str(wavelength) + " "
for i in range(0,Ndisplay,1):
## Save eigenvector/eigenvalue for this mode
EigVect_n.append(Eig_ranked[i][1])
EigVal_n.append(Eig_ranked[i][0])
#if DEBUG(): print "Mode %i: EigVect_n[-1]="%(i) , EigVect_n[-1]
outputstr = outputstr + str(Eig_ranked[i][0].real) + " " + str(Eig_ranked[i][0].imag) + " "
if Display: print outputstr
## Save Eigenvector/Eigenvalue at this wavelength
EigVect.append(EigVect_n)
EigVal.append(EigVal_n)
#if DEBUG(): print "EigVect_n(WL)=", EigVect_n
#end for(wavelengths)
print # new line
return np.array(EigVal), np.array(EigVect)
#...
# end CavityModeCalc()
def __FindResonance(self, nummodes):
    '''Locate the wavelengths where the round-trip phase is zero (imaginary part of Eigenvalue = 0) & Eigenvalue (related to cavity loss) is positive (not lossy).
    From Vincent Brulis @ PhotonDesign:
        You can detect the resonances by identifying the wavelengths for which the imaginary part of the eigenvalue (round-trip phase) is zero and the real part is positive (the higher the real part, the less lossy the resonance). The round-trip loss (i.e. the threshold gain) for a given resonance can be obtained from 10*log(real^2).

    Parameters
    ----------
    nummodes : int
        Number of cavity modes to scan for resonances.

    Returns
    -------
    resWL, resEigVals, resEigVects, loss : lists
        List of wavelengths, eigenvalues, eigenvectors and round-trip losses for each mode. List index corresponds to each mode, and will contain `None` if no cavity resonance was found for that mode.
    '''
    #modenum = self.modenum
    WLs = self.wavelengths
    # Per-mode accumulators; each gets exactly one entry per mode
    # (either an array of resonances or None).
    resWL = []
    resEigVals = []
    resEigVects = []
    losses = []
    for modenum in range(nummodes):
        # Real part = round-trip amplitude (loss); imaginary part = round-trip phase.
        Eigs_r = self.eigenvalues[:,modenum].real
        Eigs_i = self.eigenvalues[:,modenum].imag
        I0 = []  # wavelength indices where a resonance is detected
        for i in xrange(len(Eigs_i)-1):
            '''xrange() is identical to range() but more efficient with memory, and replaces range() in later Python versions (ver>=3 ?).'''
            if (Eigs_i[i] > 0 and Eigs_i[i+1] < 0) or \
                (Eigs_i[i] < 0 and Eigs_i[i+1] > 0):
                '''If imaginary crosses zero.'''
                if Eigs_r[i]>0 or Eigs_r[i+1]>0:
                    '''If real part is positive.
                    Choose the point with minimum imaginary part.'''
                    # Of the two samples bracketing the zero-crossing, keep the
                    # one closest to zero phase.
                    if abs( Eigs_i[i] ) < abs( Eigs_i[i+1] ):
                        I0.append( i )
                    else:
                        I0.append( i+1 )
                    #if DEBUG(): print "Mode %i: "%(modenum) + "crossing between indexes %i and %i"%(i, i+1)
                    if DEBUG(): print "Mode %i: "%(modenum) + "; Resonance found at Wavelength ", WLs[I0[-1]], " um: " + "Eigs_i=", Eigs_i[I0[-1]], "; Eigs_r=", Eigs_r[I0[-1]]
        #end for(Eigs_i)
        if len(I0) == 0:
            ''' if no resonance found'''
            if DEBUG(): print( "_findres(): Mode=", modenum, " // No Resonance" )
            resWL.append( None )
            resEigVals.append( None )
            resEigVects.append( None )
            losses.append( None )
        else:
            if DEBUG(): print( "_findres(): Mode=", modenum, " // I0=", I0 )
            resWL.append( WLs[I0] )  # save all resonance wavelengths for this mode
            resEigVals.append( self.eigenvalues[I0,modenum] )  # save all resonance EigVals for this mode
            resEigVects.append( self.eigenvectors[I0,modenum] )  # save all resonance EigVects for this mode
            # normalize the eigenvalue, to the magnitude of the eigenvectors:
            loss=[]
            if DEBUG(): print("_findres(): len(resEigVals)=", len(resEigVals[-1]))
            for ii in range( len(resEigVals[-1]) ):
                '''in case multiple resonances for this mode'''
                if DEBUG(): print( "_findres(): rVect[", ii, "]=", resEigVects[-1][ii])
                if resEigVects[-1][ii] != None:
                    MagEigVect = [ np.sum( np.abs( rVect ) ) for rVect in resEigVects[-1][ii] ]  # total magnitude of the eigenvector
                    eVal_norm = np.array(resEigVals[-1][ii]) / np.array(MagEigVect)  # normalized eigenvalues
                    loss.append( 1.0 - np.real(eVal_norm) )  # fractional loss for input mode amplitude
                else:
                    loss.append( None )
            losses.append( np.array(loss) )
    #end for(modenum)
    return (resWL, resEigVals, resEigVects, losses)
#end __FindResonance
#end class Cavity
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,433
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/PhotonDesignLib/pdPythonLib.py
|
# pdPythonLib version 1.6
# Command Line Interface with Python for Photon Design products
from string import *
from socket import *
from struct import *
from math import ceil
from time import sleep
import os
import re
import __main__
import types
import time # for pausing execution
INTBUFFSIZE = 20 #tcp/ip buffer length defined in the application; the message-length header sent/received by Exec() is padded to this size
portsTaken = []  # list of ports that are already taken (module-level, shared by all pdApp instances)
nextPortAvailable = 5101  # next candidate TCP port for getNextAvailablePort() to try
CONNECTIONATTEMPTS = 10   # number of 1-second connect retries in ConnectToApp1() before giving up
MaxBuffSize = 4096 #maximum data size that can be retrieved at once (recommended values: 4096 (more stable) or 8192 (faster))
delay = 0.01 #delay (in s) between two batches of data (recommended values: 0.01 (more stable) or 0.001 (faster))
def IsPortAvailable(portNo):
    '''Return 1 when `portNo` is not already claimed in the module-level
    `portsTaken` list, or 0 when it has been taken.'''
    global portsTaken
    if portNo in portsTaken:
        return 0
    return 1
def getNextAvailablePort():
    '''Scan upward from the module-level `nextPortAvailable` counter and
    return the first port number not listed in `portsTaken`.
    Advances the global counter as a side effect.'''
    global nextPortAvailable
    while not IsPortAvailable(nextPortAvailable):
        nextPortAvailable = nextPortAvailable + 1
    return nextPortAvailable
def getNumOrStr(msgstr):
    '''Convert a single FimmWave reply token to a Python value.

    Parameters
    ----------
    msgstr : str
        Raw (non-empty) text token returned by the application.

    Returns
    -------
    complex, float or str
        "(re,im)" pairs are parsed as a complex number; plain numbers
        (only the text before the first newline is considered) as a float;
        anything unparseable is returned unchanged as the original string.
    '''
    # Fix: use the str.find() method instead of the Python-2-only
    # string.find() module function (removed from the string module in
    # Python 3); behavior is identical on Python 2.
    if msgstr[0] == '(':
        # Complex numbers arrive formatted as "(re,im)".
        reidx = msgstr.find(',')
        imidx = msgstr.find(')', 0)
        try:
            rebit = float(msgstr[1:reidx])
        except ValueError:
            return msgstr  # malformed real part: hand back the raw string
        try:
            imbit = float(msgstr[reidx+1:imidx])
        except ValueError:
            return msgstr  # malformed imaginary part: hand back the raw string
        return rebit + imbit*1j
    # Plain value: ignore anything after the first newline.
    nlidx = msgstr.find('\n')
    if nlidx != -1:
        recmsg2 = msgstr[0:nlidx]
    else:
        recmsg2 = msgstr
    try:
        return float(recmsg2)
    except ValueError:
        return msgstr  # not numeric: return the raw string unchanged
def InterpretString1(commStr,varList):
    '''Expand "{expression}" placeholders embedded in a command string.

    Each {expr} span is eval()'d in the __main__ namespace; '%' characters
    inside the expression are first substituted, in order, by repr() of
    successive entries of `varList`. String results are spliced in as-is,
    everything else via repr().

    NOTE(review): eval() of caller-supplied text — only safe because the
    command strings originate from the local script, never from the network.
    '''
    currIdx = 0
    nextIdx = 0
    noExpr = 0  # count of '%' placeholders consumed from varList so far
    while (1):
        # Locate the next {...} span; stop when either brace is missing.
        currIdx = find(commStr,'{',currIdx)
        nextIdx = find(commStr,'}',nextIdx)
        if ((currIdx==-1) or (nextIdx==-1)):
            break
        expression = commStr[currIdx+1:nextIdx]
        #Now find '%' and replace with object values
        idxtemp = 0
        while (1):
            idxtemp = find(expression,'%',idxtemp)
            if idxtemp==-1:
                break
            expression = expression[0:idxtemp] + repr(varList[noExpr]) + expression[idxtemp+1:]
            noExpr = noExpr + 1
        # Evaluate in the caller's (interactive) namespace.
        subobj = eval(expression,__main__.__dict__)
        if (type(subobj)==types.StringType):
            # strings are substituted verbatim (no quotes added)
            commStr = commStr[0:currIdx] + subobj + commStr[nextIdx+1:]
        else:
            commStr = commStr[0:currIdx] + repr(subobj) + commStr[nextIdx+1:]
    return commStr
def InterpretString(commStr,varList):
    '''Interpret a command string, leaving double-quoted segments verbatim.

    The string is split at double quotes: text outside quotes is passed
    through InterpretString1() for {expression}/'%' substitution; text
    inside matched quote pairs is copied unchanged (quotes included).
    Quotes must come in open/close pairs, otherwise the original string is
    returned with an error printed.
    '''
    commStr1 = ""  # current unquoted segment being processed
    commStr2 = ""  # accumulated output
    currIdx = 0
    nextIdx = 0
    isStringDone = 0
    while (isStringDone!=1):
        nextIdx = find(commStr,'"',currIdx)
        if (nextIdx==-1):
            # No more quotes: interpret the remainder and finish.
            isStringDone=1
            commStr1 = commStr[currIdx:len(commStr)]
            commStr2 = commStr2 + InterpretString1(commStr1,varList)
        else:
            # Interpret the text before the opening quote...
            commStr1 = commStr[currIdx:nextIdx]
            commStr2 = commStr2 + InterpretString1(commStr1,varList)
            currIdx = find(commStr,'"',nextIdx+1) #Must have open quotes and end quotes!!!
            if (currIdx==-1):
                print "Error interpreting command\n"
                return commStr
            # ...then copy the quoted span through verbatim.
            commStr2 = commStr2 + commStr[nextIdx:currIdx+1]
            currIdx = currIdx + 1
    return commStr2
#NB: msgstr must contain (".....RETVAL:.......") or it will fail!!!
def InterpretString3(msgstr):
    '''Decode the payload following a "RETVAL:" marker in an application reply.

    Returns a scalar (via getNumOrStr), a flat list (FimmWave lists and 1D
    arrays), or a list of lists (2D arrays). Whenever the payload does not
    match the expected "<identifier>[i]" / "<identifier>[i][j]" layout, the
    remaining raw string is returned unchanged.
    '''
    retvalidx = find(msgstr,"RETVAL:")
    if (retvalidx==-1):
        return msgstr
    msgstr = msgstr[retvalidx+7:]  # strip everything up to & incl. "RETVAL:" (7 chars)
    currIdx = find(msgstr,'[')
    if (currIdx!=-1): #might be a list, a 1d array or a 2d array
        # whitespace-split into alternating identifier/value tokens
        arrStr = re.split("\s*",msgstr)
        del arrStr[0] #if it is a list or an array, first element is ''
        arrStrlen = len(arrStr)
        del arrStr[arrStrlen-1] #last element is the \000 character
        arrList = []
        #check format to see if it is a list, a 1d array or a 2d array
        #list or 1d array format of arrStr[0] MUST BE:
        #<array-identifier>[integer]
        #for a 2d array it is:
        #<array-identifier>[integer][integer]
        currIdx = find(arrStr[0],'[')
        nextIdx = find(arrStr[0],']',currIdx)
        testStr = arrStr[0]
        idx1Start = 0
        try:
            idx1Start = int(testStr[currIdx+1:nextIdx])
        except:
            return msgstr
        #Now we know it's an array
        #We can fill array up to the first index
        # (FimmWave arrays may start at a non-zero index; pad with None)
        for i in range(0,idx1Start):
            arrList.append(None)
        if (nextIdx==(len(testStr)-1)): # only one '[...]'
            #This is either a 1D array or list
            # we now need to work out whether this is an array of a list
            #list format of arrStr[1] MUST BE:
            #<array-identifier>[integer]
            #for a 1d array it is:
            #value (no '[')
            try:
                arrayOrList = find(arrStr[1],'[')
            except IndexError: # this is a list with only one element
                return msgstr
            if arrayOrList==-1:
                # this is a 1D array: tokens alternate identifier/value,
                # so step 2 and take the value tokens
                for i in range(1,arrStrlen-1,2): # was range(1,arrStrlen-1,2); not sure why!
                    arrList.append(getNumOrStr(arrStr[i]))
                return arrList
            else:
                # this is a list: every token is a value
                for i in range(0,arrStrlen-1,1): # was range(1,arrStrlen-1,2); not sure why!
                    arrList.append(getNumOrStr(arrStr[i]))
                return arrList
        nextIdx = nextIdx +1
        if (testStr[nextIdx]!='['):
            return msgstr
        currIdx = find(testStr[nextIdx:],']') + nextIdx
        if (currIdx!=-1):
            try:
                idx2Start = int(testStr[nextIdx+1:currIdx])
            except:
                return msgstr
            #Now we know it's a 2d array
            idx1 = -1  # row index of the row currently being filled
            for i in range(0,arrStrlen-2,2):
                # parse "ident[x][y]" from the identifier token
                testStr = arrStr[i]
                currIdx = find(testStr,'[')
                nextIdx = find(testStr[currIdx:],']') + currIdx
                x = int(testStr[currIdx+1:nextIdx])
                currIdx2 = find(testStr[nextIdx:],'[') + nextIdx
                nextIdx2 = find(testStr[currIdx2:],']') + currIdx2
                y = int(testStr[currIdx2+1:nextIdx2])
                #Assumed to ALWAYS be an int and currIdx+1!=nextIdx
                if (x!=idx1): #next row of matrix
                    idx1 = x
                    arrList.append([])
                    #fill inner list(array) up to first index
                    for k in range(0,idx2Start):
                        arrList[idx1].append(None)
                arrList[idx1].append(getNumOrStr(arrStr[i+1]))
            return arrList
    else:
        return getNumOrStr(msgstr)
class pdApp:
    '''TCP/IP command-line connection to a Photon Design application
    (e.g. FimmWave/FimmProp).

    Typical use: StartApp() or ConnectToApp() to establish the socket, then
    Exec(command) to send a command string and receive the decoded reply.
    Port bookkeeping is shared module-wide via `portsTaken` /
    `nextPortAvailable`.
    '''
    def __init__(self):
        self.appSock = None   # connected socket, or None while disconnected
        self.currPort = None  # TCP port claimed by this connection
        self.cmdList = ''     # commands buffered by AddCmd(), flushed by Exec()
    def __del__(self):
        # Close the socket and release the claimed port on garbage collection.
        if (self.appSock!=None):
            self.appSock.close() #close() = os function?
        self.CleanUpPort()
    def CleanUpPort(self):
        '''Release this object's port from the module-level `portsTaken` list.'''
        global portsTaken
        global nextPortAvailable
        if (len(portsTaken)==1):
            portsTaken = []
        for i in range(0,len(portsTaken)-1):
            if (portsTaken[i]==self.currPort):
                nextPortAvailable = portsTaken[i]
                del portsTaken[i]
        self.currPort = None
    def StartApp(self,path,portNo = 5101):
        '''Launch the application executable at `path` (detached), telling it
        to listen on `portNo` (or the next free port), then connect to it.
        Returns a status/warning string.'''
        retstr = ''
        if (self.appSock!=None):
            return "This object is already in use."
        a = IsPortAvailable(portNo)
        if (a==0):
            retstr = retstr + "Port No: " + repr(portNo) + " is not available\n"
            portNo = getNextAvailablePort()
            retstr = retstr + "Using Port No: " + repr(portNo) +" instead.\n"
        #here try to change dir to the exe path dir.
        a = rfind(path,"\\")
        if (a!=-1):
            if (path[0:a]==''):
                os.chdir("\\")
            else:
                os.chdir(path[0:a])
        try:
            # Launch detached so the app outlives this process's console.
            os.spawnv(os.P_DETACH,path,[path,"-pt",repr(portNo)])
        except:
            retstr = retstr + "Could not start the application\n"
            return retstr
        # selectPort=0: the port was already vetted above.
        retstr1 = self.ConnectToApp1('localhost',portNo,0)
        retstr = retstr + retstr1
    def ConnectToApp(self,hostname = 'localhost',portNo = 5101):
        '''Connect to an already-running application. Returns a status string.'''
        return self.ConnectToApp1(hostname,portNo,1)
    def ConnectToApp1(self,hostname,portNo,selectPort = 1):
        '''Worker for ConnectToApp()/StartApp(): open the TCP socket, retrying
        up to CONNECTIONATTEMPTS times (1 s apart). Returns a status string.'''
        retstr = ''
        if (self.appSock!=None):
            return "This object is already in use.\n"
        global portsTaken
        global CONNECTIONATTEMPTS
        if (selectPort==1):
            a = IsPortAvailable(portNo)
            if (a==0):
                retstr = retstr + "Port No: " + repr(portNo) + " is not available\n"
                portNo = getNextAvailablePort()
                retstr = retstr + "Using Port No: " + repr(portNo) +" instead.\n"
        self.appSock = socket(AF_INET,SOCK_STREAM)
        a = 0
        print "Attempting to connect to application on TCP/IP Port No. " + repr(portNo)
        while (a<CONNECTIONATTEMPTS):
            try:
                self.appSock.connect((hostname,portNo))
                break
            except:
                a = a + 1
                print "Connection Attempt Number " + repr(a)
                time.sleep(1)
        if (a==CONNECTIONATTEMPTS):
            print "WARNING: Failed to connect to the application\n"
            return retstr + "Failed to connect to the application\n"
        portsTaken.append(portNo)
        self.currPort = portNo
        return retstr
    def AddCmd(self,commStr,varList = []):
        '''Buffer a command (after {}/% interpretation) for the next Exec() call.'''
        commStr = InterpretString(commStr,varList)
        commStr = commStr + ';' #doesn't hurt to add the extra semicolon
        self.cmdList = self.cmdList + commStr
        return None
    def Exec(self,commStr,varList = []):
        '''Send buffered commands plus `commStr` to the application and return
        the decoded reply: a number/complex/string, a (nested) list for array
        replies, a list of values when several RETVALs come back, or None.'''
        msgstr = None
        global INTBUFFSIZE
        global portsTaken
        global nextPortAvailable
        if (self.appSock==None):
            return "application not initialised\n"
        self.AddCmd(commStr,varList)
        # Wire format: INTBUFFSIZE-wide length header, commands, NUL terminator.
        #commlen = len(self.cmdList) #old protocol
        commlen = len(self.cmdList)+1 #new protocol
        commlenstr = repr(commlen)
        self.cmdList = commlenstr + (INTBUFFSIZE-len(commlenstr))*' ' + self.cmdList + '\0'
        try:
            self.appSock.send(self.cmdList)
        except:
            self.CleanUpPort()
            return "Error sending message from this port"
        #here we can flush cmdList
        self.cmdList = ''
        recmsg = self.appSock.recv(INTBUFFSIZE) #first line received is length of message
        nulIdx = find(recmsg,'\x00')
        recmsg = recmsg[0:nulIdx]
        try:
            recmsglen = int(recmsg)
        except ValueError:
            return None #probably a app.exit command
        recmsg = ""
        if (recmsglen>MaxBuffSize): # if there is more data than can be transmitted in one go
            # receive in MaxBuffSize chunks, pausing `delay` seconds between them
            batches=int(ceil(float(recmsglen)/float(MaxBuffSize)))
            for i in range(1,batches+1,1):
                while True:
                    try:
                        recmsg = recmsg + self.appSock.recv(MaxBuffSize)
                        sleep(delay)
                        break
                    except:
                        pass
        else:
            recmsg = self.appSock.recv(recmsglen)
        #now test to see what has been returned
        if (len(recmsg)<recmsglen): # part of the message is missing
            print "================================================================="
            print "WARNING: some of the data sent by the application has not been received."
            print "Please reduce 'MaxBuffSize' or increase 'delay' in pdPythonLib.py"
            print "and try to run the script again."
            print "If the problem remains please contact Photon Design."
            print "================================================================="
            raw_input("Press Enter to continue")
        retvalcount = count(recmsg,"RETVAL:")
        if retvalcount==0: #if no RETVAL, return what was returned (usually an error message)
            return recmsg
        if retvalcount==1:
            msgstr = InterpretString3(recmsg)
            return msgstr
        else:
            # Multiple RETVALs: split the reply at each marker and decode each
            # segment, returning a list of decoded values.
            msgstr = []
            riidxprev = find(recmsg,"RETVAL:") #the position of the first RETVAL statement
            recmsg = recmsg[riidxprev:]
            for a in range(0,retvalcount):
                ridx = find(recmsg[1:],"RETVAL:")
                if ridx==-1:
                    msgstr.append(InterpretString3(recmsg))
                    return msgstr
                msg1 = recmsg[0:ridx+1]
                msgstr.append(InterpretString3(msg1))
                recmsg = recmsg[ridx+1:]
            return msgstr
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,434
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__Tapers.py
|
'''Tapered waveguide classes, part of pyFIMM.'''
from __globals import * # import global vars & FimmWave connection object
from __pyfimm import * # import the main module (should already be imported), includes many 'rect' classes/funcs
from __Mode import Mode # import Mode class
from __Waveguide import Waveguide # import Waveguide class
from __Circ import Circ # import Circ class
#from __pyfimm import DEBUG() # Value is set in __pyfimm.py
from numpy import inf # infinity, for hcurv/bend_radius
#import numpy as np # math
class Taper(Node):
"""Taper( LHS, RHS, [Length, Method] )
pyFimm Taper object, a way of forming waveguides that vary in Z.
The taper takes a "start" waveguide and "end" waveguide and varies between the two over the Z length. Internally, FimmProp slices it up and creates new waveguides at each slice with slight variation as specified.
This inherits from the pyFIMM Node object.
Parameters
----------
LHS : Waveguide or Circ object
WG node to begin the taper with.
RHS : Waveguide or Circ object
WG node to begin the taper with.
length : float, optional
length of the lens. May be omitted, and instead set when Called as part of Device construction.
method : { 'full' }, optional
Defaults to 'full'
Methods
-------
This is a partial list - see `dir(Taper)` to see all methods.
Please see help on a specific function via `help(Taper)` for detailed up-to-date info on accepted arguments etc.
"""
def __init__(self,*args):
self.autorun = True # unused?
self.name=None # unused?
self.built=False # unused?
self.length=0.0 # unused?
self.__materialdb = None # unused?
self.origin = 'pyfimm' # this one is used!
if len(args) == 2:
self.type = 'taper'
self.lhs = args[0].name
self.rhs = args[1].name
self.length = 1
self.method = 'full'
elif len(args) == 3:
self.type = 'taper'
self.lhs = args[0].name
self.rhs = args[1].name
self.length = args[2]
self.method = 'full'
elif len(args) == 4:
self.type = 'taper'
self.lhs = args[0].name
self.rhs = args[1].name
self.length = args[2]
self.method = args[3]
else:
'Invalid number of inputs to Taper()'
def __call__(self,width):
'''Replace with Section() returned?'''
self.length = width
return [self]
def __add__(self,other):
return [self,other]
def set_joint_type(self, jtype, jointoptions=None):
'''Set the joint type after this waveguide, if used in a Device.
type : { 'complete' | 'special complete' | 'normal fresnel' | 'oblique fresnel' }, case-insensitive
synonyms for 'complete' are { 0 }, and is also the default if unset.
synonyms for 'special complete' are { 3 | 'special' }
synonyms for 'normal fresnel' are { 1 | 'fresnel' }
synonyms for 'oblique fresnel' are { 2 }
jointoptions : Dictionary{} of options. Allows for the Device.buildnode() to set various joint options, such as angle etc. Please see help(Device) for what the possible options are.
'''
if isinstance(jtype, str): jtype=jtype.lower() # make lower case
if jtype == 0 or jtype == 'complete':
self.__jointtype = 0
if jtype == 1 or jtype == 'normal fresnel' or jtype == 'fresnel':
self.__jointtype = 1
if jtype == 2 or jtype == 'oblique fresnel':
self.__jointtype = 2
if jtype == 3 or jtype == 'special complete' or jtype == 'special':
self.__jointtype = 3
if isinstance(jointoptions, dict):
self.__jointoptions=jointoptions
elif jointoptions!=None:
ErrStr = "set_joint_type(): `jointoptions` should be a dictionary. See help(Device) for the available options."
raise ValueError(ErrStr)
#end set_joint_type()
def get_joint_type(self, *args):
'''get_joint_type( [asnumeric] )
Get the joint type that will be placed between this waveguide and the next, when inserted into a Device.
asnumeric : { True | False }, optional
A True value will cause the output to be numeric, rather than string. See help(set_joint_type) for the numerical/string correlations. False by default.
(FYI, `asnumeric=True` is used in Device.buildNode() )
'''
try:
self.__jointtype # see if variable exists
except AttributeError:
# if the variable doesn't exist yet:
if DEBUG(): print "unset " + self.name + ".__jointtype --> 'complete' "
self.__jointtype = 0
if len(args) == 0: asnumeric = False # output as string by default
if len(args) == 1: asnumeric = args[0]
if len(args) > 1: raise ValueError("get_joint_type(): Too many arguments provided.")
if asnumeric:
out= self.__jointtype
else:
if self.__jointtype == 0:
out= 'complete'
elif self.__jointtype == 1:
out= 'normal fresnel'
elif self.__jointtype == 2:
out= 'oblique fresnel'
elif self.__jointtype == 3:
out= 'special complete'
#if DEBUG(): print "get_joint_type(): ", out
return out
#end get_joint_type()
def buildNode(self):
'''This may not make sense - only able to be built in a FimmProp Device.'''
print "Warning: Tapers & WGLenses are only built within a FimmProp Device, not as stand-alone components. Nothing done for Taper.buildNode()."
def get_buildNode_str(self, nodestring):
'''Return the string needed to build this Taper.'''
pass
#end class Taper
class Lens(Node):
'''Waveguide Lens, an element of a FimmProp Device. See FimmProp Manual sec. 4.3.10.
>>> NewLensObj = Lens(wgbase, radius [,optional kwargs] )
>>> NewLensObj.set_diameter( 20.0 )
>>> NewLensObj.set_type( 'polish' )
>>> DeviceObj = <pyfimm>.Device( WG1(100) + WG2(50.0) + NewLensObj(5.0) )
Parameters
----------
wgbase : Waveguide or Circ object
The lens will reference this WG object/node & deform it in the manner specified.
radius : float, required
Radius of curvature of this lens.
Optional Keyworded Arguments
----------------------------
side : { 'left' | 'right' }, optional
Which side of the element should have the curvature/lens applied. Defaults to curvature on the Right side.
type : {'distortion' | 'polish convex' | 'polish concave'}, optional
Which method to create taper with. Defaults to 'distortion', which distorts the passed WG into a lens. Polish instead removes parts of the structure to create the curved surface, but all interfaces in the WG remain straight.
diameter : float, optional
Diameter to distort, if not the entire WG diameter. This is the chord length of widest part of lens. If omitted, will use d1 & d2
d1 : float, optional
distance from bottom of WG to leave undistorted, if `diameter` not specified. Defaults to 0.
d2 : float, optional
distance from top of WG to leave undistorted, if `diameter` not specified. Defaults to 0.
etchDepth : float, optional
For Rect. WG: specify an etch depth for regions outside the lens region.
fill_index : float, optional
For Rect. WG: specify refractive-index to fill etched regions with.
minStepSizeFrac : float, optional
Minimum integration step size. Defaults to 0.01.
tolerance : float, optional
Integration tolerance. Defaults to 0.01.
joint_method {'complete', 'special complete', 'normal fresnel', oblique fresnel'}, optional, case insensitive
What type of joint/overlap calculation method to use in between the discretized (chopped-up) taper sections.
integration_order : { 0 | 1 }
Zero- or first-order integration.
enableEVscan : { True | False}
Enable mode scanner. True by default.
Methods
-------
This is a partial list - see `dir(Lens)` to see all methods.
Please see help on a specific function via `help(Lens)` for detailed up-to-date info on accepted arguments etc.
'''
'''
TO DO
Make sure the 'length' attribute is passed on to the Section - for all inputs to Section.
'''
def __init__(self,wgbase, radius, **kwargs):
#if len(args) >=0:
#self.name=None # unused?
#self.length=0.0 # unused?
#self.__materialdb = None # unused?
self.bend_radius = inf # inf means straight
self.built=False
self.autorun = True
self.origin = 'pyfimm'
#if len(args) == 1:
self.type = 'wglens' # unused!
self.radius = radius # radius of curvature of the taper
self.wgbase = wgbase # waveguide object
if isinstance( self.wgbase, Circ):
if len(self.wgbase.layers) < 2:
ErrStr = "Circ objects must have 2 or more layers to be converted into lenses."
raise UserWarning(ErrStr)
#elif len(args) == 2:
# self.type = 'wglens'
# self.wgbase = wgbase
# self.length = args[1]
#else:
# raise ValueError('Invalid number of inputs to WGLens(). See `help(pyfimm.WGLens)`.')
# find keyworded args, with defaults provided:
self.lens_type = str( kwargs.pop( 'type', 'distortion') ).lower()
self.side = str( kwargs.pop( 'side', 'right' ) ).lower()
#self.R = kwargs.pop( 'radius', None )
#if self.R: self.R = float(self.R)
self.D = kwargs.pop( 'diameter', None )
if self.D != None: self.D = float(self.D)
self.d1 = kwargs.pop( 'd1', None )
if self.d1 != None: self.d1 = float(self.d1)
self.d2 = kwargs.pop( 'd2', None )
if self.d2 != None: self.d2 = float(self.d2)
self.minSSfrac = float( kwargs.pop( 'minStepSizeFrac', 0.01 ) )
self.tolerance = float( kwargs.pop( 'tolerance', 0.01 ) )
self.etchdepth = kwargs.pop( 'etchDepth', None )
if self.etchdepth != None: self.etchdepth = float(self.etchdepth)
self.fillRIX = kwargs.pop( 'fill_index', None )
if self.fillRIX != None: self.fillRIX = float(self.fillRIX)
self.joint_method = kwargs.pop( 'joint_method', None )
self.int_method = kwargs.pop( 'integration_order', None )
self.enableevscan = kwargs.pop( 'enableEVscan', True )
#self.name = str( kwargs.pop( 'name', 'WGlens' )
#overwrite = bool( kwargs.pop( 'overwrite', False )
#self._checkNodeName(
#self.parent = kwargs.pop( 'parent', None )
if kwargs:
'''If there are unused key-word arguments'''
ErrStr = "WARNING: Lens(): Unrecognized keywords provided: {"
for k in kwargs.iterkeys():
ErrStr += "'" + k + "', "
ErrStr += "}. Continuing..."
print ErrStr
#end __init__
def __call__(self,):
'''Calling a Taper object with one argument creates a Section of passed length, and returns a list containing this new Section.
Usually passed directly to Device in the list of WG's as so:
>>> Device( WG1(10.5) + Lens1() + WG3(10.5) )
or
>>> Device( WG1(50.0) + Taper1(200.0) + WG3(75.0) )
'''
# Always call Section with 1 args
out = [ Section( self, self.get_length() ) ]
return out
def __add__(self,other):
'''If addition used, return list with this dev prepended'''
return [self,other]
def get_length(self):
'''Return the length in Z of this lens'''
# TO DO: match up this result with fimmwave's length result
if isinstance( self.wgbase, Waveguide):
w = self.wgbase.get_width()
elif isinstance( self.wgbase, Circ):
w = 2 * self.wgbase.get_radius()
r = self.radius
return r - r*np.sin( np.arccos(w/2/r) )
def set_diameter(self, diam):
'''Set diameter, D'''
self.D = diam
def get_diameter(self):
'''Get diameter, D'''
return self.D
def set_type(self, type):
'''Type of Lens.
Parameters
----------
type : { 'distortion', 'polish convex', 'polish concave' }
Which method to create taper with. Defaults to 'distortion', which distorts the passed WG into a lens. Polish instead removes parts of the structure to create the curved surface, but all interfaces in the WG remain straight.
'''
self.lens_type = type
def get_type(self):
'''Return the Lens type, one of: { 'distortion', 'polish convex', 'polish concave' }'''
return self.lens_type
def set_joint_type(self, jtype, jointoptions=None):
'''Set the joint type after this waveguide, if used in a Device.
type : { 'complete' | 'special complete' | 'normal fresnel' | 'oblique fresnel' }, case-insensitive
synonyms for 'complete' are { 0 }, and is also the default if unset.
synonyms for 'special complete' are { 3 | 'special' }
synonyms for 'normal fresnel' are { 1 | 'fresnel' }
synonyms for 'oblique fresnel' are { 2 }
jointoptions : Dictionary{} of options. Allows for the Device.buildnode() to set various joint options, such as angle etc. Please see help(Device) for what the possible options are.
'''
if isinstance(jtype, str): jtype=jtype.lower() # make lower case
if jtype == 0 or jtype == 'complete':
self.__jointtype = 0
if jtype == 1 or jtype == 'normal fresnel' or jtype == 'fresnel':
self.__jointtype = 1
if jtype == 2 or jtype == 'oblique fresnel':
self.__jointtype = 2
if jtype == 3 or jtype == 'special complete' or jtype == 'special':
self.__jointtype = 3
if isinstance(jointoptions, dict):
self.__jointoptions=jointoptions
elif jointoptions!=None:
ErrStr = "set_joint_type(): `jointoptions` should be a dictionary. See help(Device) for the available options."
raise ValueError(ErrStr)
#end set_joint_type()
def get_joint_type(self, *args):
'''get_joint_type( [asnumeric] )
Get the joint type that will be placed between this waveguide and the next, when inserted into a Device.
asnumeric : { True | False }, optional
A True value will cause the output to be numeric, rather than string. See help(set_joint_type) for the numerical/string correlations. False by default.
(FYI, `asnumeric=True` is used in Device.buildNode() )
Examples
--------
>>> Waveguide1.get_joint_type()
> 'complete'
>>> Waveguide1.get_joint_type( True )
> 0
'''
try:
self.__jointtype # see if variable exists
except AttributeError:
# if the variable doesn't exist yet:
if DEBUG(): print "unset " + self.name + ".__jointtype --> 'complete' "
self.__jointtype = 0
if len(args) == 0: asnumeric = False # output as string by default
if len(args) == 1: asnumeric = args[0]
if len(args) > 1: raise ValueError("get_joint_type(): Too many arguments provided.")
if asnumeric:
out= self.__jointtype
else:
if self.__jointtype == 0:
out= 'complete'
elif self.__jointtype == 1:
out= 'normal fresnel'
elif self.__jointtype == 2:
out= 'oblique fresnel'
elif self.__jointtype == 3:
out= 'special complete'
#if DEBUG(): print "get_joint_type(): ", out
return out
#end get_joint_type()
'''
*********************
**** TO DO *****
*********************
Still need to implement get/set:
set_side, set_... d1, d2, etchDepth, fill_index, joint_method etc.'''
#############################
#### Node Builders ####
#############################
def buildNode(self):
'''This does not make sense - only able to be built/inserted in a FimmProp Device.'''
print "Warning: Tapers & WGLenses are only built as part of a FimmProp Device, not as stand-alone components. Nothing done for WGLens.buildNode()."
def get_buildNode_str(self, nodestring):
'''Return the string needed to build this node.
`nodestring` should be the full FimmProp nodestring to reference the element in the Device, eg.
"app.subnodes[1].subnodes[3].cdev.eltlist[5]"
'''
if isinstance( self.wgbase, Waveguide ):
type='rect' # these 'types' are currently unused
elif isinstance( self.wgbase, Circ ):
type='cyl'
else:
ErrStr = "Unsupported object passed for basis waveguide of Lens, with type `%s`. "%(type(self.wgbase) + "Please pass a Waveguide or Circ object.")
raise ValueError(ErrStr)
fpstring = ""
fpstring += nodestring + ".svp.lambda=" + str( get_wavelength() ) + " \n"
if self.bend_radius == 0:
self.bend_radius = inf
print "Warning: bend_radius changed from 0.0 --> inf (straight waveguide)"
hcurv = 0
elif self.bend_radius == inf:
hcurv = 0
else:
hcurv = 1.0/self.bend_radius
#hcurv = 1/self.bend_radius
fpstring += nodestring + ".svp.hcurv=" + str(hcurv) + " \n"
fpstring += self.wgbase.get_solver_str(nodestring, target='wglens')
# which side of element should be lensed:
if self.side == 'left':
i = 0
elif self.side == 'right':
i = 1
else:
ErrStr = 'Invalid side for lens; please use "left" or "right" (default).'
raise ValueError(ErrStr)
fpstring += nodestring + ".which_end = " +str(i) + " \n"
# which type of lens
if self.lens_type.lower() == 'distortion':
i = 0
elif self.lens_type.lower() == 'polish convex':
i = 1
elif self.lens_type.lower() == 'polish concave':
i = 2
else:
ErrStr = 'Invalid option for lens type; please use "distortion" (default) or "polish convex" or "polish concave".'
raise ValueError(ErrStr)
fpstring += nodestring + ".lens_type = " +str(i) + " \n"
if self.D:
fpstring += nodestring + ".D = " +str(self.D) + " \n"
if self.d1:
fpstring += nodestring + ".d1 = " +str(self.d1) + " \n"
if self.d2:
fpstring += nodestring + ".d2 = " +str(self.d2) + " \n"
if self.etchdepth:
fpstring += nodestring + ".etchdepth = " +str(self.etchdepth) + " \n"
if self.fillRIX:
fpstring += nodestring + ".fillrix = " +str(self.fillRIX) + " \n"
# discretization options:
fpstring += nodestring + ".minSTPfrac = " +str(self.minSSfrac) + " \n"
fpstring += nodestring + ".tolerance = " +str(self.tolerance) + " \n"
if self.joint_method:
if self.joint_method.lower() == 'complete':
fpstring += nodestring + ".joint_method = " +str(0) + " \n"
elif self.joint_method.lower() == 'special complete':
fpstring += nodestring + ".joint_method = " +str(3) + " \n"
elif self.joint_method.lower() == 'normal fresnel':
fpstring += nodestring + ".joint_method = " +str(1) + " \n"
elif self.joint_method.lower() == 'oblique fresnel':
fpstring += nodestring + ".joint_method = " +str(2) + " \n"
else:
ErrStr = "Invalid option for Taper Joint Method `%s`" %self.joint_method
raise ValueError(ErrStr)
if self.int_method:
fpstring += nodestring + ".int_method = " +str(self.int_method) + " \n"
if self.enableevscan == False:
i=0
else:
i=1
fpstring += nodestring + ".enableevscan = " +str(i) + " \n"
fpstring += nodestring + ".R = " +str(self.radius) + " \n"
return fpstring
#end class WGLens
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,435
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__globals.py
|
'''pyFIMM's Global Variables
Contains/defines global variables - most importantly the fimmwave connection object `fimm`.
This separate file is required to prevent circular module imports, and enable nested-modules (eg. in /proprietary/) to use the FimmWave connection.
'''
import numpy as np
import matplotlib.pyplot as plt
'''
## The following were various tests for resolving cyclic imports - can probably be deleted
# import some pyFIMM objects/functions for global access from within the module:
## for Mode.py:
import pylab as pl
#import matplotlib.pyplot as plt
from pylab import cm # color maps
#import numpy as np
import math
import os # for filepath manipulations (os.path.join/os.mkdir/os.path.isdir)
from __pyfimm import get_N, get_wavelength
## For Device.py
from __pyfimm import Node, Project, Material, Layer, Slice
from __Waveguide import Waveguide # rectangular waveguide class
from __Circ import Circ # cylindrical (fiber) waveguide class
from __Tapers import Taper, Lens # import Taper/WGLens classes
from __Mode import Mode # import Mode class
## for pyfimm.py:
from __Device import Device # Device class
#import numpy as np
import datetime as dt # for date/time strings
import os.path # for path manipulation
import random # random number generators
## for Waveguide.py & Circ.py:
from numpy import inf # infinity, for hcurv/bend_radius
#from __pyfimm import * # all global modesolver params.
## for Tapers.py:
#from __pyfimm import * # import the main module (should already be imported), includes many 'rect' classes/funcs
#from __Mode import * # import Mode class
#from __Waveguide import * # import Waveguide class
#from __Circ import * # import Circ class
#from __pyfimm import DEBUG() # Value is set in __pyfimm.py
#from numpy import inf # infinity, for hcurv/bend_radius
#import numpy as np # math
'''
#print "**** __globals.py: Finished importing pyFIMM modules"
# Module-wide flags.  NOTE(review): a `global` statement at module level is a
# no-op; the statements are kept unchanged here as documentation of intent.
global pf_DEBUG
pf_DEBUG = False # set to true for verbose outputs onto Python console - applies to all submodules/files
# can be changed at run-time via `set/unset_DEBUG()`
global pf_WARN
pf_WARN = True # globally set warning mode
# custom colormaps:
from colormap_HotCold import cm_hotcold
# Create FimmWave connection object.
import PhotonDesignLib.pdPythonLib as pd
global fimm
fimm = pd.pdApp() # used in all scripts to send commands, via `fimm.Exec('CommandsToSend')`
pdApp = fimm # alias to the above.
# These override the value set above in `pf_DEBUG`
def set_DEBUG():
    '''Enable verbose debugging output for all pyFIMM submodules.

    Sets the module-level flag `pf_DEBUG`, which is queried via `DEBUG()`.'''
    global pf_DEBUG
    pf_DEBUG = True
def unset_DEBUG():
    '''Disable verbose debugging output (clears the module-level `pf_DEBUG` flag).'''
    global pf_DEBUG
    pf_DEBUG = False
def DEBUG():
    '''Return the current state (True/False) of the module-level `pf_DEBUG` flag.'''
    return pf_DEBUG
# the global WARN is not currently implemented in the main functions yet.
def set_WARN():
    '''Enable warning messages (sets the module-level `pf_WARN` flag, queried via `WARN()`).'''
    global pf_WARN
    pf_WARN = True
def unset_WARN():
    '''Disable warning messages (clears the module-level `pf_WARN` flag).'''
    global pf_WARN
    pf_WARN = False
def WARN():
    '''Return the current state (True/False) of the module-level `pf_WARN` flag.'''
    return pf_WARN
def AMF_FolderStr():
    '''Return the name of the directory in which pyFIMM stores temporary files.'''
    tempdir_name = 'pyFIMM_temp'
    return tempdir_name
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,436
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__Waveguide.py
|
'''Waveguide class, part of pyFIMM.'''
#from pylab import * # must kill these global namespace imports!
#from numpy import *
from __globals import * # import global vars & FimmWave connection object
from __pyfimm import * # import the main module (should already be imported), includes many 'rect' classes/funcs
from __Mode import Mode # import Mode class
from numpy import inf # infinity, for hcurv/bend_radius
class Waveguide(Node):
"""pyFimm Waveguide object, a collection of concatenated Slices.
Waveguide is an 'RWG' cartesian-coordinate waveguide (eg. rectangular channel, ridge etc.).
Waveguide is a 2-D index profile if called with just one argument (a summation of Slices).
When a Length is supplied, this becomes a 3D structure.
This inherits from the pyFIMM Node object.
Parameters
----------
layers : list
List containing the Slice objects used to generate this Waveguide.
thickness : float, optional
Apply a 3D length to this waveguide, in the direction of propagation.
name : string, optional
If building the node at creation, supply a name for this node.
parentNode : string, optional
If building the node at creation, provide the parent (Project/Device) Node object for this waveguide.
Attributes
----------
type : {'rect_waveguide'}
Currently = 'rect_waveguide'. May be deprecate as it is unused.
length : float
Apply a 3D length to this waveguide, in the direction of propagation.
slices : list
List containing all the Slices the Waveguide is constructed with.
etched_slices : list
Contains Slices with any specified etch depths applied.
etched_slices[i] = Slice(slc.layers,slc.width,slc.etch)
bend_radius : float
Bend Radius of the waveguide. The default value of `inf` indicates a straight waveguide.
Defined from the center of the waveguide cross-section to the axis of the bend.
Positive value means WG bends to the LEFT (so Right-Hand boundaries will see the radiatiing bend modes, if any). Negative value bends the opposite way.
built : { True | False }
Has this node been built in FimmWave yet?
nodestring : string
The fimmwave string pointing to this waveguide's node. eg. "app.subnodes[1].subnodes[3]"
Omits the trailing period.
Methods
-------
This is a partial list - see `dir(pf.Waveguide)` to see all methods.
Please see help on a specific function via `help(pf.Waveguide)` for detailed up-to-date info on accepted arguments etc.
mode(modenum)
modenum: int
Returns the specified Mode object. Mode(0) is usually the fundamental mode, depending on the solver options.
Subsequent Mode functions can be called, such as
>>> ThisWaveguide.mode(0).plot('Ez')
get_width()
Return total width of this Waveguide, by adding up width of each contained Slice.
get_slice_widths()
Return widths of each Slice in this Waveguide, as list.
buildNode( [name=, parentNode=] )
Build the node of this Ridge/Rectangular (RWG) waveguide in FimmWave. Sends all the FimmWave commands for this waveguide node, including modesolver parameters.
get_buildNode_str(nodestr [, obj=None, target=None])
Return the fimmwave commands needed to build this waveguide node. This command does not create the new waveguide node first (ie. it does not run `app.subnodes[1].addsubnode(rwguideNode, WGname)` )
So you must create the appropriate type of waveguide node first, and then issue the commands returned by this func.
The massive multi-line string includes all the modesolver settings needed to calculate the waveguide afterwards.
Parameters
----------
nodestr : string
Supply the string pointing to the new WG node to build under, for example `app.subnodes[1].subnodes[1]`
After a WG has been built, this parameter is available via the variable `WG_Object.nodestring`
Returns
-------
wgString : fimmwave command string
set_autorun()
Set the fimmwave "autorun" flagm which allows FimmProp to calc the modes when needed.
unset_autorun():
Unset the fimmwave "autorun" flag.
set_material_database( PathString )
Not recommended - it is safer to use a global material file, and have that file `include` other material files. FimmProp Devices only support a single global materials file.
PathString : string
Path to a FimmWave material database (*.mat) for this waveguide node, if different from the globally set one (see `set_material_database()` )
get_material_database()
Returns path to FimmWave material database (*.mat) for this waveguide node, if set.
unset_material_database()
Unsets a custom material database for this waveguide node, such that the globally set one (see `set_material_database()` ) will be used instead.
set_joint_type(type)
Set the type of FimmProp joint to use after this waveguide has been inserted into a Device.
get_joint_type(type)
Get the type of FimmProp joint to use after this waveguide has been inserted into a Device.
set_wavelength( wl )
Set the wavelength of this guide.
get_wavelength()
Return the wavelength of this guide.
Examples
--------
Create the Waveguide like so:
>>> wg = Waveguide( slice1(1.5) + slice2(0.50) + slice3(1.5) )
or
MySlices = slice1(1.5) + slice2(0.50) + slice3(1.5)
>>> wg = Waveguide( MySlices, WG_Length ) # WG_Length in microns
or, *after* creating the Waveguide, apply a length by calling with one arg:
>>> wg( WG_Length )
Then build the FimmWave node & calculate the modes:
>>> wg.set_parent('wg_prj')
>>> wg.name = 'Fimmwave RWG'
>>> wg.buildNode() # Sends all the FimmWave commands to generate the waveguide.
>>> wg.calc()
or build the node in one line:
>>> wg.buildNode( name='Fmmwave RWG', parentNode=wg_prj )
>>> wg.calc() # calculate modes
"""
    def __init__(self,*args):
        '''Construct a Waveguide from a list of Slices.

        Parameters
        ----------
        *args : ( slices [, length] )
            slices : list of Slice objects (required for any setup to occur).
            length : float, optional - 3D length in the propagation direction.

        NOTE(review): with zero arguments no attributes are initialized at all;
        presumably only used internally - confirm.
        '''
        if len(args) >= 1:
            self.type = 'rect_waveguide' # not currently used
            self.autorun = True   # let FimmProp calculate modes when needed
            self.name = None
            self.built=False   # not yet built in FimmWave
            self.length = 0.0
            self.__wavelength = get_wavelength() # get global wavelength
            self.modes = []
            self.slices = args[0]
            self.etched_slices = []
            self.bend_radius = inf # Default to inf -straight. Defined from center of WG slice.
            self.__materialdb = None   # None means "use the global material DB"
            # apply Etch Depths for each Slice
            for slc in args[0]:
                # clamp the etch depth into [0, slice thickness]
                etchDepth = slc.etch
                if etchDepth > slc.thickness():
                    etchDepth = slc.thickness()
                elif etchDepth < 0:
                    etchDepth = 0
                if etchDepth != 0:
                    # Rebuild the layer stack: layers fully below the etch are
                    # kept; the first layer intersecting the etch is shortened,
                    # and a new `top_layer` (same material as the original
                    # topmost layer) fills the etched region.
                    etched_layer_array = []
                    for nn in range(0,len(slc)):
                        if slc.thickness() - sum(slc.layer_thicknesses()[0:nn+1]) > etchDepth:
                            etched_layer_array += [slc.layers[nn]]
                        else:
                            top_layer = Layer(slc.layers[len(slc)-1].material,etchDepth,False)
                            etched_layer = Layer(slc.layers[nn].material,sum(slc.layer_thicknesses()[nn:len(slc)])-etchDepth,slc.layers[nn].cfseg)
                            etched_layer_array += [etched_layer]
                            etched_layer_array += [top_layer]
                            self.etched_slices.append(Slice(etched_layer_array,slc.width,slc.etch))
                            break
                elif etchDepth == 0:
                    # No etch: drop a zero-thickness top layer if present, then keep the slice as-is.
                    # NOTE(review): this mutates the caller's Slice (`del slc.layers[...]`) - confirm intended.
                    if slc.layer_thicknesses()[len(slc)-1] == 0.0:
                        del slc.layers[len(slc)-1]
                    self.etched_slices.append(Slice(slc.layers,slc.width,slc.etch))
        if len(args) ==2:
            self.length = args[1] # apply passed length
    #end init()
def __str__(self):
'''What to display if the Waveguide is `print`ed.'''
string = ""
if self.name: string += "Name: '"+self.name+"'\n"
for n,slc in enumerate(self.etched_slices):
if n ==0:
string += 5*'-' + ' Leftmost Slice: ' + 5*'-' + '\nwidth = %7.4f \n' % slc.width
elif n == (len(self.etched_slices)-1):
string += 5*'-' + ' Rightmost Slice: ' + 5*'-' + '\nwidth = %7.4f \n' % slc.width
else:
string += 5*'-' + ' Middle Slice %i: ' % n + 5*'-' + '\nwidth = %7.4f \n' % slc.width
for i,lyr in enumerate(slc.layers):
if i == 0:
string += 3*'*' + ' Bottom Layer: ' + 3*'*' + '\n%s' % lyr + '\n'
elif i == (len(slc.layers)-1):
string += 3*'*' + ' Top Layer: ' + 3*'*' + '\n%s' % lyr + '\n'
else:
string += 3*'*' + ' Middle Layer %i: ' % i + 3*'*' + '\n%s' % lyr + '\n'
return string
    def __len__(self):
        '''Number of Slices in this Waveguide (supports `len(wg)`).'''
        return len(self.slices)
    def __call__(self,length):
        '''Calling a WG object with one argument creates a Section of passed length, and returns a list containing this new Section.

        Usually passed directly to Device as so:
        >>> NewDevice = pyfimm.Device( WG1(10.5) + WG2(1.25) + WG3(10.5) )

        Parameters
        ----------
        length : float
            Pass a length (microns). This will be applied to the returned Section Object, which will also contain a reference to this waveguide object.

        Returns
        -------
        list
            A single-element list holding the new Section.
        '''
        # Instantiate a Section obj with 2 args
        out = [ Section( self, length ) ]
        return out
    def __add__(self,other):
        '''Additions will tack on each waveguide, presumably in the propagation direction, for contatenating multiple waveguides. Returns a list of all Waveguide objects.'''
        '''CHECK THIS "presumably" statement!!!'''
        # NOTE(review): returns a plain 2-element list, so chaining three or
        # more (`wg1+wg2+wg3`) evaluates `list + Waveguide`, which looks
        # unsupported - confirm how callers chain additions.
        return [self,other]
def get_width(self):
'''Return total width of this Waveguide, by adding up width of each contained Slice.'''
wdth = 0.0
for slc in self.slices:
wdth += slc.width
return wdth
def width(self):
'''Backwards compatibility only. Should Instead get_width().'''
print "Deprecation Warning: width(): Use get_width() instead."
return get_width()
def get_slice_widths(self):
'''Return widths of each Slice in this Waveguide.'''
slc_wdths = []
for slc in self.slices:
slc_wdths.append(slc.width)
return slc_wdths
def slice_widths(self):
'''Backwards compatibility only. Should Instead get_slice_widths().'''
print "Deprecation Warning: slice_widths(): Use get_slice_widths() instead."
return get_slice_widths(self)
    def mode(self,modeN):
        '''Waveguide.mode(int): Return the specified pyFimm Mode object for this waveguide.

        Parameters
        ----------
        modeN : int
            Mode number; Mode(0) is usually the fundamental mode, depending on the solver options.
        '''
        # third arg is the fimmwave prefix string for this WG's eigenvalue list
        return Mode(self, modeN,"app.subnodes[{"+str(self.parent.num)+"}].subnodes[{"+str(self.num)+"}].evlist.")
    def calc(self,polish=False):
        '''Calculate/Solve for the modes of this Waveguide. Build the node if needed.

        Parameters
        ----------
        polish : bool, optional
            If True, polish the already-found modes (fimmwave `polishevs`);
            if False (default), run the normal mode calculation (`update()`).
        '''
        if not self.built: self.buildNode()
        if polish:
            fimm.Exec("app.subnodes[{"+str(self.parent.num)+"}].subnodes[{"+str(self.num)+"}].evlist.polishevs")
        else:
            fimm.Exec("app.subnodes[{"+str(self.parent.num)+"}].subnodes[{"+str(self.num)+"}].evlist.update()")
    def set_autorun(self):
        '''FimmProp Device will automatically calculate modes as needed.'''
        # flag is consumed when the node's MOLAB options are written out
        self.autorun = True
    def unset_autorun(self):
        '''FimmProp Device will Not automatically calculate modes as needed.'''
        # flag is consumed when the node's MOLAB options are written out
        self.autorun = False
    def set_material_database(self, path):
        '''Set a material database for this waveguide node (overrides the global setting of `pyfimm.set_material_database(path)` ).'''
        # stored (name-mangled) on the instance; None means "use the global DB"
        self.__materialdb = str(path)
    def get_material_database(self):
        '''Returns a custom material database for this waveguide node.

        Returns
        -------
        str or None
            The path set via set_material_database(), or None if unset.
        '''
        return self.__materialdb
    def unset_material_database(self):
        '''Clears the custom material database for this waveguide node. The global setting `pyfimm.set_material_database(path)` will be used instead.'''
        # None signals "no per-node override"
        self.__materialdb = None
def set_joint_type(self, jtype, jointoptions=None):
'''Set the joint type after (on right side of) this waveguide, if used in a Device.
type : { 'complete' | 'special complete' | 'normal fresnel' | 'oblique fresnel' }, case-insensitive
synonyms for 'complete' are { 0 }, and is also the default if unset.
synonyms for 'special complete' are { 3 | 'special' }
synonyms for 'normal fresnel' are { 1 | 'fresnel' }
synonyms for 'oblique fresnel' are { 2 }
jointoptions : Dictionary{} of options. Allows for the Device.buildnode() to set various joint options, such as angle etc. Please see help(Device) for what the possible options are.
'''
if isinstance(jtype, str): jtype=jtype.lower() # make lower case
if jtype == 0 or jtype == 'complete':
self.__jointtype = 0
if jtype == 1 or jtype == 'normal fresnel' or jtype == 'fresnel':
self.__jointtype = 1
if jtype == 2 or jtype == 'oblique fresnel':
self.__jointtype = 2
if jtype == 3 or jtype == 'special complete' or jtype == 'special':
self.__jointtype = 3
if isinstance(jointoptions, dict):
self.__jointoptions=jointoptions
elif jointoptions!=None:
ErrStr = "set_joint_type(): `jointoptions` should be a dictionary. See help(Device) for the available options."
raise ValueError(ErrStr)
#end set_joint_type()
def get_joint_type(self, *args):
'''get_joint_type( [asnumeric] )
Get the joint type that will be placed between this waveguide and the next, when inserted into a Device.
asnumeric : { True | False }, optional
A True value will cause the output to be numeric, rather than string. See help(set_joint_type) for the numerical/string correlations. False by default.
(FYI, `asnumeric=True` is used in Device.buildNode() )
'''
try:
self.__jointtype # see if variable exists
except AttributeError:
# if the variable doesn't exist yet:
if DEBUG(): print "unset " + self.name + ".__jointtype --> 'complete' "
self.__jointtype = 0
if len(args) == 0: asnumeric = False # output as string by default
if len(args) == 1: asnumeric = args[0]
if len(args) > 1: raise ValueError("get_joint_type(): Too many arguments provided.")
if asnumeric:
out= self.__jointtype
else:
if self.__jointtype == 0:
out= 'complete'
elif self.__jointtype == 1:
out= 'normal fresnel'
elif self.__jointtype == 2:
out= 'oblique fresnel'
elif self.__jointtype == 3:
out= 'special complete'
#if DEBUG(): print "get_joint_type(): ", out
return out
#end get_joint_type()
def set_wavelength(self, wl):
'''Set the wavelength for the waveguide. The object use this wavelength in their MOLAB options.
Note that, after building, the object's wavelength (`WGobj.get_wavelength()` ) can be different from the global pyFIMM wavelength (`pyFIMM.get_wavelength`).
The global setting (`pyFIMM.set_wavelength()`) is acquired when the object is first created.
Parameters
----------
wl : float
The wavelength in micrometers.
'''
if self.built:
self.__wavelength = float(wl)
fimm.Exec( self.nodestring + ".evlist.svp.lambda = " + str(self.__wavelength) + " \n" )
else:
self.__wavelength = float(wl)
    def get_wavelength(self):
        '''Return the wavelength (float) for this specific Device (may be different from the global pyFIMM wavelength in `pyFIMM.get_wavelength()` after the guide is built).'''
        # private attribute set in __init__ / set_wavelength()
        return self.__wavelength
####################################################
#### Rectangular Waveguide Node Construction ####
####################################################
    def buildNode(self, name=None, parent=None, overwrite=False, warn=True, update_node=False):
        '''Build the Fimmwave node of this Ridge/Rectangular (RWG) waveguide.

        Sends all the FimmWave commands for this waveguide node, including
        modesolver parameters, and sets `self.num`, `self.nodestring` and
        `self.built` on success.

        Parameters
        ----------
        name : string, optional
            Provide a name for this waveguide node.
        parent : Node object, optional
            provide the parent (Project/Device) Node object for this waveguide.
        overwrite : { True | False }, optional
            Overwrite existing node of same name?  Defaults to False, which will rename the node if it has the same name as an existing node.
        warn : {True | False}, optional
            Print notification if overwriting a node?  True by default.
        update_node : {True | False}, optional
            False will create a new node and True re-builds the same node
        '''
        if name: self.name = name
        if parent: self.parent = parent
        if DEBUG(): print "Waveguide.buildNode(): self.parent.num=", self.parent.num
        nodestring="app.subnodes["+str(self.parent.num)+"]"
        if update_node:
            # re-use the existing node number when rebuilding in place
            node_num = self.num
        else:
            self._checkNodeName(nodestring, overwrite=overwrite, warn=warn) # will alter the node name if needed
            # a new node is appended after the last existing subnode
            N_nodes = fimm.Exec(nodestring + ".numsubnodes()")
            node_num = int(N_nodes+1)
            wgString = self.parent.nodestring + ".addsubnode(rwguideNode,"+str(self.name)+")"+"\n" # make RWG node
        self.num = node_num
        self.nodestring = self.parent.nodestring + ".subnodes["+str(self.num)+"]"
        if update_node:
            fimm.Exec( self.get_buildNode_str(self.nodestring, warn=warn, update_node=update_node) )
        else:
            # create the node first, then configure it
            fimm.Exec( wgString + self.get_buildNode_str(self.nodestring, warn=warn, update_node=update_node) )
        self.built=True
    #end buildNode()
def get_buildNode_str(self, nodestr, obj=None, target=None, warn=True, update_node=False):
'''Return the node construction string for either a standalone waveguide or device.
This is for a Rectangular/Planar (RWG) waveguide.
The new Waveguide subnode should be created BEFORE calling this function, so that you can pass the correct node string.
Parameters
----------
nodestr : str
The entire base-string to address the necessary node. For example:
>>> nodestr = "app.subnodes[1].subnodes[2]"
the subnode referenced should be the NEW subnode to be created (ie. one higher than previously in existence). In normal operation, the new subnode has already been created by WG.buildnode().
warn : { True | False }, optional
Print warnings about default values etc.?
obj : Circ object, optional
Defaults to `self`. Can pass another object instead, to get the buildNode string for that
target : { 'wglens' | 'taper' }, optional
Omits certain parameters from being set depending on target. Used for building tapers.
update_node : {True | False}, optional
False will create a new node and True re-builds the same node
'''
if not obj: obj=self
# build RWG Node
if DEBUG(): print "Waveguide: "+self.name+".__get_buildNode_str(): "
# check for custom material DB in this WG node.
if not self.__materialdb:
'''Use global material DB if this WG doesn't have a custom one set.'''
matDB = get_material_database()
else:
matDB = self.__materialdb
#if DEBUG(): print "Using custom matDB: `%s`"%matDB
wgString="" # the string to return
if matDB:
#if DEBUG(): print "setting MaterBase file to: '%s'"%matDB
wgString += nodestr + ".setmaterbase(" + matDB + ") \n"
sliceN = 1
for slc in obj.slices:
if update_node:
wgString = nodestr + ".slices[{"+str(sliceN)+"}].width = "+str(slc.width)+"\n"
wgString += nodestr + ".slices[{"+str(sliceN)+"}].etch = "+str(slc.etch)+"\n"
else:
wgString += nodestr + ".insertslice({"+str(sliceN)+"})"+"\n"
wgString += nodestr + ".slices[{"+str(sliceN)+"}].width = "+str(slc.width)+"\n"
wgString += nodestr + ".slices[{"+str(sliceN)+"}].etch = "+str(slc.etch)+"\n"
wgString += (len(slc.layers)-1)*(nodestr + ".slices[{"+str(sliceN)+"}].insertlayer(1)"+"\n")
layerN = 1
for lyr in slc.layers:
wgString += nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].size = "+str(lyr.thickness)+"\n"
if lyr.material.type == 'rix':
wgString += nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].nr11 = "+str(lyr.n())+"\n"+ \
nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].nr22 = "+str(lyr.n())+"\n"+ \
nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].nr33 = "+str(lyr.n())+"\n"
elif lyr.material.type == 'mat':
if DEBUG(): print "Layer %i: mx="%(layerN), lyr.material.mx, " // my=", lyr.material.my
wgString += nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].setMAT(" + str(lyr.material.mat) + ") \n"
if lyr.material.mx: wgString += nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].mx = "+str(lyr.material.mx)+"\n"
if lyr.material.my: wgString += nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].my = "+str(lyr.material.my)+"\n"
if lyr.cfseg:
wgString += nodestr + ".slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].cfseg = "+str(1)+"\n"
layerN += 1
sliceN += 1
#end for(slices)
# build boundary conditions - metal by default
if get_left_boundary() is None:
'''Default to Electric Wall/metal'''
if warn: print self.name + ".buildNode(): Left_Boundary: Using electric wall boundary."
wgString += nodestr + ".lhsbc.type = 1"+"\n"
else:
if get_left_boundary().lower() == 'metal' or get_left_boundary().lower() == 'electric wall':
wgString += nodestr + ".lhsbc.type = 1"+"\n"
elif get_left_boundary().lower() == 'magnetic wall':
wgString += nodestr + ".lhsbc.type = 2"+"\n"
elif get_left_boundary().lower() == 'periodic':
wgString += nodestr + ".lhsbc.type = 3"+"\n"
elif get_left_boundary().lower() == 'transparent':
wgString += nodestr + ".lhsbc.type = 4"+"\n"
elif get_left_boundary().lower() == 'impedance':
wgString += nodestr + ".lhsbc.type = 5"+"\n"
else:
print self.name + ".buildNode(): Invalid input to set_left_boundary()"
if get_right_boundary() is None:
'''Default to Electric Wall/metal'''
if warn: print self.name + ".buildNode(): Right_Boundary: Using electric wall boundary."
wgString += nodestr + ".rhsbc.type = 1"+"\n"
else:
if get_right_boundary().lower() == 'metal' or get_right_boundary().lower() == 'electric wall':
wgString += nodestr + ".rhsbc.type = 1"+"\n"
elif get_right_boundary().lower() == 'magnetic wall':
wgString += nodestr + ".rhsbc.type = 2"+"\n"
elif get_right_boundary().lower() == 'periodic':
wgString += nodestr + ".rhsbc.type = 3"+"\n"
elif get_right_boundary().lower() == 'transparent':
wgString += nodestr + ".rhsbc.type = 4"+"\n"
elif get_right_boundary().lower() == 'impedance':
wgString += nodestr + ".rhsbc.type = 5"+"\n"
else:
print self.name + ".buildNode(): Invalid input to set_right_boundary()"
if get_bottom_boundary() is None:
'''Default to Electric Wall/metal'''
if warn: print self.name + ".buildNode(): Bottom_Boundary: Using electric wall boundary."
wgString += nodestr + ".botbc.type = 1"+"\n"
else:
if get_bottom_boundary().lower() == 'metal' or get_bottom_boundary().lower() == 'electric wall':
wgString += nodestr + ".botbc.type = 1"+"\n"
elif get_bottom_boundary().lower() == 'magnetic wall':
wgString += nodestr + ".botbc.type = 2"+"\n"
elif get_bottom_boundary().lower() == 'periodic':
wgString += nodestr + ".botbc.type = 3"+"\n"
elif get_bottom_boundary().lower() == 'transparent':
wgString += nodestr + ".botbc.type = 4"+"\n"
elif get_bottom_boundary().lower() == 'impedance':
wgString += nodestr + ".botbc.type = 5"+"\n"
else:
print self.name + ".buildNode(): Invalid input to set_bottom_boundary()"
if get_top_boundary() is None:
'''Default to Electric Wall/metal'''
if warn: print self.name + ".buildNode(): Top_Boundary: Using electric wall boundary."
wgString += nodestr + ".topbc.type = 1"+"\n"
else:
if get_top_boundary().lower() == 'metal' or get_top_boundary().lower() == 'electric wall':
wgString += nodestr + ".topbc.type = 1"+"\n"
elif get_top_boundary().lower() == 'magnetic wall':
wgString += nodestr + ".topbc.type = 2"+"\n"
elif get_top_boundary().lower() == 'periodic':
wgString += nodestr + ".topbc.type = 3"+"\n"
elif get_top_boundary().lower() == 'transparent':
wgString += nodestr + ".topbc.type = 4"+"\n"
elif get_top_boundary().lower() == 'impedance':
wgString += nodestr + ".topbc.type = 5"+"\n"
else:
print self.name + ".buildNode(): Invalid input to set_top_boundary()"
if get_x_pml() is None:
'''Default to 0.0'''
wgString += nodestr + ".lhsbc.pmlpar = {0.0}"+"\n"+ \
nodestr + ".rhsbc.pmlpar = {0.0}"+"\n"
else:
wgString += nodestr + ".lhsbc.pmlpar = {"+str(get_x_pml())+"}"+"\n"+ \
nodestr + ".rhsbc.pmlpar = {"+str(get_x_pml())+"}"+"\n"
if get_y_pml() is None:
'''Default to 0.0'''
wgString += nodestr + ".topbc.pmlpar = {0.0}"+"\n"+ \
nodestr + ".botbc.pmlpar = {0.0}"+"\n"
else:
wgString += nodestr + ".topbc.pmlpar = {"+str(get_y_pml())+"}"+"\n"+ \
nodestr + ".botbc.pmlpar = {"+str(get_y_pml())+"}"+"\n"
wgString += self.get_solver_str(nodestr, obj=obj, target=target)
#fimm.Exec(wgString)
return wgString
#end get_buildNodeStr()
def get_solver_str(self, nodestr, obj=None, target=None):
''' Return only the Solver ('svp') and mode solver (MOLAB, 'mpl') params for creating this node.
Used for building Tapers, when the WG is already built otherwise.'''
if not obj: obj=self
#if DEBUG(): print "Waveguide.get_solver_str()... "
wgString = ""
# set solver parameters
if target == 'wglens' or target == 'taper':
'''hcurv/bend_radius is set separately for Taper or WGLens, since they could have a different curvature from their base WG object.'''
pass
else:
nodestr = nodestr + ".evlist" #WG nodes set their solver params under this subheading
if obj.bend_radius == 0:
obj.bend_radius = inf
if warn: print self.name + ".buildNode(): Warning: bend_radius changed from 0.0 --> inf (straight waveguide)"
hcurv = 0
elif obj.bend_radius == inf:
hcurv = 0
else:
hcurv = 1.0/obj.bend_radius
wgString += nodestr + ".svp.hcurv={"+str(hcurv)+"}"+"\n"
#end if(WGlens/Taper)
#autorun & speed:
if self.autorun:
wgString += nodestr + ".mlp.autorun=1"+"\n"
else:
wgString += nodestr + ".mlp.autorun=0"+"\n"
if get_solver_speed():
wgString += nodestr + ".mlp.speed=1"+"\n" #0=best, 1=fast
else:
wgString += nodestr + ".mlp.speed=0"+"\n" #0=best, 1=fast
if get_horizontal_symmetry() is None:
wgString += nodestr + ".svp.hsymmetry=0"+"\n"
else:
if get_horizontal_symmetry() == 'none':
wgString += nodestr + ".svp.hsymmetry=0"+"\n"
elif get_horizontal_symmetry() == 'ExSymm':
wgString += nodestr + ".svp.hsymmetry=1"+"\n"
elif get_horizontal_symmetry() == 'EySymm':
wgString += nodestr + ".svp.hsymmetry=2"+"\n"
else:
print self.name + ".buildNode(): Invalid horizontal_symmetry. Please use: none, ExSymm, or EySymm"
if get_vertical_symmetry() is None:
wgString += nodestr + ".svp.vsymmetry=0"+"\n"
else:
if get_vertical_symmetry() == 'none':
wgString += nodestr + ".svp.vsymmetry=0"+"\n"
elif get_vertical_symmetry() == 'ExSymm':
wgString += nodestr + ".svp.vsymmetry=1"+"\n"
elif get_vertical_symmetry() == 'EySymm':
wgString += nodestr + ".svp.vsymmetry=2"+"\n"
else:
print self.name + ".buildNode(): Invalid vertical_symmetry. Please use: none, ExSymm, or EySymm"
if get_N() is None:
'''Default to 10'''
wgString += nodestr + ".mlp.maxnmodes={10}"+"\n"
else:
wgString += nodestr + ".mlp.maxnmodes={"+str(get_N())+"}"+"\n"
if get_NX() is None:
'''Default to 60'''
wgString += nodestr + ".mlp.nx={60}"+"\n"
nx_svp = 60
else:
wgString += nodestr + ".mlp.nx={"+str(get_NX())+"}"+"\n"
nx_svp = get_NX()
if get_NY() is None:
'''Default to 60'''
wgString += nodestr + ".mlp.ny={60}"+"\n"
ny_svp = 60
else:
wgString += nodestr + ".mlp.ny={"+str(get_NY())+"}"+"\n"
ny_svp = get_NY()
if get_min_TE_frac() is None:
'''Default to 0.0'''
wgString += nodestr + ".mlp.mintefrac={0}"+"\n"
else:
wgString += nodestr + ".mlp.mintefrac={"+str(get_min_TE_frac())+"}"+"\n"
if get_max_TE_frac() is None:
'''Default to 100.0'''
wgString += nodestr + ".mlp.maxtefrac={100}"+"\n"
else:
wgString += nodestr + ".mlp.maxtefrac={"+str(get_max_TE_frac())+"}"+"\n"
if get_min_EV() is None:
'''Default to -1e50'''
wgString += nodestr + ".mlp.evend={-1e+050}"+"\n"
else:
wgStrint += nodestr + ".mlp.evend={"+str(get_min_EV())+"}"+"\n"
if get_max_EV() is None:
'''Default to +1e50'''
wgString += nodestr + ".mlp.evstart={1e+050}"+"\n"
else:
wgStrint += nodestr + ".mlp.evend={"+str(get_max_EV())+"}"+"\n"
if get_RIX_tol() is None:
rix_svp = 0.010000
else:
rix_svp = get_RIX_tol()
if get_N_1d() is None:
n1d_svp = 30
else:
n1d_svp = get_N_1d()
if get_mmatch() is None:
mmatch_svp = 0
else:
mmatch_svp = get_mmatch()
if get_mode_solver() is None:
print self.name + '.buildNode(): Using default mode solver: "vectorial FDM real" '
wgString += nodestr + ".svp.solvid=71"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
else:
if get_mode_solver().lower() == 'vectorial FDM real'.lower():
wgString += nodestr + ".svp.solvid=71"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTE FDM real'.lower():
wgString += nodestr + ".svp.solvid=23"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTM FDM real'.lower():
wgString += nodestr + ".svp.solvid=39"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'vectorial FDM complex'.lower():
wgString += nodestr + ".svp.solvid=79"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTE FDM complex'.lower():
wgString += nodestr + ".svp.solvid=31"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTM FDM complex'.lower():
wgString += nodestr + ".svp.solvid=47"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'vectorial FMM real'.lower():
wgString += nodestr + ".svp.solvid=65"+"\n"
solverString = nodestr + ".svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTE FMM real'.lower():
wgString += nodestr + ".svp.solvid=17"+"\n"
solverString = nodestr + ".svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTM FMM real'.lower():
wgString += nodestr + ".svp.solvid=33"+"\n"
solverString = nodestr + ".svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'vectorial FMM complex'.lower():
wgString += nodestr + ".svp.solvid=73"+"\n"
solverString = nodestr + ".svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTE FMM complex'.lower():
wgString += nodestr + ".svp.solvid=25"+"\n"
solverString = nodestr + ".svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTM FMM complex'.lower():
wgString += nodestr + ".svp.solvid=41"+"\n"
solverString = nodestr + ".svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
else:
ErrStr = self.name + '.buildNode(): Invalid Modesolver String for Rectangular Waveguide (RWG): ' + str(get_mode_solver())
ErrStr += '\n Please see `help(pyfimm.set_mode_solver)`, and use one of the following:'
ErrStr += '\n vectorial FDM real, semivecTE FDM real,semivecTM FDM real, '
ErrStr += '\n vectorial FDM complex, semivecTE FDM complex , semivecTM FDM complex, '
ErrStr += '\n vectorial FMM real, semivecTE FMM real, semivecTM FMM real, '
ErrStr += '\n vectorial FMM complex, semivecTE FMM complex, or semivecTM FMM complex'
raise ValueError( ErrStr )
# Set wavelength:
wgString += self.nodestring + ".evlist.svp.lambda = %f \n"%(self.get_wavelength() )
wgString += solverString
return wgString
########################################
#### Old Deprecated Functions ####
def __buildNode2(self, name=None, parentNode=None):
'''Build the Fimmwave node of this Ridge/Rectangular (RWG) waveguide.
NOTE: This function has been replaced with a `buildNode` func. which uses the more extensible get_buildNode_str().
Parameters
----------
name : string, optional
Provide a name for this waveguide node.
parent : Node object, optional
provide the parent (Project/Device) Node object for this waveguide.'''
if name: self.name = name
if parentNode: self.parent = parentNode
if DEBUG(): print self.name + ".buildNode(): self.parent.num=", self.parent.num
N_nodes = fimm.Exec("app.subnodes["+str(self.parent.num)+"].numsubnodes()")
node_num = int(N_nodes+1)
self.num = node_num
self.BuildRectNode()
self.built=True
#end buildNode2()
def __BuildRectNode(self):
'''Build the Node for Rectangular Coords (Slices).
NOTE: Not used anymore, replaced with get_buildNode_str()
'''
# build RWG
wgString = "app.subnodes["+str(self.parent.num)+"].addsubnode(rwguideNode,"+str(self.name)+")"+"\n"
sliceN = 1
for slc in self.slices:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].insertslice({"+str(sliceN)+"})"+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].width = "+str(slc.width)+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].etch = "+str(slc.etch)+"\n"
wgString += (len(slc.layers)-1)*("app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].insertlayer(1)"+"\n")
layerN = 1
for lyr in slc.layers:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].size = "+str(lyr.thickness)+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].nr11 = "+str(lyr.n())+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].nr22 = "+str(lyr.n())+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].nr33 = "+str(lyr.n())+"\n"
if lyr.cfseg:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].slices[{"+str(sliceN)+"}].layers[{"+str(layerN)+"}].cfseg = "+str(1)+"\n"
layerN += 1
sliceN += 1
# build boundary conditions - metal by default
if get_left_boundary() is None:
'''Default to Electric Wall/metal'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.type = 1"+"\n"
else:
if left_boundary().lower() == 'metal' or left_boundary().lower() == 'electric wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.type = 1"+"\n"
elif left_boundary().lower() == 'magnetic wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.type = 2"+"\n"
elif left_boundary().lower() == 'periodic':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.type = 3"+"\n"
elif left_boundary().lower() == 'transparent':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.type = 4"+"\n"
elif left_boundary().lower() == 'impedance':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.type = 5"+"\n"
else:
print self.name + '.buildNode(): Invalid input to set_left_boundary()'
if right_boundary() is None:
'''Default to Electric Wall/metal'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.type = 1"+"\n"
else:
if right_boundary().lower() == 'metal' or right_boundary().lower() == 'electric wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.type = 1"+"\n"
elif right_boundary().lower() == 'magnetic wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.type = 2"+"\n"
elif right_boundary().lower() == 'periodic':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.type = 3"+"\n"
elif right_boundary().lower() == 'transparent':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.type = 4"+"\n"
elif right_boundary().lower() == 'impedance':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.type = 5"+"\n"
else:
print self.name + '.buildNode(): Invalid input to set_right_boundary()'
if bottom_boundary() is None:
'''Default to Electric Wall/metal'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.type = 1"+"\n"
else:
if bottom_boundary().lower() == 'metal' or bottom_boundary().lower() == 'electric wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.type = 1"+"\n"
elif bottom_boundary().lower() == 'magnetic wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.type = 2"+"\n"
elif bottom_boundary().lower() == 'periodic':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.type = 3"+"\n"
elif bottom_boundary().lower() == 'transparent':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.type = 4"+"\n"
elif bottom_boundary().lower() == 'impedance':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.type = 5"+"\n"
else:
print self.name + '.buildNode(): Invalid input to set_bottom_boundary()'
if top_boundary() is None:
'''Default to Electric Wall/metal'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.type = 1"+"\n"
else:
if top_boundary().lower() == 'metal' or top_boundary().lower() == 'electric wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.type = 1"+"\n"
elif top_boundary().lower() == 'magnetic wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.type = 2"+"\n"
elif top_boundary().lower() == 'periodic':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.type = 3"+"\n"
elif top_boundary().lower() == 'transparent':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.type = 4"+"\n"
elif top_boundary().lower() == 'impedance':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.type = 5"+"\n"
else:
print self.name + '.buildNode(): Invalid input to set_top_boundary()'
if pml_x() is None:
'''Default to 0.0'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.pmlpar = {0.0}"+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.pmlpar = {0.0}"+"\n"
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].lhsbc.pmlpar = {"+str(pml_x())+"}"+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].rhsbc.pmlpar = {"+str(pml_x())+"}"+"\n"
if pml_y() is None:
'''Default to 0.0'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.pmlpar = {0.0}"+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.pmlpar = {0.0}"+"\n"
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].topbc.pmlpar = {"+str(pml_y())+"}"+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].botbc.pmlpar = {"+str(pml_y())+"}"+"\n"
# set solver parameters
if self.bend_radius == 0:
'''Default to 0.0 -straight'''
hcurv = 0
else:
hcurv = 1.0/self.bend_radius
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hcurv={"+str(hcurv)+"}"+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.autorun=0"+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.speed=0"+"\n"
if horizontal_symmetry() is None:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=0"+"\n"
else:
if horizontal_symmetry() == 'none':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=0"+"\n"
elif horizontal_symmetry() == 'ExSymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=1"+"\n"
elif horizontal_symmetry() == 'EySymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=2"+"\n"
else:
print self.name + '.buildNode(): Invalid horizontal_symmetry. Please use: none, ExSymm, or EySymm'
if vertical_symmetry() is None:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=0"+"\n"
else:
if vertical_symmetry() == 'none':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=0"+"\n"
elif vertical_symmetry() == 'ExSymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=1"+"\n"
elif vertical_symmetry() == 'EySymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=2"+"\n"
else:
print self.name + '.buildNode(): Invalid vertical_symmetry. Please use: none, ExSymm, or EySymm'
if N() is None:
'''Default to 10'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.maxnmodes={10}"+"\n"
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.maxnmodes={"+str(N())+"}"+"\n"
if get_NX() is None:
'''Default to 60'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.nx={60}"+"\n"
nx_svp = 60
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.nx={"+str(NX())+"}"+"\n"
nx_svp = get_NX()
if get_NY() is None:
'''Default to 60'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.ny={60}"+"\n"
ny_svp = 60
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.ny={"+str(NY())+"}"+"\n"
ny_svp = get_NY()
if min_TE_frac() is None:
'''Default to 0.0'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.mintefrac={0}"+"\n"
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.mintefrac={"+str(min_TE_frac())+"}"+"\n"
if max_TE_frac() is None:
'''Default to 100.0'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.maxtefrac={100}"+"\n"
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.maxtefrac={"+str(max_TE_frac())+"}"+"\n"
if min_EV() is None:
'''Default to -1e50'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evend={-1e+050}"+"\n"
else:
wgStrint += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evend={"+str(min_EV())+"}"+"\n"
if max_EV() is None:
'''Default to +1e50'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evstart={1e+050}"+"\n"
else:
wgStrint += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evend={"+str(max_EV())+"}"+"\n"
if RIX_tol() is None:
rix_svp = 0.010000
else:
rix_svp = RIX_tol()
if N_1d() is None:
n1d_svp = 30
else:
n1d_svp = N_1d()
if mmatch() is None:
mmatch_svp = 0
else:
mmatch_svp = mmatch()
if mode_solver() is None:
print 'Using default mode solver: "vectorial FDM real" '
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=71"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
else:
if get_mode_solver().lower() == 'vectorial FDM real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=71"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTE FDM real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=23"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTM FDM real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=39"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'vectorial FDM complex'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=79"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTE FDM complex'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=31"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'semivecTM FDM complex'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=47"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(nx_svp)+" "+str(ny_svp)+" 0 100 "+str(rix_svp)+"\n"
elif get_mode_solver().lower() == 'vectorial FMM real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=65"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTE FMM real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=17"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTM FMM real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=33"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'vectorial FMM complex'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=73"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTE FMM complex'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=25"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
elif get_mode_solver().lower() == 'semivecTM FMM complex'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=41"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V2 "+str(n1d_svp)+" "+str(mmatch_svp)+" 1 300 300 15 25 0 5 5"+"\n"
else:
print 'Invalid Rectangular Mode Solver. Please see `help(pyfimm.set_mode_solver)`, and use one of the following:'
print 'vectorial FDM real, semivecTE FDM real,semivecTM FDM real, '
print 'vectorial FDM complex, semivecTE FDM complex , semivecTM FDM complex, '
print 'vectorial FMM real, semivecTE FMM real, semivecTM FMM real, '
print 'vectorial FMM complex, semivecTE FMM complex, or semivecTM FMM complex'
raise ValueError("Invalid Modesolver String: " + str(get_mode_solver()) )
wgString += solverString
fimm.Exec(wgString)
#end buildRect()
#end Waveguide class
'''
###################################################
# Global Mode Solver Parameters #
# For rectangular waveguides #
###################################################
'''
def set_x_pml(pml_x):
    '''Set the width of the Perfectly-Matched Layer in the X (horizontal) direction.'''
    global global_horizontal_pml
    # stored module-wide; read back via get_x_pml()
    global_horizontal_pml = pml_x
def set_pml_x(w):
    '''Backwards compatibility only. Should instead use set_x_pml.'''
    # warn, then forward to the current API
    print("Deprecation Warning: set_pml_x(): Use set_x_pml() instead.")
    set_x_pml(w)
def get_x_pml():
    '''Get length of Perfectly-Matched Layer in horizontal direction (X). Returns None if not set.'''
    global global_horizontal_pml
    try:
        return global_horizontal_pml
    except NameError:
        # never set: define it as None so subsequent reads succeed directly
        global_horizontal_pml = None
        return global_horizontal_pml
def get_pml_x():
    '''Backwards compatibility only. Should instead use get_x_pml().'''
    print "Deprecation Warning: get_pml_x(): Use get_x_pml() instead."
    return get_x_pml()
def pml_x():
    '''Backwards compatibility only.
    Please use get_x_pml() instead.'''
    # warn, then forward to the current API
    print("DeprecationWarning: Use get_x_pml() instead.")
    return get_x_pml()
def set_y_pml(pml_y):
    '''Set the width of the Perfectly-Matched Layer in the Y (vertical) direction.'''
    global global_vertical_pml
    # stored module-wide; read back via get_y_pml()
    global_vertical_pml = pml_y
def set_pml_y(w):
    '''Backwards compatibility only. Should instead use set_y_pml.'''
    # BUGFIX: this wrapper was mistakenly named `set_pml_x`, which both
    # shadowed the real set_pml_x() defined above and left set_pml_y()
    # (the name its docstring and warning refer to) undefined.
    print("Deprecation Warning: set_pml_y(): Use set_y_pml() instead.")
    set_y_pml(w)
def get_y_pml():
    '''Get length of Perfectly-Matched Layer in vertical direction (Y). Returns None if not set.'''
    global global_vertical_pml
    try:
        return global_vertical_pml
    except NameError:
        # never set: define it as None so subsequent reads succeed directly
        global_vertical_pml = None
        return global_vertical_pml
def get_pml_y():
    '''Backwards compatibility only. Should instead use get_y_pml.'''
    # warn, then forward to the current API
    print("Deprecation Warning: get_pml_y(): Use get_y_pml() instead.")
    return get_y_pml()
def pml_y():
    '''Backwards compatibility only.
    Please use get_y_pml() instead.'''
    # warn, then forward to the current API
    print("DeprecationWarning: Use get_y_pml() instead.")
    return get_y_pml()
def set_top_boundary(bndry):
    '''Set boundary type of top side of rectangular waveguide.

    Parameters
    ----------
    bndry : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        Compared case-insensitively; stored as passed.

    Raises
    ------
    ValueError : if `bndry` is not one of the allowed boundary types.
    '''
    possibleArgs = ['metal' , 'magnetic wall' , 'periodic' , 'transparent' , 'impedance']
    # BUGFIX: the original validated the *builtin* `type` (not the `bndry`
    # argument) via a numpy comparison, so the check always failed and every
    # call raised ValueError.
    if str(bndry).lower() not in possibleArgs:
        raise ValueError("Allowed arguments are: 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' ")
    global global_TBC
    global_TBC = bndry
def get_top_boundary():
    '''Get boundary type of the top side of waveguides.

    Returns
    -------
    type : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        None if never set.
    '''
    global global_TBC
    try:
        return global_TBC
    except NameError:
        # never set: define it as None so subsequent reads succeed directly
        global_TBC = None
        return global_TBC
def top_boundary():
    '''Backwards compatibility only. Should instead use get_top_boundary().'''
    # warn, then forward to the current API
    print("Deprecation Warning: top_boundary(): Use get_top_boundary() instead.")
    return get_top_boundary()
def set_bottom_boundary(bndry):
    '''Set boundary type of bottom side of rectangular waveguide.

    Parameters
    ----------
    bndry : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        Compared case-insensitively; stored as passed.

    Raises
    ------
    ValueError : if `bndry` is not one of the allowed boundary types.
    '''
    possibleArgs = ['metal' , 'magnetic wall' , 'periodic' , 'transparent' , 'impedance']
    # BUGFIX: the original validated the *builtin* `type` (not the `bndry`
    # argument) via a numpy comparison, so the check always failed and every
    # call raised ValueError.
    if str(bndry).lower() not in possibleArgs:
        raise ValueError("Allowed arguments are: 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' ")
    global global_BBC
    global_BBC = bndry
def get_bottom_boundary():
    '''Get boundary type of the bottom side of waveguides.

    Returns
    -------
    type : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        None if never set.
    '''
    global global_BBC
    try:
        return global_BBC
    except NameError:
        # never set: define it as None so subsequent reads succeed directly
        global_BBC = None
        return global_BBC
def bottom_boundary():
    '''Backwards compatibility only. Should instead use get_bottom_boundary().'''
    # warn, then forward to the current API
    print("Deprecation Warning: bottom_boundary(): Use get_bottom_boundary() instead.")
    return get_bottom_boundary()
def set_left_boundary(bndry):
    '''Set boundary type of left side of rectangular waveguide.

    Parameters
    ----------
    bndry : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        Compared case-insensitively; stored as passed.

    Raises
    ------
    ValueError : if `bndry` is not one of the allowed boundary types.
    '''
    possibleArgs = ['metal' , 'magnetic wall' , 'periodic' , 'transparent' , 'impedance']
    # BUGFIX: the original validated the *builtin* `type` (not the `bndry`
    # argument) via a numpy comparison, so the check always failed and every
    # call raised ValueError.
    if str(bndry).lower() not in possibleArgs:
        raise ValueError("Allowed arguments are: 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' ")
    global global_LBC
    global_LBC = bndry
def get_left_boundary():
    '''Get boundary type of the left side of waveguides.

    Returns
    -------
    type : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        None if never set.
    '''
    global global_LBC
    try:
        return global_LBC
    except NameError:
        # never set: define it as None so subsequent reads succeed directly
        global_LBC = None
        return global_LBC
def left_boundary():
    '''Backwards compatibility only. Should instead use get_left_boundary().'''
    # warn, then forward to the current API
    print("Deprecation Warning: left_boundary(): Use get_left_boundary() instead.")
    return get_left_boundary()
def set_right_boundary(bndry):
    '''Set boundary type of right side of rectangular waveguide.

    Parameters
    ----------
    bndry : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        Compared case-insensitively; stored as passed.

    Raises
    ------
    ValueError : if `bndry` is not one of the allowed boundary types.
    '''
    possibleArgs = ['metal' , 'magnetic wall' , 'periodic' , 'transparent' , 'impedance']
    # BUGFIX: the original validated the *builtin* `type` (not the `bndry`
    # argument) via a numpy comparison, so the check always failed and every
    # call raised ValueError.
    if str(bndry).lower() not in possibleArgs:
        raise ValueError("Allowed arguments are: 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' ")
    global global_RBC
    global_RBC = bndry
def get_right_boundary():
    '''Get boundary type of the right side of waveguides.

    Returns
    -------
    type : string { 'metal' | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' }
        None if never set.
    '''
    global global_RBC
    try:
        return global_RBC
    except NameError:
        # never set: define it as None so subsequent reads succeed directly
        global_RBC = None
        return global_RBC
def right_boundary():
    '''Backwards compatibility only. Should instead use get_right_boundary().'''
    # warn, then forward to the current API
    print("Deprecation Warning: right_boundary(): Use get_right_boundary() instead.")
    return get_right_boundary()
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,437
|
demisjohn/pyFIMM
|
refs/heads/master
|
/example1 - Rect WG.py
|
'''
##########################################################################
 Simple rectangular waveguide example using the FMM solver
 Demonstrates basic WG construction & plotting capabilities.
 In Spyder, make sure you run the script with the Run > Configure... settings
 "Execute in current Python console"
 or
 "Execute in a new dedicated Python console" & "Interact with the Python console after execution"
 to allow for dynamic commands and interacting with the objects you created.
 If Spyder doesn't return an interactive console after running the script, then
 check this setting in the menu Run > Configure...
 Note that other python sessions using FimmWave connections should be terminated
 before a new connection can be created, or the python terminal won't be able to
 connect to FimmWave.
##########################################################################
'''
import pyfimm as pf   # Every script must begin with this line
''' Get help on commands and objects by typing things like:
>>> help( pf )
>>> dir( pf )     # lists all functions and variables provided by the module
>>> help( pf.set_mode_solver )    # help on one function
>>> help( pf.Waveguide )    # help on the Waveguide object
>>> dir ( pf.Waveguide )    # list all functions/variables in the Waveguide object
>>> help( pf.Waveguide.mode(0).plot )     # help on function 'plot' of the Waveguide object
>>> help( pf.Circ.buildNode )     # help on the `buildNode` function of the Circ object
or even easier, while building the script interactively, or after execution, try:
>>> clad = pf.Material(1.4456)
>>> core = pf.Material(1.9835)
>>> help(clad)      # Will show help on the Material object
>>> strip = pf.Waveguide( side(w_side) + center(w_core) + side(w_side) )
>>> dir(strip)      # will show functions available in the Waveguide object
>>> help(strip.buildNode)   # show help on the Waveguide.buildNode() method
after strip.calc(), try
>>> dir( strip.mode(0) )    # list the functions of a Mode object
>>> help( strip.mode(0).plot )  # detailed help on the mode plotting function
'''
pf.connect()        # connect to the FimmWave application, which must already running.
# Set global Parameters (Your copy of FIMMWAVE has default values for these.  You can change more than shown here.  See `dir(pyfimm)`, `help(pyfimm)`, or open the file `pyFIMM/__pyfimm.py`
import sys, os
# Locate this script on disk so FimmWave's working dir matches it:
ScriptPath, ScriptFile = os.path.split( os.path.realpath(__file__)  )      # Get directory of this script
pf.set_working_directory(ScriptPath)       # Set this directory to the location of your script, which is usually given by sys.path[0]
pf.set_eval_type('n_eff')     # FIMMWAVE will label modes by the effective index (options: n_eff or beta)
pf.set_mode_finder_type('stable')     # options: stable or fast
pf.set_mode_solver('vectorial FMM real')  # Three words, any permuation of: 'vectorial/semivecTE/semivecTM FDM/FMM real/complex'
pf.set_wavelength(1.55)     # The unit of space is always 1 um
pf.set_N_1d(100)       # No. of 1D modes found in each slice (FMM solver only)
pf.set_NX(100)      # No. of horizontal grid points
pf.set_NY(100)      # No. of vertical grid points
pf.set_N(3)         # No. of modes to solve for
# Project Node: You must build a project node at the beginning of every script
wg_prj = pf.Project('Example 1 - WG Proj')   # Make a Project object, pass a project name to the constructor
wg_prj.buildNode()    # the buildNode() method is what makes FIMMWAVE build your python objects.  If you don't call it, your script won't do anything!
# Construct the Waveguide Node
# WG Geometry (all dimensions in microns):
t_clad = 6.0    # cladding thickness
t_core = 0.1
w_core = 2.8
w_side = 6.0    # cladding width
clad = pf.Material(1.4456)    # Construct a Material python object, pass a refractive index to the constructor
core = pf.Material(1.9835)
center = pf.Slice(  clad(t_clad) + core(t_core, cfseg=True) + clad(t_clad)  )
side = pf.Slice(  clad(2*t_clad + t_core)  )
# Passing a thickness to a Material object as the argument creates a Layer object.
# Layer objects can be stacked (bottom to top) using the + operator - "clad" & "core" have been stacked here.
# You then pass a stack of Layer objects to the Slice object constructor
# You can also set the "cfseg" (Confinement Factor) flag for a layer if desired, as done here for the waveguide core.
strip = pf.Waveguide(  side(w_side) + center(w_core) + side(w_side)  )
# Construct a Waveguide object by adding Slice objects (left to right).
# You can pass the Slice width to the Slice object with ()'s
print "Printing `strip`:"
print strip     # you can print your python objects to the shell to check them
strip.set_parent(wg_prj)    # You have to tell python which project node to build the waveguide node under
strip.name = 'strip'   # Name the node
strip.buildNode()      # You must always build the node!
# The above three lines can also be done in one line:
#strip.buildNode(parent=wg_prj, name='strip')
print "Calculating Modes..."
strip.calc()    # Tell FIMMWAVE to solve for the modes!
#strip.mode(0).plot()    # Plot the fundamental mode with python!
#strip.mode(0).plot('Ey')   # plot Ey instead
strip.mode('all').plot(title='Strip WG: All Modes')     # plot all the calc'd modes (3 in this case) on one figure
#strip.mode( [0,2] ).plot()     # plot only modes #0 and 2
#strip.delete()     # delete FIMMWAVE nodes if you want to!
#wg_prj.delete()
#pf.disconnect()     # close TCP connection to application.  Other pyFIMM scripts won't be able to use FimmWave until you either disconnect or kill the script's shell entirely.
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,438
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__version.py
|
versionnum = "1.3.3"            # the version number
versiondate = "2017-04-20"      # the date of this version
version = "v" + versionnum + ", " + versiondate

# if this file is run by itself, print the version string:
if __name__ == "__main__":
    print(version)
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,439
|
demisjohn/pyFIMM
|
refs/heads/master
|
/pyfimm/__Circ.py
|
'''Circ class, part of pyFIMM.
Objects & functions needed for cylindrical calculations.'''
from __globals import * # import global vars & FimmWave connection object
# DEBUG() variable is also set in __globals
from __pyfimm import * # access the main module's classes & modesolver functions
#from __Waveguide import * # not needed?
from __Mode import Mode # import Mode class
#from __pyfimm import DEBUG() # Value is set in __pyfimm.py
from numpy import inf # infinity, for hcurv/bend_radius
class Circ(Node):
"""pyFimm Circ object, 2-D Cylindrical-coordinate version of Waveguide (a fimmWave FWG waveguide, eg. optical fiber).
When a Thickness is supplied (in the cylindrical Z direction), this becomes a 3D structure.
This inherits from the pyFIMM Node objects.
Parameters
----------
layers : list
List containing the Layer objects used to generate this Circ.
thickness : float, optional
Apply a 3D length to this waveguide, in the direction of propagation.
Attributes
----------
type : {'cyl_waveguide'}
Currently = 'cyl_waveguide'. May be deprecate as it is unused.
length : float
Apply a 3D length to this waveguide, in the direction of propagation.
layers : list
List containing all the layers the Waveguide is constructed with. The layers are ordered beginning from the innermost to the outermost.
bend_radius : float
Bend Radius of the waveguide. The default value of `inf` indicates a straight waveguide.
Defined from the center of the waveguide cross-section to the axis of the bend.
        Positive value means WG bends to the LEFT (so Right-Hand boundaries will see the radiating bend modes, if any).  Negative value bends the opposite way.
modes : list
lists the modes calc'd for this waveguide (after Waveguide.calc() )
built : { True | False }
Has this node been built in FimmWave yet?
nodestring : string
The fimmwave string pointing to this waveguide's node. eg. "app.subnodes[1].subnodes[3]"
Does not have a trailing period.
Methods
-------
This is a partial list - see `dir(pf.Circ)` to see all methods.
Please see help on a specific function via `help(pf.Circ)` for detailed up-to-date info on accepted arguments etc.
mode(modenum)
modenum: int
Returns the specified Mode object. Mode(0) is usually the fundamental mode, depending on the solver options.
get_radius()
Return total radius of this Waveguide, by adding up radius of each contained Layer.
get_layer_radii()
Return the thickness of each layer in this Waveguide, as list.
buildNode( [name=, parentNode=] )
Build the Fimmwave node of this Fiber/Cylindrical (FWG) waveguide.
get_buildNode_str(nodestr [, obj=None, target=None])
Return the fimmwave commands needed to build this waveguide node. This command does not create the new waveguide node first (ie. it does not run `app.subnodes[1].addsubnode(rwguideNode, WGname)` )
So you must create the appropriate type of waveguide node first, and then issue the commands returned by this func. The massive multi-line string includes all the modesolver settings needed to calculate the waveguide afterwards.
get_solver_str(nodestr ...)
Returns just the MOLAB mode-solver configuration as a fimmwave-executable string.
set_autorun()
Set the fimmwave "autorun" flag which allows FimmProp to calc the modes when needed.
unset_autorun():
Unset the fimmwave "autorun" flag.
set_material_database( PathString )
Not recommended - it is safer to use a global material file, and have that file `include` other material files. FimmProp Devices only support a single global materials file.
PathString : string
Path to a FimmWave material database (*.mat) for this waveguide node, if different from the globally set one (see `set_material_database()` ).
get_material_database()
Returns path to FimmWave material database (*.mat) for this waveguide node, if set.
unset_material_database()
Unsets a custom material database for this waveguide node, such that the globally set one (see `set_material_database()` ) will be used instead.
set_joint_type(type)
Set the type of FimmProp joint to use after this waveguide has been inserted into a Device.
get_joint_type(type)
Get the type of FimmProp joint to use after this waveguide has been inserted into a Device.
set_wavelength( wl )
Set the wavelength of this guide.
get_wavelength()
Return the wavelength of this guide.
Examples
--------
Create a Circ by calling it (instancing it) with Materials called with a radius.
The first Material is at the center (r=0), and construction proceeds from the inside to the outer radius.
>>> DBRLo = Circ( AlGaAs(5.0) ) # 5.00 radius circle
>>> DBRHi = Circ( GaAs(5.0) )
>>> CurrentAperture = Circ( AlGaAs(3.0) + AlOx(2.0) )
3.0 um radius of AlGaAs in the center, clad by 2um of AlOx.
>>> CurrentAperture.buildNode( name='Current Aperture', parentNode=wg_prj )
>>> CurrentAperture.calc()
>>> CurrentAperture.mode(0).plot() # plot the mode!
"""
def __init__(self,*args):
if len(args) >= 1:
self.type = 'cyl_waveguide'
self.name = None
self.autorun = True
self.built=False
self.length = 0.0
self.__wavelength = get_wavelength() # get global wavelength
self.layers = []
for lyr in args[0]:
self.layers.append(lyr) # re-create a list of layers
self.length = 0
self.modes = []
self.bend_radius = inf # inf = straight WG
self.__materialdb = None
else:
raise ValueError('Invalid number of input arguments to Circ()')
if len(args) == 2:
self.length = args[1] # can pass length as 2nd arg if desired
def __str__(self):
'''How to `print` this object
TO DO: reproduce the Layer.__repr__ string here, to have it print Radius= instead of Thickness='''
str=""
if self.name: str += "Name: '"+self.name+"'\n"
str = 'Radius = %7.4f \n' % self.get_radius()
for i,lyr in enumerate(self.layers):
if i == 0:
str += 3*'*' + ' Innermost Layer: ' + 3*'*' + '\n%s' % (lyr) + '\n'
elif i == (len(self)-1):
str += 3*'*' + ' Outermost Layer: ' + 3*'*' + '\n%s' % (lyr) + '\n'
else:
str += 3*'*' + ' Middle Layer %i: ' % i + 3*'*' + '\n%s' % lyr + '\n'
return str
#def __call__(self,length):
# '''Calling ThisCirc(thick) sets the Thickness of this Circ, and returns a list containing this Slice.'''
# self.length = length
# return [self]
def __call__(self,length):
'''Calling a WG object with one argument creates a Section of passed length, and returns a list containing this new Section.
Usually passed directly to Device as so:
>>> NewDevice = pyfimm.Device( WG1(10.5) + WG2(1.25) + WG3(10.5) )
Parameters
----------
length : float
Pass a length (microns). This will be applied to the returned Section Object, which will also contain a reference to this waveguide object.
'''
# Instantiate a Section obj with 1 args
out = [ Section( self, length ) ] # include cfseg
return out
def __add__(self,other):
'''Addition returns a list containing each Circ'''
return [self,other]
def __len__(self):
'''len(ThisCirc) returns the number of Layers in ThisCirc'''
return len(self.layers)
def get_radius(self):
'''Return summed Radius of all Layers in this Circ - for compatibility with Slice'''
thck = 0
for lyr in self.layers:
thck += lyr.thickness
return thck
def radius():
'''Backwards compatibility only. Should Instead get_radius().'''
print "Deprecation Warning: radius(): Use get_radius() instead."
return get_radius()
def layer_radii(self):
'''Return list of Radii of each Layer in this Circ - for compatibility with Slice'''
lyr_thck = []
for lyr in self.layers:
lyr_thck.append(lyr.thickness)
return lyr_thck
def mode(self,modeN):
'''Circ.mode(int): Return the specified pyFimm Mode object for this waveguide. Fundamental mode is mode(0).'''
return Mode(self, modeN,"app.subnodes[{"+str(self.parent.num)+"}].subnodes[{"+str(self.num)+"}].evlist.")
def calc(self):
'''Calculate/Solve for the modes of this Waveguide'''
fimm.Exec("app.subnodes[{"+str(self.parent.num)+"}].subnodes[{"+str(self.num)+"}].evlist.update()")
def set_autorun(self):
'''FimmProp Device will automatically calculate modes as needed.'''
self.autorun = True
def unset_autorun(self):
'''FimmProp Device will Not automatically calculate modes as needed.'''
self.autorun = False
def set_material_database(self, path):
'''Set a material database for this waveguide node (overrides the global setting of `pyfimm.set_material_database(path)` ).'''
self.__materialdb = str(path)
def get_material_database(self):
'''Returns a custom material database for this waveguide node.'''
return self.__materialdb
def unset_material_database(self):
'''Clears the custom material database for this waveguide node. The global setting `pyfimm.set_material_database(path)` will be used instead.'''
self.__materialdb = None
def set_joint_type(self, jtype, jointoptions=None):
'''Set the joint type after (on right side of) this waveguide, if used in a Device.
type : { 'complete' | 'special complete' | 'normal fresnel' | 'oblique fresnel' }, case-insensitive
synonyms for 'complete' are { 0 }, and is also the default if unset.
synonyms for 'special complete' are { 3 | 'special' }
synonyms for 'normal fresnel' are { 1 | 'fresnel' }
synonyms for 'oblique fresnel' are { 2 }
jointoptions : Dictionary{} of options. Allows for the Device.buildnode() to set various joint options, such as angle etc. Please see help(Device) for what the possible options are.
'''
if isinstance(jtype, str): jtype=jtype.lower() # make lower case
if jtype == 0 or jtype == 'complete':
self.__jointtype = 0
if jtype == 1 or jtype == 'normal fresnel' or jtype == 'fresnel':
self.__jointtype = 1
if jtype == 2 or jtype == 'oblique fresnel':
self.__jointtype = 2
if jtype == 3 or jtype == 'special complete' or jtype == 'special':
self.__jointtype = 3
if isinstance(jointoptions, dict):
self.__jointoptions=jointoptions
elif jointoptions!=None:
ErrStr = "set_joint_type(): `jointoptions` should be a dictionary. See help(Device) for the available options."
raise ValueError(ErrStr)
#end set_joint_type()
def get_joint_type(self, *args):
'''get_joint_type( [asnumeric] )
Get the joint type that will be placed between this waveguide and the next, when inserted into a Device.
asnumeric : boolean, optional
A True value will cause the output to be numeric, rather than string. See help(set_joint_type) for the numerical/string correlations. False by default.
(FYI, `asnumeric=True` is used in Device.buildNode() )
'''
try:
self.__jointtype # see if variable exists
except AttributeError:
# if the variable doesn't exist yet.
if DEBUG(): print "unset " + self.name + ".__jointtype --> 'complete' "
self.__jointtype = 0
if len(args) == 0: asnumeric = False # output as string by default
if len(args) == 1: asnumeric = args[0]
if len(args) > 1: raise ValueError("get_joint_type(): Too many arguments provided.")
if asnumeric:
out= self.__jointtype
else:
if self.__jointtype == 0:
out= 'complete'
elif self.__jointtype == 1:
out= 'normal fresnel'
elif self.__jointtype == 2:
out= 'oblique fresnel'
elif self.__jointtype == 3:
out= 'special complete'
#if DEBUG(): print "get_joint_type(): ", out
return out
#end get_joint_type()
def set_wavelength(self, wl):
'''Set the wavelength for the waveguide. The object use this wavelength in their MOLAB options.
Note that, after building, the object's wavelength (`WGobj.get_wavelength()` ) can be different from the global pyFIMM wavelength (`pyFIMM.get_wavelength`).
The global setting (`pyFIMM.set_wavelength()`) is acquired when the object is first created.
Parameters
----------
wl : float
The wavelength in micrometers.
'''
if self.built:
self.__wavelength = float(wl)
fimm.Exec( self.nodestring + ".evlist.svp.lambda = " + str(self.__wavelength) + " \n" )
else:
self.__wavelength = float(wl)
def get_wavelength(self):
'''Return the wavelength (float) for this specific Device (may be different from the global pyFIMM wavelength in `pyFIMM.get_wavelength()` after the guide is built).'''
return self.__wavelength
####################################################
#### Cylindrical Waveguide Node Construction ####
####################################################
    def buildNode(self, name=None, parent=None, overwrite=False, warn=True):
        '''Build the Fimmwave node of this cylindrical (FWG) waveguide.

        Creates a new `fwguideNode` subnode under the parent project node and
        executes the full configuration string from get_buildNode_str().
        Sets self.num, self.nodestring and self.built as side effects.

        Parameters
        ----------
        name : string, optional
            Provide a name for this waveguide node.
        parent : Node object, optional
            Provide the parent (Project/Device) Node object for this waveguide.
        overwrite : { True | False }, optional
            Overwrite existing node of same name?  Defaults to False, which will
            rename the node if it has the same name as an existing node.
        warn : {True | False}, optional
            Print notification if overwriting a node?  True by default.
        '''
        if name: self.name = name
        if parent: self.parent = parent
        nodestring="app.subnodes["+str(self.parent.num)+"]"
        self._checkNodeName(nodestring, overwrite=overwrite, warn=warn)   # will alter the node name if needed
        # the new subnode index is one past the current subnode count
        N_nodes = fimm.Exec("app.subnodes["+str(self.parent.num)+"].numsubnodes()")
        node_num = int(N_nodes+1)
        self.num = node_num
        # create the FWG subnode, then configure it with the full build string
        wgString = "app.subnodes["+str(self.parent.num)+"].addsubnode(fwguideNode,"+str(self.name)+")"+"\n"
        self.nodestring = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"]"
        fimm.Exec(  wgString + self.get_buildNode_str(self.nodestring, warn=warn)  )
        self.built = True
    #end buildNode()
def get_buildNode_str(self, nodestr, obj=None, target=None, warn=True):
'''Return the node construction string for either a standalone waveguide or device.
This is for a Cylindrical/Fiber (FWG) waveguide.
The new Waveguide subnode should be created BEFORE calling this function, so that you can pass the correct node string.
Parameters
----------
nodestr : str
The entire base-string to address the necessary node. For example:
>>> nodestr = "app.subnodes[1].subnodes[2]"
the subnode referenced should be the NEW subnode to be created (ie. one higher than previously in existence). In normal operation, the new subnode has already been created by WG.buildnode().
obj : Circ object, optional
Defaults to `self`. Can pass another object instead, to get the buildNode string for that.
target : { 'wglens' | 'taper' }, optional
Omits certain parameters from being set depending on target. Used for building tapers.
'''
'''
newnodestr = ""
nsplit = nodestr.split('.')
### Remove last node component, to create new subnode
for strang in nsplit[0:-1] :
newnodestr += strang + '.'
if DEBUG(): print "newnodestr: \n%s"%newnodestr, "nodestr: \n%s"%nodestr
if target == 'waveguide':
newnodestr2 = "app.subnodes["+str(self.parent.num)+"]"
nodestr2 = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"]"
if DEBUG(): print "newnodestr2: \n%s"%newnodestr2, "nodestr2: \n%s"%nodestr2
'''
if not obj: obj=self
# build FWG Node
if DEBUG(): print "Circ: "+self.name+".__get_buildNode_str(): "
# check for custom material DB in this WG node.
if not self.__materialdb:
'''Use global material DB if this WG doesn't have a custom one set.'''
matDB = get_material_database()
else:
matDB = self.__materialdb
#if DEBUG(): print "Using custom matDB: `%s`"%matDB
wgString="" # The fimmwave string to return
wgString += nodestr + ".deletelayer(2) \n" # FWG always starts with 2 layers, delete the 2nd one.
if matDB:
#if DEBUG(): print "setting MaterBase file to: '%s'"%matDB
wgString += nodestr + ".setmaterbase(" + matDB + ") \n"
layerN = 1
for lyr in obj.layers:
if DEBUG(): print "Layer ", layerN, "; radius:", lyr.thickness
if layerN > 1: wgString += nodestr + ".insertlayer("+str(layerN)+") \n"
wgString += nodestr + ".layers[{"+str(layerN)+"}].size = "+str(lyr.thickness)+"\n"
if lyr.material.type == 'rix':
wgString += \
nodestr + ".layers[{"+str(layerN)+"}].nr11 = "+str(lyr.n())+"\n"+ \
nodestr + ".layers[{"+str(layerN)+"}].nr22 = "+str(lyr.n())+"\n"+ \
nodestr + ".layers[{"+str(layerN)+"}].nr33 = "+str(lyr.n())+"\n"
elif lyr.material.type == 'mat':
if DEBUG(): print "Layer %i: mx="%(layerN), lyr.material.mx, " // my=", lyr.material.my
wgString += nodestr + ".layers[{"+str(layerN)+"}].setMAT(" + str(lyr.material.mat) + ") \n"
if lyr.material.mx: wgString += nodestr + ".layers[{"+str(layerN)+"}].mx = "+str(lyr.material.mx)+"\n"
if lyr.material.my: wgString += nodestr + ".layers[{"+str(layerN)+"}].my = "+str(lyr.material.my)+"\n"
if lyr.cfseg:
wgString += nodestr + ".layers[{"+str(layerN)+"}].cfseg = 1 \n"
layerN += 1
#end for(obj.layers)
# Set PML layer:
if get_circ_pml() is None:
'''PML width is 0.0 by default, defined here'''
wgString += nodestr + ".bc.pmlpar = {0.0}"+"\n"
else:
wgString += nodestr + ".bc.pmlpar = {"+str( get_circ_pml() )+"}"+"\n"
# build boundary conditions - metal by default
if get_circ_boundary() is None:
'''Default to Electric Wall/metal'''
if warn: print self.name + ".buildNode(): circ_boundary: Using electric wall boundary."
wgString += nodestr + ".bc.type = 1"+"\n"
else:
if get_circ_boundary().lower() == 'metal' or get_circ_boundary().lower() == 'electric wall':
wgString += nodestr + ".bc.type = 1"+"\n"
elif get_circ_boundary().lower() == 'magnetic wall':
wgString += nodestr + ".bc.type = 2"+"\n"
elif get_circ_boundary().lower() == 'periodic':
wgString += nodestr + ".bc.type = 3"+"\n"
elif get_circ_boundary().lower() == 'transparent':
wgString += nodestr + ".bc.type = 4"+"\n"
elif get_circ_boundary().lower() == 'impedance':
wgString += nodestr + ".bc.type = 5"+"\n"
else:
print self.name + ".buildNode(): Invalid input to set_circ_boundary()"
wgString += self.get_solver_str(nodestr, obj=obj, target=target)
#if DEBUG(): print "__get_buildNode_Str(): wgString=\n", wgString
return wgString
#end __buildNode()
def get_solver_str(self, nodestr, obj=None, target=None):
''' Return only the Solver ('svp') and mode solver (MOLAB, 'mpl') params for creating this node.
Used for building Tapers, when the WG is already built otherwise.'''
if not obj: obj=self
#if DEBUG(): print "Circ.get_solver_str()... "
wgString = ""
# set solver parameters
if target == 'wglens' or target == 'taper':
'''hcurv/bend_radius is set separately for Taper or WGLens, since they could have a different curvature from their base WG object.'''
pass
else:
nodestr = nodestr + ".evlist" #WG nodes set their solver params under this subheading
if obj.bend_radius == 0:
obj.bend_radius = inf
if warn: print self.name + ".buildNode(): Warning: bend_radius = 0.0 --> inf (straight waveguide)"
hcurv = 0
elif obj.bend_radius == inf:
hcurv = 0
else:
hcurv = 1.0/obj.bend_radius
wgString += nodestr + ".svp.hcurv={"+str(hcurv)+"}"+"\n"
#end if(WGlens/Taper)
#autorun & speed:
if self.autorun:
wgString += nodestr + ".mlp.autorun=1"+"\n"
else:
wgString += nodestr + ".mlp.autorun=0"+"\n"
if get_solver_speed()==1:
wgString += nodestr + ".mlp.speed=1"+"\n" #0=best, 1=fast
else:
wgString += nodestr + ".mlp.speed=0"+"\n" #0=best, 1=fast
if get_horizontal_symmetry() is None:
wgString += nodestr + ".svp.hsymmetry=0"+"\n"
else:
if get_horizontal_symmetry() == 'none':
wgString += nodestr + ".svp.hsymmetry=0"+"\n"
elif get_horizontal_symmetry() == 'ExSymm':
wgString += nodestr + ".svp.hsymmetry=1"+"\n"
elif get_horizontal_symmetry() == 'EySymm':
wgString += nodestr + ".svp.hsymmetry=2"+"\n"
else:
raise ValueError( 'Invalid horizontal_symmetry. Please use: none, ExSymm, or EySymm')
if get_vertical_symmetry() is None:
wgString += nodestr + ".svp.vsymmetry=0"+"\n"
else:
if get_vertical_symmetry() == 'none':
wgString += nodestr + ".svp.vsymmetry=0"+"\n"
elif get_vertical_symmetry() == 'ExSymm':
wgString += nodestr + ".svp.vsymmetry=1"+"\n"
elif get_vertical_symmetry() == 'EySymm':
wgString += nodestr + ".svp.vsymmetry=2"+"\n"
else:
raise ValueError( 'Inalid horizontal_symmetry. Please use: none, ExSymm, or EySymm')
wgString += nodestr + ".mlp.maxnmodes={"+str( get_N() )+"}"+"\n"
wgString += nodestr + ".mlp.nx={"+str( get_NX() )+"}"+"\n"
nx_svp = get_NX()
wgString += nodestr + ".mlp.ny={"+str( get_NY() )+"}"+"\n"
ny_svp = get_NY()
wgString += nodestr + ".mlp.mintefrac={"+str( get_min_TE_frac() )+"}"+"\n"
wgString += nodestr + ".mlp.maxtefrac={"+str( get_max_TE_frac())+"}"+"\n"
if get_min_EV() is None:
'''Default to -1e50'''
wgString += nodestr + ".mlp.evend={-1e+050}"+"\n"
else:
wgStrint += nodestr + ".mlp.evend={"+str(get_min_EV())+"}"+"\n"
if get_max_EV() is None:
'''Default to +1e50'''
wgString += nodestr + ".mlp.evstart={1e+050}"+"\n"
else:
wgStrint += nodestr + ".mlp.evend={"+str(get_max_EV())+"}"+"\n"
if get_RIX_tol() is None:
rix_svp = 0.010000
else:
rix_svp = get_RIX_tol()
if get_N_1d() is None:
n1d_svp = 30
else:
n1d_svp = get_N_1d()
if get_mmatch() is None:
mmatch_svp = 0
else:
mmatch_svp = get_mmatch()
if get_mode_solver() is None:
print self.name + '.buildNode(): Using Default Mode Solver: "Vectorial FDM Real" '
wgString += nodestr + ".svp.solvid=192"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(n1d_svp)+" "+str(0)+" "+str( get_N() )+" "+str( 1 )+" "+str( get_Np() )+" "+"\n"
else:
if get_mode_solver().lower() == 'Vectorial SMF'.lower():
wgString += nodestr + ".svp.solvid=50"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTE SMF'.lower():
wgString += nodestr + ".svp.solvid=18"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTM SMF'.lower():
wgString += nodestr + ".svp.solvid=34"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'Vectorial Gaussian'.lower():
wgString += nodestr + ".svp.solvid=53"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTE Gaussian'.lower():
wgString += nodestr + ".svp.solvid=21"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTM Gaussian'.lower():
wgString += nodestr + ".svp.solvid=37"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'Vectorial GFS Real'.lower():
wgString += nodestr + ".svp.solvid=68"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
elif get_mode_solver().lower() == 'Scalar GFS Real'.lower():
wgString += nodestr + ".svp.solvid=4"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
elif get_mode_solver().lower() == 'Vectorial FDM real'.lower():
wgString += nodestr + ".svp.solvid=192"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(n1d_svp)+" "+str( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
elif get_mode_solver().lower() == 'Vectorial FDM complex'.lower():
wgString += nodestr + ".svp.solvid=200"+"\n"
solverString = nodestr + ".svp.buff=V1 "+str(n1d_svp)+" "+str( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
else:
print self.name + '.buildNode(): Invalid Cylindrical Mode Solver. Please see `help(pyfimm.set_mode_solver)`, and use one of the following options :'
print ' Finite-Difference Method solver: "vectorial FDM real" , "vectorial FDM complex",'
print ' General Fiber Solver: "vectorial GFS real" , "scalar GFS real",'
print ' Single-Mode Fiber solver: "Vectorial SMF" , "SemivecTE SMF" , "SemivecTM SMF",'
print ' Gaussian Fiber Solver (unsupported): "Vectorial Gaussian" , "SemivecTE Gaussian" , "SemivecTM Gaussian".'
raise ValueError("Invalid Modesolver String: " + str(get_mode_solver()) )
# Set wavelength:
wgString += self.nodestring + ".evlist.svp.lambda = " + str( self.get_wavelength() ) + " \n"
wgString += solverString
return wgString
#end __get_solver_str()
def __buildNode2(self, name=None, parentNode=None):
'''Build the Fimmwave node of this cylindrical (FWG) waveguide.
NOTE: This function has been deprecated, in preference of the new buildNode, which uses
the more extensible get_buildNodeStr() function.
Parameters
----------
name : string, optional
Provide a name for this waveguide node.
parent : Node object, optional
Provide the parent (Project/Device) Node object for this waveguide.
'''
if name: self.name = name
if parentNode: self.parent = parentNode
N_nodes = fimm.Exec("app.subnodes["+str(self.parent.num)+"].numsubnodes()")
node_num = int(N_nodes+1)
self.num = node_num
self.BuildCylNode()
#end buildNode2()
def __BuildCylNode(self):
'''Build the Node for Cylindrical Coords (Circ's).
NOTE: This function has been deprecated, in preference of the new BuildCylNode, which uses
the more extensible get_buildNodeStr() function.
To DO
-----
Add PML setting per-WG? Or just do global PML like Jared's? I like global (manual might say all PMLs should be the same)
Currently only supports Step Index:
Allow Gaussian profile, which takes { Radius (um), Sigma (um), neff }
Spline {splineNseg (int), cornerpoint (bool)}...
'''
# build FWG
if DEBUG(): print "BuildCylNode(): "
wgString = "app.subnodes["+str(self.parent.num)+"].addsubnode(fwguideNode,"+str(self.name)+")"+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].deletelayer(2) \n" # FWG always starts with 2 layers, delete the 2nd one.
layerN = 1
for lyr in self.layers:
if DEBUG(): print "BuildCylNode(): layer ", layerN, "; radius:", lyr.thickness, "; n:", lyr.n()
if layerN > 1: wgString += "app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].insertlayer("+str(layerN)+") \n"
wgString += \
"app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].layers[{"+str(layerN)+"}].size = "+str(lyr.thickness)+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].layers[{"+str(layerN)+"}].nr11 = "+str(lyr.n())+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].layers[{"+str(layerN)+"}].nr22 = "+str(lyr.n())+"\n"+ \
"app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].layers[{"+str(layerN)+"}].nr33 = "+str(lyr.n())+"\n"
if lyr.cfseg:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes[{"+str(self.num)+"}].layers[{"+str(layerN)+"}].cfseg = "+str(1)+"\n"
layerN += 1
#end for(self.layers)
# Set PML layer:
if get_circ_pml() is None:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.pmlpar = {0.0}"+"\n"
else:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.pmlpar = {"+str( get_circ_pml() )+"}"+"\n"
# build boundary conditions - metal by default
if get_circ_boundary() is None:
print "Using electric wall boundary."
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.type = 1"+"\n"
else:
if get_circ_boundary().lower() == 'metal' or get_circ_boundary().lower() == 'electric wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.type = 1"+"\n"
elif get_circ_boundary().lower() == 'magnetic wall':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.type = 2"+"\n"
elif get_circ_boundary().lower() == 'periodic':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.type = 3"+"\n"
elif get_circ_boundary().lower() == 'transparent':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.type = 4"+"\n"
elif get_circ_boundary().lower() == 'impedance':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].bc.type = 5"+"\n"
else:
print('Invalid input to set_circ_boundary()')
# set solver parameters
if self.bend_radius == 0:
hcurv = 0
else:
hcurv = 1.0/self.bend_radius
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hcurv={"+str(hcurv)+"}"+"\n"
#autorun & speed:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.autorun=0"+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.speed=0"+"\n"
if horizontal_symmetry() is None:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=0"+"\n"
else:
if horizontal_symmetry() == 'none':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=0"+"\n"
elif horizontal_symmetry() == 'ExSymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=1"+"\n"
elif horizontal_symmetry() == 'EySymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.hsymmetry=2"+"\n"
else:
print 'Inalid horizontal_symmetry. Please use: none, ExSymm, or EySymm'
if vertical_symmetry() is None:
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=0"+"\n"
else:
if vertical_symmetry() == 'none':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=0"+"\n"
elif vertical_symmetry() == 'ExSymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=1"+"\n"
elif vertical_symmetry() == 'EySymm':
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.vsymmetry=2"+"\n"
else:
print 'Inalid horizontal_symmetry. Please use: none, ExSymm, or EySymm'
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.maxnmodes={"+str( get_N() )+"}"+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.nx={"+str( get_NX() )+"}"+"\n"
nx_svp = get_NX()
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.ny={"+str( get_NY() )+"}"+"\n"
ny_svp = get_NY()
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.mintefrac={"+str( get_min_TE_frac() )+"}"+"\n"
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.maxtefrac={"+str(max_TE_frac())+"}"+"\n"
if get_min_EV() is None:
'''Default to -1e50'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evend={-1e+050}"+"\n"
else:
wgStrint += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evend={"+str(get_min_EV())+"}"+"\n"
if max_EV() is None:
'''Default to +1e50'''
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evstart={1e+050}"+"\n"
else:
wgStrint += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.mlp.evend={"+str(max_EV())+"}"+"\n"
if RIX_tol() is None:
rix_svp = 0.010000
else:
rix_svp = RIX_tol()
if N_1d() is None:
n1d_svp = 30
else:
n1d_svp = N_1d()
if mmatch() is None:
mmatch_svp = 0
else:
mmatch_svp = mmatch()
if get_mode_solver() is None:
print 'Using Default Mode Solver: "Vectorial FDM Real" '
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=192"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(n1d_svp)+" "+str(0)+" "+str( get_N() )+" "+str( 1 )+" "+str( get_Np() )+" "+"\n"
else:
if get_mode_solver().lower() == 'Vectorial SMF'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=50"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTE SMF'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=18"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTM SMF'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=34"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'Vectorial Gaussian'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=53"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTE Gaussian'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=21"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'SemiVecTM Gaussian'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=37"+"\n"
solverString = "\n"
elif get_mode_solver().lower() == 'Vectorial GFS Real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=68"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
elif get_mode_solver().lower() == 'Scalar GFS Real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=4"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
elif get_mode_solver().lower() == 'Vectorial FDM real'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=192"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(n1d_svp)+" "+str( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
elif get_mode_solver().lower() == 'Vectorial FDM complex'.lower():
wgString += "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.solvid=200"+"\n"
solverString = "app.subnodes["+str(self.parent.num)+"].subnodes["+str(self.num)+"].evlist.svp.buff=V1 "+str(n1d_svp)+" "+sstr( get_Nm()[0] )+" "+str( get_Nm()[1] )+" "+str( get_Np()[0] )+" "+str( get_Np()[1] )+" "+"\n"
else:
print 'Invalid Cylindrical Mode Solver. Please see `help(pyfimm.set_mode_solver)`, and use one of the following options :'
print 'Finite-Difference Method solver: "vectorial FDM real" , "vectorial FDM complex",'
print 'General Fiber Solver: "vectorial GFS real" , "scalar GFS real",'
print 'Single-Mode Fiber solver: "Vectorial SMF" , "SemivecTE SMF" , "SemivecTM SMF",'
print 'Gaussian Fiber Solver (unsupported): "Vectorial Gaussian" , "SemivecTE Gaussian" , "SemivecTM Gaussian".'
raise ValueError("Invalid Modesolver String: " + str(get_mode_solver()) )
wgString += solverString
fimm.Exec(wgString)
self.built=True
#end buildCyl()
#end class Circ
############################################
#### Cylindrical Functions ####
############################################
############################################
#### Mode Solver Parameters ####
############################################
def set_circ_pml(w):
    '''Set the width (um) of the PML (Perfectly Matched Layer) used by
    cylindrical waveguides.'''
    global global_circ_pml
    global_circ_pml = w
def get_circ_pml():
    '''Return the cylindrical PML (Perfectly Matched Layer) width,
    or None if it was never set.'''
    global global_circ_pml
    if 'global_circ_pml' not in globals():
        global_circ_pml = None   # never set -> default to None
    return global_circ_pml
def set_pml_circ(w):
    '''Deprecated alias: use set_circ_pml() instead.'''
    print("Deprecation Warning: set_pml_circ(): Use set_circ_pml() instead.")
    set_circ_pml(w)
def get_pml_circ():
    '''Deprecated alias: use get_circ_pml() instead.'''
    print("Deprecation Warning: get_pml_circ(): Use get_circ_pml() instead.")
    return get_circ_pml()
def set_circ_boundary(type):
    '''Set boundary type of cylindrical waveguide.
    Default value, if unset, is 'electric wall'.

    Parameters
    ----------
    type : string, case-insensitive
        One of { 'electric wall' (synonym 'metal') | 'magnetic wall' |
        'periodic' | 'transparent' | 'impedance' }.

    Raises
    ------
    ValueError if `type` is not a string or not one of the allowed values.
    '''
    possibleArgs = ['electric wall' , 'metal', 'magnetic wall' , 'periodic' , 'transparent' , 'impedance']
    # Fixed: validation previously compared the raw input (via a needless
    # numpy array comparison) against the lowercase list, so mixed-case values
    # like 'Metal' were wrongly rejected.  Validate the lower-cased value with
    # a plain membership test instead.
    try:
        type = type.lower()
    except AttributeError:
        raise ValueError("Allowed arguments are: 'electric wall' (aka. 'metal') | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' ")
    if type not in possibleArgs:
        raise ValueError("Allowed arguments are: 'electric wall' (aka. 'metal') | 'magnetic wall' | 'periodic' | 'transparent' | 'impedance' ")
    if type == 'metal':
        type = 'electric wall'
        print("set_circ_boundary('metal'): setting `type` to synonym 'electric wall'.")
    global global_CBC
    global_CBC = type.lower()
def get_circ_boundary():
    '''Get boundary type for cylindrical waveguides.
    See `help(pyfimm.set_circ_boundary)` for more info.

    Returns
    -------
    type : string { 'electric wall' | 'magnetic wall' | 'periodic' |
        'transparent' | 'impedance' }, or None if never set.
    '''
    global global_CBC
    if 'global_CBC' not in globals():
        global_CBC = None   # never set -> default to None
    return global_CBC
#def circ_boundary():
# '''Backwards compatibility only. Should Instead get_circ_boundary().'''
# print "Deprecation Warning: circ_boundary(): Use get_circ_boundary() instead."
# return get_circ_boundary()
def get_Nm():
    '''For General Fiber Solver (GFS) or Fibre FDM solver: (min, max) of the
    m-order (azimuthal/axial quantum number) modes to solve for.  This is the
    Theta mode number - how fast the fields vary in the Theta direction;
    m goes from 0 -> infinity.  See "GFS Fibre Solver"/"buff" params or
    Sec. 5.7.3 in the fimmwave manual.

    Returns
    -------
    2-element tuple (nm_min, nm_max); defaults to (0, 1) if unset.
    '''
    global global_Nm
    if 'global_Nm' not in globals():
        global_Nm = (0, 1)   # the default value
    return (global_Nm[0], global_Nm[1])
def set_Nm(nm):
    '''For General Fiber Solver (GFS) or Fibre FDM solver, set (min,max) of
    m-order (azimuthal/axial quantum number) modes to solve for.
    See "GFS Fibre Solver"/"buff" params or Sec. 5.7.3 in fimmwave manual.

    Parameters
    ----------
    nm : integer, OR tuple/list (any iterable) of 2 integers
        (min_nm, max_nm): min & max m-orders to solve. Defaults to (0,1)

    Examples
    --------
    >>> set_Nm(0)
    >>> set_Nm( [0,10] )
    '''
    # Normalize to a two-element list of ints; a scalar means min == max.
    if isinstance(nm, int):
        values = [nm]
    else:
        values = [int(x) for x in nm]
    if len(values) == 1:
        values = values * 2
    if len(values) != 2:
        raise ValueError("`nm` must have two indices: (nm_min, nm_max)")
    if min(values) < 0:
        raise ValueError("set_Nm(): m-order must be 0 or greater.")
    global global_Nm
    global_Nm = values
def get_Np():
    '''For General Fiber Solver (GFS) or Fibre FDM solver, get (min,max) of the
    p-order (polarization number) modes to solve for.
    See Sec. 5.7.3 of the FimmWave manual.

    Returns
    -------
    np : 2-element tuple
        (np_min, np_max): min & max p-order. Defaults to (1,2) if unset.'''
    global global_Np
    if 'global_Np' not in globals():
        global_Np = (1, 2)  # default p-order range
    return (global_Np[0], global_Np[1])
def set_Np(np):
    '''For General Fiber Solver (GFS) or Fibre FDM solver, set the p-order
    (polarization number) modes to solve for. For GFS, p=1 selects the mode
    with Ex=0, p=2 selects Ey=0; Semi-Vectorial GFS also allows 3-4 when m>=1.
    See Sec. 5.7.3 of the FimmWave manual, or `help(pyfimm.set_mode_solver)`.

    Parameters
    ----------
    np : integer, OR tuple/list (any iterable) of 2 integers
        (min_np, max_np): min & max p-orders to solve. Defaults to (1,2) if unset.
    '''
    # Normalize to a two-element list of ints; a scalar means min == max.
    if isinstance(np, int):
        bounds = [np]
    else:
        bounds = [int(x) for x in np]
    if len(bounds) == 1:
        bounds = bounds * 2
    if len(bounds) != 2:
        raise ValueError("`np` must have two indices: (np_min, np_max)")
    if bounds[0] < 1 or bounds[1] > 4:
        raise ValueError("set_Np(): p-order must be betwen 1 and 4.")
    global global_Np
    global_Np = bounds
|
{"/pyfimm/proprietary/ExampleModule.py": ["/pyfimm/__globals.py", "/pyfimm/__Waveguide.py"], "/example3 - Cyl DFB Cavity v4.py": ["/pyfimm/__init__.py"]}
|
30,442
|
rossng/clrs-sorts
|
refs/heads/master
|
/counting_sort.py
|
def counting_sortw(A, get_key=lambda x: x):
    """Convenience wrapper around counting_sort.

    Copies A (the input is never mutated), infers the maximum key from the
    data, and returns a new sorted list.

    get_key is a function that takes a list element and returns its
    non-negative integer key.
    """
    A2 = list(A)
    if not A2:
        # max() raises ValueError on an empty sequence; an empty input is
        # already sorted.
        return A2
    max_key = max(get_key(e) for e in A2)
    return counting_sort(A2, max_key, get_key)
def counting_sort(A, max_key, get_key):
    """Stable counting sort (CLRS): keys are ints in 0..max_key.

    Returns a new list; A is not modified.
    """
    output = [None] * len(A)
    counts = [0] * (max_key + 1)
    # Histogram of keys.
    for item in A:
        counts[get_key(item)] += 1
    # Turn counts into cumulative positions (exclusive upper bounds).
    running = 0
    for key in range(len(counts)):
        running += counts[key]
        counts[key] = running
    # Place elements back-to-front so equal keys keep their relative order.
    for item in reversed(A):
        key = get_key(item)
        counts[key] -= 1
        output[counts[key]] = item
    return output
# Sample inputs for interactive testing; arr3 exercises stability via
# (key, name) pairs sharing equal keys.
arr1 = [2,5,3,0,2,3,0,3]
arr2 = [6,0,2,0,1,3,4,6,1,3,2]
arr3 = [(2,'fred'),(5,'bob'),(3,'steve'),(0,'john'),(2,'gary')
        ,(3,'tarquin'),(0,'albert'),(3,'zeus')]
|
{"/radix_sort.py": ["/counting_sort.py"]}
|
30,443
|
rossng/clrs-sorts
|
refs/heads/master
|
/quicksort.py
|
import random
def quicksortw(A, randomised=False):
    """Copying wrapper: sort a shallow copy of A and return it.

    With randomised=True the pivot is chosen at random.
    """
    work = list(A)
    last = len(work) - 1
    if randomised:
        return randomised_quicksort(work, 0, last)
    return quicksort(work, 0, last)
def quicksort(A, p, r):
    """Sort A[p..r] in place (CLRS quicksort); returns A for convenience."""
    if p >= r:
        # Zero or one element: already sorted.
        return A
    pivot_idx = partition(A, p, r)
    # Recurse on the two halves around the pivot's final position.
    quicksort(A, p, pivot_idx - 1)
    quicksort(A, pivot_idx + 1, r)
    return A
def partition(A, p, r):
    """Lomuto partition of A[p..r] around pivot A[r]; returns pivot's final index."""
    pivot = A[r]
    # `boundary` is the index where the next element <= pivot will go;
    # A[p:boundary] holds elements <= pivot.
    boundary = p
    for j in range(p, r):
        if A[j] <= pivot:
            A[boundary], A[j] = A[j], A[boundary]
            boundary += 1
    # Swap the pivot between the two regions.
    A[boundary], A[r] = A[r], A[boundary]
    return boundary
def randomised_quicksort(A, p, r):
    """Quicksort A[p..r] in place using a random pivot at the top level."""
    if p >= r:
        return A
    q = randomised_partition(A, p, r)
    # Sub-ranges use the deterministic variant, matching CLRS's presentation here.
    quicksort(A, p, q - 1)
    quicksort(A, q + 1, r)
    return A
def randomised_partition(A, p, r):
    """Swap a uniformly random element of A[p..r] into the pivot slot, then partition."""
    # randint(p, r) is defined as randrange(p, r+1): same distribution.
    choice = random.randint(p, r)
    A[r], A[choice] = A[choice], A[r]
    return partition(A, p, r)
# Sample inputs for interactive testing (CLRS partition example and a longer list).
arr1 = [2,8,7,1,3,5,6,4]
arr2 = [13,19,9,5,12,8,7,4,21,2,6,11]
|
{"/radix_sort.py": ["/counting_sort.py"]}
|
30,444
|
rossng/clrs-sorts
|
refs/heads/master
|
/radix_sort.py
|
from counting_sort import counting_sort
def string_radix_sortw(A):
    """Copying wrapper for string_radix_sort.

    Infers the digit count d from the first element (all strings are assumed
    to share one length). Returns a new sorted list; A is not modified.
    """
    A2 = list(A)
    if not A2:
        # Previously crashed here: A2[0] raises IndexError on an empty input.
        return A2
    return string_radix_sort(A2, len(A2[0]))
def string_radix_sort(A, d):
    """Perform a radix sort on a list A of uppercase strings of length d."""
    result = A
    # Least-significant character first; stability of counting_sort makes
    # earlier passes' order survive later ones.
    for pos in range(d - 1, -1, -1):
        result = counting_sort(result, 26, lambda s: ord(s[pos]) - 65)
    return result
# Sample input for interactive testing: fixed-length uppercase strings.
arr1 = ['COW', 'DOG', 'SEA', 'RUG', 'ROW', 'MOB', 'BOX', 'TAB', 'BAR',
        'EAR', 'TAR', 'DIG', 'BIG', 'TEA', 'NOW', 'FOX']
|
{"/radix_sort.py": ["/counting_sort.py"]}
|
30,445
|
rossng/clrs-sorts
|
refs/heads/master
|
/bucket_sort.py
|
import math
import itertools
def bucket_sortw(A):
    """Copying wrapper: bucket-sort a shallow copy of A, leaving A untouched."""
    return bucket_sort(list(A))
def bucket_sort(A):
    """Sorts a list of numbers in the range [0,1)."""
    n = len(A)
    # One bucket per element; value v lands in bucket floor(n*v).
    buckets = [[] for _ in range(n)]
    for value in A:
        buckets[math.floor(n * value)].append(value)
    result = []
    for bucket in buckets:
        # Each bucket is sorted independently (CLRS uses insertion sort here),
        # then the buckets are concatenated in order.
        result.extend(sorted(bucket))
    return result
# Sample inputs for interactive testing; all values lie in [0, 1) as required.
arr1 = [0.78, 0.17, 0.39, 0.26, 0.72, 0.94, 0.21, 0.12, 0.23, 0.68]
arr2 = [0.79, 0.13, 0.16, 0.64, 0.39, 0.20, 0.89, 0.53, 0.71, 0.42]
|
{"/radix_sort.py": ["/counting_sort.py"]}
|
30,446
|
rossng/clrs-sorts
|
refs/heads/master
|
/insertion_sort.py
|
def insertion_sortw(A):
    """Copying wrapper: insertion-sort a shallow copy of A, leaving A untouched."""
    return insertion_sort(list(A))
def insertion_sort(A):
    """In-place insertion sort (CLRS); returns the (mutated) list."""
    for idx in range(1, len(A)):
        current = A[idx]
        pos = idx
        # Shift larger elements of the sorted prefix right until current fits.
        while pos > 0 and A[pos - 1] > current:
            A[pos] = A[pos - 1]
            pos -= 1
        A[pos] = current
    return A
# Sample inputs for interactive testing (CLRS insertion-sort examples).
arr1 = [5,2,4,6,1,3]
arr2 = [3,41,52,26,38,57,9,49]
|
{"/radix_sort.py": ["/counting_sort.py"]}
|
30,448
|
xosmig/tlcp
|
refs/heads/main
|
/tlcp.py
|
#!/usr/bin/env python3
import argparse
import os
import sys
import glob
import shutil
import antlr4
from typing import List
from antlrgenerated.tlcpLexer import tlcpLexer
from antlrgenerated.tlcpParser import tlcpParser
from visitor import TlcpVisitor
# File extension of the meta-config inputs this tool consumes.
EXTENSION = '.meta.cfg'
# Name of the output directory created next to each meta-config file.
GENERATED_MODELS_DIR = 'tlcp_models'
# NOTE(review): presumably the ANTLR hidden-token channel index; it is not
# referenced anywhere in this file — verify before removing.
ANTLR_HIDDEN_CHANNEL = 2
# def dir_path(string):
# if os.path.isdir(string):
# return string
# else:
# raise NotADirectoryError(string)
# def file_path(string):
# if os.path.isfile(string):
# return string
# elif os.path.isdir(string):
# return IsADirectoryError(string)
# else:
# raise FileNotFoundError(string)
def get_metacfg_files(dir_name: str) -> List[str]:
    """Return the paths of all *.meta.cfg files directly inside dir_name."""
    pattern = os.path.join(dir_name, "*" + EXTENSION)
    return glob.glob(pattern)
def process_arguments(args):
    """Expand args.files in place into a flat list of meta-config file paths.

    Directories are replaced by the meta-config files they contain. A missing
    path or a file with the wrong extension aborts with exit code 2; finding
    no files at all exits cleanly with code 0.
    """
    collected = []
    for path in args.files:
        if not os.path.exists(path):
            print("File '{}' not found.".format(path), file=sys.stderr)
            sys.exit(2)
        if os.path.isfile(path):
            if not path.endswith(EXTENSION):
                print("File '{}' is not a {ext} file.".format(path, ext=EXTENSION), file=sys.stderr)
                sys.exit(2)
            collected.append(path)
        if os.path.isdir(path):
            collected.extend(get_metacfg_files(path))
    if not collected:
        print("Found no {ext} files.".format(ext=EXTENSION), file=sys.stderr)
        sys.exit(0)
    args.files = collected
def copy_tla_files(dir_from: str, dir_to: str):
    """Copy every regular *.tla file from dir_from into dir_to (contents only,
    no metadata)."""
    for src in glob.glob(os.path.join(dir_from, "*.tla")):
        if not os.path.isfile(src):
            continue  # skip e.g. directories that happen to match the glob
        dest = os.path.join(dir_to, os.path.basename(src))
        shutil.copyfile(src, dest)
def create_tla_file(cfg_file: str, extend_module: str):
    """Write a minimal stub .tla module next to cfg_file.

    The module name is the .cfg file's basename without its extension, and the
    module simply EXTENDS extend_module and TLC.
    """
    assert cfg_file.endswith(".cfg")
    dir_path = os.path.dirname(cfg_file)
    module_name = os.path.basename(cfg_file)[:-len(".cfg")]
    tla_path = os.path.join(dir_path, module_name + ".tla")
    stub = [
        "---- MODULE {} ----\n".format(module_name),
        "\n",
        "EXTENDS {}, TLC\n".format(extend_module),
        "\n",
        "====\n",
    ]
    with open(tla_path, "w") as out:
        out.writelines(stub)
def process_file(file, args):
    """Parse one .meta.cfg file and generate its TLC models.

    Creates <dir>/tlcp_models/<name>/ (wiping it first when --cleanup is set),
    parses the file with the ANTLR-generated parser, and writes one .cfg plus
    a stub .tla per generated configuration.
    """
    assert file.endswith(EXTENSION)
    metacfg_dir = os.path.dirname(file)
    metacfg_name = os.path.basename(file)[:-len(EXTENSION)]
    models_dir = os.path.join(metacfg_dir, GENERATED_MODELS_DIR, metacfg_name)
    # Wipe stale output before regenerating, if requested.
    if args.cleanup and os.path.isdir(models_dir):
        shutil.rmtree(models_dir)
    os.makedirs(models_dir, exist_ok=True)
    # Standard ANTLR pipeline: file -> lexer -> token stream -> parser -> tree.
    input_stream = antlr4.FileStream(file)
    lexer = tlcpLexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = tlcpParser(stream)
    tree = parser.config()
    if parser.getNumberOfSyntaxErrors() > 0:
        # Don't abort the whole run; other input files may still be valid.
        print("Skipping file '{}' due to syntax errors.".format(file), file=sys.stderr, flush=True)
        return
    configs = TlcpVisitor(basic_name=metacfg_name).visit(tree)
    # Copy the .tla dependencies into each output directory only once.
    already_copied_tla_files_to = set()
    for config in configs:
        if config.path:
            cfg_dir = os.path.join(models_dir, config.path)
            os.makedirs(cfg_dir, exist_ok=True)
        else:
            cfg_dir = models_dir
        if cfg_dir not in already_copied_tla_files_to:
            # copy all tla dependencies
            copy_tla_files(dir_from=metacfg_dir, dir_to=cfg_dir)
            already_copied_tla_files_to.add(cfg_dir)
        cfg_file = os.path.join(cfg_dir, config.name + ".cfg")
        with open(cfg_file, mode="w") as f:
            # f.write returns the character count; sanity-check a full write.
            ret = f.write(config.text)
            assert ret == len(config.text)
        # Each generated .cfg gets a stub .tla module extending the original.
        create_tla_file(cfg_file, extend_module=metacfg_name)
def main():
    """CLI entry point: parse arguments and process every input file."""
    arg_parser = argparse.ArgumentParser(
        description="A preprocessor for TLC configuration files."
                    "Converts {ext} files to TLC .cfg files.".format(ext=EXTENSION))
    arg_parser.add_argument(
        "files",
        metavar="FILE",
        type=str,
        nargs="+",
        help="Path to a {ext} file or a directory with {ext} files.".format(ext=EXTENSION))
    arg_parser.add_argument(
        "--cleanup", "-c",
        default=False,
        action="store_true",
        help="Removes the old {dir} folder before generating new models.".format(dir=GENERATED_MODELS_DIR))
    # TODO: add auto-generated "run_all.sh" scripts in sub-folders, plus
    # --no-bat / --no-sh flags to suppress the per-platform script files.
    args = arg_parser.parse_args()
    process_arguments(args)
    # Also remove a top-level generated-models folder in the working directory.
    if args.cleanup and os.path.exists(GENERATED_MODELS_DIR):
        shutil.rmtree(GENERATED_MODELS_DIR)
    for input_file in args.files:
        process_file(input_file, args)
# Run the preprocessor only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
{"/tlcp.py": ["/visitor.py"]}
|
30,449
|
xosmig/tlcp
|
refs/heads/main
|
/visitor.py
|
from antlrgenerated.tlcpParser import tlcpParser
from antlrgenerated.tlcpVisitor import tlcpVisitor
import antlr4
from typing import Optional, List
from functools import reduce
import os
class Config:
    """One generated TLC configuration: a name, an output sub-path and its text.

    Configs compose like a monoid: `+` concatenates names/texts and joins
    paths, with empty_config() as the identity (used as the reduce seed in
    the visitor).
    """

    def __init__(self, name: str, path: str, text: str):
        self.name = name
        self.text = text
        self.path = path

    def __add__(self, other: 'Config'):
        combined_name = self.name + other.name
        combined_path = os.path.join(self.path, other.path)
        combined_text = self.text + other.text
        return Config(combined_name, combined_path, combined_text)

    def add_name_prefix(self, pref: str, include_in_path: bool) -> 'Config':
        """Prepend pref to the config name (and optionally to the output path)."""
        if self.name:
            new_name = pref + "_" + self.name
        else:
            new_name = pref
        new_path = os.path.join(pref, self.path) if include_in_path else self.path
        return Config(new_name, new_path, self.text)

    @staticmethod
    def empty_config():
        """Identity element: empty name, path and text."""
        return Config("", "", "")
# noinspection PyPep8Naming
class TlcpVisitor(tlcpVisitor):
    """Expands a parsed .meta.cfg tree into a list of concrete Config objects.

    Every `one of` option multiplies the set of configs (a cartesian product
    built in visitBlock via reduce); family-restricted statements are filtered
    against the family currently being expanded. When families are declared,
    the whole tree is expanded once per family.
    """

    def __init__(self, basic_name):
        # Meta-config base name; becomes the prefix of every generated config.
        self.basic_name = basic_name
        # Family currently being expanded (None when the file declares none).
        self.current_family = None
        # All family names declared by the FAMILIES command, if any.
        self.families = []

    def visitWithBuilder(self, node, builders):
        # NOTE(review): not called anywhere in this file — verify external use
        # before removing.
        node.tlcBuilders = builders
        node.accept(self)

    def visitConfig(self, ctx: tlcpParser.ConfigContext) -> List[Config]:
        """Top-level entry point: expand the file, once per declared family."""
        families_commands = get_typed_children(ctx, tlcpParser.FamiliesContext)
        if not families_commands:
            return self.visitConfigWithFamily(ctx, family=None)
        assert(len(families_commands) == 1)
        families_cmd = families_commands[0]
        self.families = [family.getText()
                         for family in get_typed_children(families_cmd, tlcpParser.FamilyNameContext)]
        for family in self.families:
            # '_' is the separator used inside generated config names, so it is
            # forbidden in family names.
            if "_" in family:
                raise RuntimeError("Family name '{}' contains prohibited symbol '_'.".format(family))
        return sum((self.visitConfigWithFamily(ctx, family) for family in self.families), start=[])

    def visitConfigWithFamily(self, ctx: tlcpParser.ConfigContext, family: Optional[str]) -> List[Config]:
        """Expand the file's top block for one family (None = no families)."""
        self.current_family = family
        block = get_typed_child(ctx, tlcpParser.BlockContext)
        name_prefix = self.basic_name + "_" + family if family else self.basic_name
        return [conf.add_name_prefix(name_prefix, include_in_path=True)
                for conf in self.visit(block)]

    def visitBlock(self, ctx: tlcpParser.BlockContext):
        # Cartesian product: each child's list of partial configs is combined
        # with every config accumulated so far (Config.__add__ concatenates).
        return reduce(lambda agg, child: [prefix + suffix for prefix in agg for suffix in self.visit(child)],
                      ctx.getChildren(),
                      [Config.empty_config()])

    def visitFamilyStatement(self, ctx: tlcpParser.FamilyStatementContext) -> List[Config]:
        """Keep a statement only if it applies to the family being expanded."""
        statement_families = self.get_family_statement_families(ctx)
        for family in statement_families:
            if family not in self.families:
                raise RuntimeError("Unknown family '{}'".format(family))
        if self.families and self.current_family not in statement_families:
            # Filtered out: contribute the neutral element so visitBlock's
            # product is unaffected.
            return [Config.empty_config()]
        return self.visit(get_typed_child(ctx, tlcpParser.StatementContext))

    def visitBlockWIthBeginEnd(self, ctx: tlcpParser.BlockWIthBeginEndContext):
        # Spelling ("WIth") matches the generated parser rule name; do not "fix".
        return self.visit(get_typed_child(ctx, tlcpParser.BlockContext))

    def visitOneOf(self, ctx: tlcpParser.OneOfContext) -> List[Config]:
        """Expand a `one of` construct: the union of all its options."""
        options = get_typed_children(ctx, tlcpParser.OptionContext)
        with_subfolders = bool(get_token_children(ctx, tlcpParser.ONE_OF_WITH_SUBFOLDERS))
        for option in options:
            # We use this little hack to pass information down the tree.
            option.tlcp_with_subfolders = with_subfolders
        return sum((self.visit(option) for option in options), start=[])

    def visitOption(self, ctx: tlcpParser.OptionContext) -> List[Config]:
        """Expand one option of a `one of`, tagging its configs with the option name."""
        name = get_typed_child(ctx, tlcpParser.OptionNameContext).getText()
        if "_" in name:
            raise RuntimeError("Option name '{}' contains prohibited symbol '_'.".format(name))
        block = get_typed_child(ctx, tlcpParser.BlockContext)
        # We use the ability of python to create fields on the fly to pass
        # information down the tree.
        # noinspection PyUnresolvedReferences
        return [conf.add_name_prefix(name, include_in_path=ctx.tlcp_with_subfolders)
                for conf in self.visit(block)]

    def visitTlcStatement(self, ctx: tlcpParser.TlcStatementContext) -> List[Config]:
        # just returns the text used for the original TLC statement
        assert ctx.start.getInputStream() is ctx.stop.getInputStream()
        input_stream = ctx.start.getInputStream()
        text = input_stream.getText(ctx.start.start, ctx.stop.stop) + "\n"
        return [Config("", "", text)]

    def get_family_statement_families(self, statement: tlcpParser.FamilyStatementContext):
        """Families named by a family statement; defaults to all declared families."""
        statement_families = [
            family_ctx.getText()
            for family_ctx in get_typed_children(statement, tlcpParser.FamilyNameContext)]
        # statements without families explicitly specified apply to all families
        if not statement_families:
            statement_families = self.families
        return statement_families

    def visitTerminal(self, node):
        # Every token should have been consumed by one of the rule visitors above.
        raise AssertionError("Unreachable state: some statement was not processed.")
# Returns a list of objects of type tp.
def get_typed_children(ctx: antlr4.ParserRuleContext, tp: type) -> list:
    """Filter the direct children of ctx down to instances of tp."""
    return list(filter(lambda node: isinstance(node, tp), ctx.getChildren()))
# Return an object of type tp.
def get_typed_child(ctx: antlr4.ParserRuleContext, tp: type):
    """Return the unique direct child of ctx with type tp; assert otherwise."""
    matches = get_typed_children(ctx, tp)
    if len(matches) == 0:
        raise AssertionError("Object has no children of type '{}'.".format(tp))
    if len(matches) > 1:
        raise AssertionError("Object has multiple children of type '{}'.".format(tp))
    return matches[0]
def get_token_children(ctx: antlr4.ParserRuleContext, token_tp: int) -> List[antlr4.Token]:
    """Collect the terminal children of ctx whose token type equals token_tp."""
    found = []
    for node in ctx.getChildren():
        # noinspection PyUnresolvedReferences
        if isinstance(node, antlr4.TerminalNode) and node.getSymbol().type == token_tp:
            found.append(node)
    return found
def get_token_child(ctx: antlr4.ParserRuleContext, token_tp: int) -> antlr4.Token:
    """Return the unique token child of type token_tp; assert otherwise."""
    matches = get_token_children(ctx, token_tp)
    if len(matches) == 0:
        raise AssertionError("Object has no token children of type '{}'.".format(token_tp))
    if len(matches) > 1:
        raise AssertionError("Object has multiple token children of type '{}'.".format(token_tp))
    return matches[0]
|
{"/tlcp.py": ["/visitor.py"]}
|
30,450
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/classes/class_SiteScraperFactory.py
|
from bs4 import BeautifulSoup as bs
import requests
from classes.class_TutSiteScraper import TutSiteScraper
from classes.class_TvrSiteScraper import TvrSiteScraper
from classes.class_NewsruSiteScraper import NewsruSiteScraper
from classes.class_RiaSiteScraper import RiaSiteScraper
from classes.class_Article import Article
class SiteScraperFactory:
    """Runs the individual site scrapers and collects Article objects.

    Replaces four near-identical copy-pasted branches with a single dispatch
    table and a shared conversion helper (the per-site scraper classes expose
    three parallel lists: title, link, text).
    """

    # Scraper class and start URL for every supported site.
    _SITES = {
        "Newsru": (NewsruSiteScraper, "https://www.newsru.com/world"),
        "Tut": (TutSiteScraper, "https://news.tut.by/world"),
        "Tvr": (TvrSiteScraper, "https://www.tvr.by/news/v_mire/"),
        "Ria": (RiaSiteScraper, "https://ria.ru/world/"),
    }

    @staticmethod
    def _articles_from(scraper):
        """Run one scraper and zip its parallel lists into Article objects."""
        scraper.scraper()
        return [Article(scraper.name, title, link, text)
                for title, link, text in zip(scraper.title, scraper.link, scraper.text)]

    def scrap_sites(self):
        """Scrape all fast sites and store the results in self.all_articles.

        "Ria" is deliberately excluded: its article bodies require one extra
        HTTP request per article, which makes a full scrape very slow. To
        check the Ria scraper, use the [GET] /news/Ria endpoint instead.
        """
        self.all_articles = []
        for site_name in ("Newsru", "Tut", "Tvr"):
            scraper_cls, url = self._SITES[site_name]
            self.all_articles.extend(self._articles_from(scraper_cls(url)))

    def scrap_site(self, site_name):
        """Scrape one site by name into self.site_articles.

        As before, an unknown site_name leaves self.site_articles unset.
        """
        if site_name in self._SITES:
            scraper_cls, url = self._SITES[site_name]
            self.site_articles = self._articles_from(scraper_cls(url))
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,451
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/classes/class_NewsruSiteScraper.py
|
from bs4 import BeautifulSoup as bs
import requests
from classes.class_SiteScraper import SiteScraper
class NewsruSiteScraper(SiteScraper):
    """Scrapes headline, link and teaser text from newsru.com's world page."""

    def __init__(self, link):
        # Start URL of the listing page to scrape.
        self.site_link = link
        # Site identifier used when building Article objects.
        self.name = "Newsru"

    def scraper(self):
        """Fetch the listing page and fill self.title / self.link / self.text."""
        html = requests.get(self.site_link)
        soup = bs(html.content, "html.parser")
        self.title = []
        self.link = []
        self.text = []
        for element in soup.select(".index-news-item"):
            content = element.find("a", {"class": "index-news-title"})
            self.title.append(content.text.strip())
            # site_link[0:22] is presumably the origin "https://www.newsru.com";
            # hrefs on this site appear to be site-relative — TODO confirm.
            self.link.append(self.site_link[0:22] + content.attrs["href"])
            self.text.append(element.find("a", {"class": "index-news-text"}).text.strip())
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,452
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/classes/class_TutSiteScraper.py
|
from bs4 import BeautifulSoup as bs
import requests
from classes.class_SiteScraper import SiteScraper
class TutSiteScraper(SiteScraper):
    """Scrapes headline, link and teaser text from tut.by's world-news page."""

    def __init__(self, link):
        # Start URL of the listing page to scrape.
        self.site_link = link
        # Site identifier used when building Article objects.
        self.name = "Tut"

    def scraper(self):
        """Fetch the listing page and fill self.title / self.link / self.text."""
        html = requests.get(self.site_link)
        soup = bs(html.content, "html.parser")
        self.title = []
        self.link = []
        self.text = []
        for block in soup.select(".news-section.m-rubric"):
            for element in block.select(".news-entry.big.annoticed.time.ni"):
                link = element.select(".entry__link")
                title = element.find("span", {"class": "entry-head _title"}).text
                text = element.find("span", {"class": "entry-note"}).text
                self.title.append(title)
                # hrefs here appear to be usable as-is (no origin is prepended,
                # unlike the Newsru scraper) — TODO confirm.
                self.link.append(link[0].attrs["href"])
                self.text.append(text)
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,453
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/classes/class_SiteScraper.py
|
from abc import ABC, abstractmethod
class SiteScraper(ABC):
    """Abstract interface shared by all site scrapers.

    Concrete subclasses store a start URL in __init__ and, after scraper()
    runs, expose three parallel lists: title, link and text.
    """

    @abstractmethod
    def __init__(self, link):
        """Store the start URL (concrete classes also set a site name)."""
        pass

    @abstractmethod
    def scraper(self):
        """Download and parse the site, populating the title/link/text lists."""
        pass
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,454
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/classes/class_RiaSiteScraper.py
|
from bs4 import BeautifulSoup as bs
import requests
import lxml
from classes.class_SiteScraper import SiteScraper
class RiaSiteScraper(SiteScraper):
    """Scrapes ria.ru's world page; fetches each article page for its body text."""

    def __init__(self, link):
        # Start URL of the listing page to scrape.
        self.site_link = link
        # Site identifier used when building Article objects.
        self.name = "Ria"

    def scraper(self):
        """Fetch the listing, then one extra request per article for its text.

        The per-article requests make this scraper much slower than the
        others, which is why the factory excludes it from the scrape-all path.
        """
        html = requests.get(self.site_link)
        soup = bs(html.content, "lxml")
        self.title = []
        self.link = []
        self.text = []
        for element in soup.select(".list-item"):
            content = element.select(".list-item__content > a")
            # content[1] is presumably the title anchor (content[0] being a
            # thumbnail link) — TODO confirm against the live markup.
            self.title.append(content[1].text)
            self.link.append(content[1].attrs["href"])
            html_article = requests.get(content[1].attrs["href"])
            soup_article = bs(html_article.content, "lxml")
            self.text.append(soup_article.find('div', {"class": "article__text"}).text)
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,455
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/NewsScraper.py
|
from flask import Flask, jsonify
from classes.class_SiteScraperFactory import SiteScraperFactory
app = Flask(__name__)
@app.route("/news")
def get_all_articles():
newsscraper = SiteScraperFactory()
newsscraper.scrap_sites()
articles = []
for element in newsscraper.all_articles:
articles.append({"name": element.name, "title": element.title, "link": element.link, "text": element.text})
return jsonify({"news": articles}), 200
@app.route("/news/<string:site>")
def get_site_article(site: str):
try:
newsscraper = SiteScraperFactory()
newsscraper.scrap_site(site)
articles = []
for element in newsscraper.site_articles:
articles.append({"name": element.name, "title": element.title, "link": element.link, "text": element.text})
return jsonify({site: articles}), 201
except:
return "500 error", 500
@app.errorhandler(500)
def internal_error(error):
    # Plain-text fallback for unhandled server errors.
    return "500 error", 500
@app.errorhandler(404)
def not_found(error):
    # Plain-text response for unknown routes.
    return "404 error", 404
if __name__ == "__main__":
app.run()
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,456
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/classes/class_TvrSiteScraper.py
|
from bs4 import BeautifulSoup as bs
import requests
from classes.class_SiteScraper import SiteScraper
class TvrSiteScraper(SiteScraper):
    """Scrapes headline, link and teaser text from tvr.by's world-news page."""

    def __init__(self, link):
        # Start URL of the listing page to scrape.
        self.site_link = link
        # Site identifier used when building Article objects.
        self.name = "Tvr"

    def scraper(self):
        """Fetch the listing page and fill self.title / self.link / self.text."""
        self.title = []
        self.link = []
        self.text = []
        html = requests.get(self.site_link)
        soup = bs(html.content, "html.parser")
        for element in soup.select(".text"):
            title = element.select(".title >a")
            # hrefs appear to be relative, hence prefixing the listing URL
            # — TODO confirm against the live markup.
            self.link.append(self.site_link + title[0].attrs["href"])
            title = title[0].text
            title = title.strip()
            self.title.append(title)
            text = element.find("p").text
            self.text.append(text)
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,457
|
LenetsEgor/Projects_Python
|
refs/heads/master
|
/classes/class_Article.py
|
class Article:
    """Plain value object holding one scraped news article."""

    def __init__(self, name, title, link, text):
        # Source-site identifier plus the article's headline, URL and body/teaser.
        self.name, self.title, self.link, self.text = name, title, link, text
|
{"/classes/class_SiteScraperFactory.py": ["/classes/class_TutSiteScraper.py", "/classes/class_TvrSiteScraper.py", "/classes/class_NewsruSiteScraper.py", "/classes/class_RiaSiteScraper.py", "/classes/class_Article.py"], "/classes/class_NewsruSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_TutSiteScraper.py": ["/classes/class_SiteScraper.py"], "/classes/class_RiaSiteScraper.py": ["/classes/class_SiteScraper.py"], "/NewsScraper.py": ["/classes/class_SiteScraperFactory.py"], "/classes/class_TvrSiteScraper.py": ["/classes/class_SiteScraper.py"]}
|
30,463
|
Haifasm/Fyyur
|
refs/heads/master
|
/models.py
|
from app import db
class Venue(db.Model):
    """A performance venue; one-to-many with Show (deletes cascade)."""

    __tablename__ = 'Venue'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    city = db.Column(db.String(120), nullable=False)
    state = db.Column(db.String(120), nullable=False)
    address = db.Column(db.String(120), nullable=False)
    phone = db.Column(db.String(120))
    website = db.Column(db.String(500))
    genres = db.Column('genres', db.ARRAY(db.String), nullable=False)
    facebook_link = db.Column(db.String(500))
    image_link = db.Column(db.String(500))
    seeking_talent = db.Column(db.Boolean, nullable=True, default=False)
    seeking_description = db.Column(db.String(1000))
    shows = db.relationship('Show', backref='pVenue', lazy=True, cascade='all, delete')

    def create(self):
        """Insert this venue and commit."""
        db.session.add(self)
        db.session.commit()

    def update(self):
        """Commit pending attribute changes on this (already-tracked) venue.

        BUG FIX: SQLAlchemy's Session has no `update()` method — the previous
        `db.session.update(self)` raised AttributeError at runtime. Mutations
        on a persistent object only need a commit.
        """
        db.session.commit()

    def delete(self):
        """Delete this venue and commit (cascades to its shows)."""
        db.session.delete(self)
        db.session.commit()
class Artist(db.Model):
    """A performing artist; one-to-many with Show (deletes cascade)."""

    __tablename__ = 'Artist'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    city = db.Column(db.String(120), nullable=False)
    state = db.Column(db.String(120), nullable=False)
    phone = db.Column(db.String(120))
    website = db.Column(db.String(500))
    genres = db.Column('genres', db.ARRAY(db.String), nullable=False)
    facebook_link = db.Column(db.String(500), nullable=False)
    image_link = db.Column(db.String(500))
    seeking_venue = db.Column(db.Boolean, default=True)
    seeking_description = db.Column(db.String(1000))
    shows = db.relationship('Show', backref='pArtist', lazy=True, cascade='all, delete')

    def create(self):
        """Insert this artist and commit."""
        db.session.add(self)
        db.session.commit()

    def update(self):
        """Commit pending attribute changes on this (already-tracked) artist.

        BUG FIX: SQLAlchemy's Session has no `update()` method — the previous
        `db.session.update(self)` raised AttributeError at runtime. Mutations
        on a persistent object only need a commit.
        """
        db.session.commit()
class Show(db.Model):
    """An Artist performing at a Venue at a given start time."""

    __tablename__ = 'Show'

    id = db.Column(db.Integer, primary_key=True)
    start_time = db.Column(db.DateTime, nullable=False)
    # ondelete='CASCADE' so shows disappear together with their artist/venue.
    artist_id = db.Column(db.Integer, db.ForeignKey('Artist.id', ondelete='CASCADE'), nullable=False)
    venue_id = db.Column(db.Integer, db.ForeignKey('Venue.id', ondelete='CASCADE'), nullable=False)

    def create(self):
        """Insert this show and commit."""
        db.session.add(self)
        db.session.commit()
|
{"/models.py": ["/app.py"], "/app.py": ["/models.py"]}
|
30,464
|
Haifasm/Fyyur
|
refs/heads/master
|
/app.py
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
import json
import dateutil.parser
from flask_babel import Babel
from flask_migrate import Migrate
import babel
from flask import Flask, render_template, request, Response, flash, redirect, url_for, abort
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from flask_wtf import Form
from forms import *
import sys
import datetime
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
moment = Moment(app)
app.config.from_object('config')
db = SQLAlchemy(app)
migrate = Migrate(app, db, compare_type=True)
#----------------------------------------------------------------------------#
# Models.
#----------------------------------------------------------------------------#
from models import *
#----------------------------------------------------------------------------#
# Filters.
#----------------------------------------------------------------------------#
def format_datetime(value, format='medium'):
    """Jinja filter: render an ISO-ish datetime string for templates.

    'full' and 'medium' map to fixed Babel patterns; any other *format*
    value is handed to Babel unchanged.
    """
    parsed = dateutil.parser.parse(value)
    patterns = {
        'full': "EEEE MMMM, d, y 'at' h:mma",
        'medium': "EE MM, dd, y h:mma",
    }
    format = patterns.get(format, format)
    return babel.dates.format_datetime(parsed, format, locale='en')
app.jinja_env.filters['datetime'] = format_datetime
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def index():
    """Home page."""
    return render_template('pages/home.html')
# Venues
# ----------------------------------------------------------------
#List of all venues
@app.route('/venues')
def venues():
    """List venues grouped by each distinct (city, state) pair."""
    areas = []
    for place in Venue.query.distinct('city', 'state').all():
        grouped = Venue.query.filter(Venue.city == place.city,
                                     Venue.state == place.state).all()
        areas.append({
            'city': place.city,
            'state': place.state,
            'venues': grouped,
        })
    return render_template('pages/venues.html', areas=areas)
#search venues with partial string search and case-insensitive.
@app.route('/venues/search', methods=['POST'])
def search_venues():
    """Case-insensitive partial-match search over venue names."""
    # main.html -> name="search_term"
    term = request.form.get('search_term', '')
    # ILIKE gives case-insensitive pattern matching.
    matches = Venue.query.filter(Venue.name.ilike(f'%{term}%')).all()
    data = [{"id": venue.id, "name": venue.name} for venue in matches]
    response = {"count": len(data), "data": data}
    return render_template('pages/search_venues.html', results=response,
                           search_term=term)
# show venue page with the given venue_id
@app.route('/venues/<int:venue_id>')
def show_venue(venue_id):
venue = Venue.query.get(venue_id)
if venue is None:
abort(404)
shows = Show.query.filter_by(venue_id = venue_id).all()
upcoming_shows = []
past_shows = []
upcoming_shows_count = 0
past_shows_count = 0
current_time = datetime.datetime.now()
for show in shows:
if show.start_time >= current_time:
upcoming_shows_count += 1
record = {
"artist_id": show.artist_id,
"artist_name": Artist.query.get(show.artist_id).name,
"artist_image_link": Artist.query.get(show.artist_id).image_link,
"start_time": str(show.start_time),
}
upcoming_shows.append(record)
else:
past_shows_count += 1
past_show_record = {
"artist_id": show.artist_id,
"artist_name": Artist.query.get(show.artist_id).name,
"artist_image_link": Artist.query.get(show.artist_id).image_link,
"start_time": str(show.start_time),
}
past_shows.append(past_show_record)
data = {
"id": venue_id,
"name": venue.name,
"genres": venue.genres,
"address": venue.address,
"city": venue.city,
"state": venue.state,
"phone": venue.phone,
"website": venue.website,
"facebook_link": venue.facebook_link,
"seeking_talent": venue.seeking_talent,
"seeking_description": venue.seeking_description,
"image_link": venue.image_link,
"past_shows": past_shows,
"upcoming_shows": upcoming_shows,
"past_shows_count": past_shows_count,
"upcoming_shows_count": upcoming_shows_count,
}
return render_template('pages/show_venue.html', venue=data)
# Create and Delete Venues
# ----------------------------------------------------------------
@app.route('/venues/create', methods=['GET'])
def create_venue_form():
    """Render the empty new-venue form."""
    form = VenueForm()
    return render_template('forms/new_venue.html', form=form)
# Create venue
@app.route('/venues/create', methods=['POST'])
def create_venue_submission():
    """Validate and persist a new venue from the submitted VenueForm.

    On success flashes a confirmation and renders home; on validation
    failure re-renders the form with the first error per field.
    """
    form = VenueForm()
    if form.validate_on_submit():
        error = False
        try:
            name = form.name.data
            city = form.city.data
            state = form.state.data
            address = form.address.data
            phone = form.phone.data
            website = form.website.data
            genres = form.genres.data
            image_link = form.image_link.data
            facebook_link = form.facebook_link.data
            seeking_description = form.seeking_description.data
            # A non-empty description implies the venue is seeking talent.
            if seeking_description:
                seeking_talent = True
            else:
                seeking_talent = False
            venue = Venue(name=name, city=city, state=state,
                          address=address, phone=phone, image_link=image_link,
                          facebook_link=facebook_link, website=website, genres=genres,
                          seeking_talent=seeking_talent, seeking_description=seeking_description)
            venue.create()
        except Exception as e:
            error = True
            db.session.rollback()
            print(f'Error ==> {e}')
        finally:
            db.session.close()
        if error:
            # TODO: on unsuccessful db insert, flash an error instead.
            flash('An error occurred. Venue ' + request.form['name'] + ' could not be listed.')
        else:
            flash('Venue ' + request.form['name'] + ' was successfully listed!')
    else:
        # Surface the first validation error for each failing field.
        errors_list = []
        for error in form.errors.values():
            errors_list.append(error[0])
        flash('Invalid submission: \n' + ', '.join(errors_list))
        return render_template('forms/new_venue.html', form=form)
    return render_template('pages/home.html')
# Delete venue
@app.route('/venues/<venue_id>', methods=['DELETE'])
def delete_venue(venue_id):
# TODO: Complete this endpoint for taking a venue_id, and using
# SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.
error = False
try:
venues = Venue.query.get(venue_id)
name = venues.name
venues.delete()
except Exception as e:
error = True
db.session.rollback()
print(f'Error ==> {e}')
finally:
db.session.close()
if error:
flash('Error)')
else:
flash('Venue ' +name+' deleted.')
return 'OK'
# Artists
# ----------------------------------------------------------------
#List of all artists alphabitacly
@app.route('/artists')
def artists():
    """List all artists, ordered alphabetically by name."""
    roster = Artist.query.order_by(Artist.name).all()
    result = [{"id": artist.id, "name": artist.name} for artist in roster]
    return render_template('pages/artists.html', artists=result)
# search arists
@app.route('/artists/search', methods=['POST'])
def search_artists():
    """Case-insensitive partial-match search over artist names."""
    # main.html -> name="search_term"
    search_term = request.form.get('search_term', '')
    # BUGFIX: the filter previously referenced Venue.name, so artist search
    # matched against the venues table instead of artists.
    # ILIKE gives case-insensitive pattern matching.
    results = Artist.query.filter(Artist.name.ilike(f'%{search_term}%')).all()
    data = [{"id": artist.id, "name": artist.name} for artist in results]
    response = {"count": len(data), "data": data}
    return render_template('pages/search_artists.html', results=response,
                           search_term=search_term)
# show artist
@app.route('/artists/<int:artist_id>')
def show_artist(artist_id):
    """Artist detail page: artist fields plus its past/upcoming shows.

    Aborts with 404 when the id is unknown.  Mirrors show_venue: one venue
    lookup per show instead of two identical queries.
    """
    artist = Artist.query.get(artist_id)
    if artist is None:
        abort(404)
    shows = Show.query.filter_by(artist_id=artist_id).all()
    past_shows = []
    upcoming_shows = []
    current_time = datetime.datetime.now()
    for show in shows:
        # Fetch the venue once per show — the previous code issued the same
        # Venue.query.get() twice for every record.
        venue = Venue.query.get(show.venue_id)
        record = {
            "venue_id": show.venue_id,
            "venue_name": venue.name,
            "venue_image_link": venue.image_link,
            "start_time": str(show.start_time)
        }
        if show.start_time >= current_time:
            upcoming_shows.append(record)
        else:
            past_shows.append(record)
    data = {
        "id": artist_id,
        "name": artist.name,
        "genres": artist.genres,
        "city": artist.city,
        "state": artist.state,
        "phone": artist.phone,
        "website": artist.website,
        "facebook_link": artist.facebook_link,
        "seeking_venue": artist.seeking_venue,
        "seeking_description": artist.seeking_description,
        "image_link": artist.image_link,
        "past_shows": past_shows,
        "upcoming_shows": upcoming_shows,
        "past_shows_count": len(past_shows),
        "upcoming_shows_count": len(upcoming_shows),
    }
    return render_template('pages/show_artist.html', artist=data)
# Update
# ----------------------------------------------------------------
#edit artist show fields
@app.route('/artists/<int:artist_id>/edit', methods=['GET'])
def edit_artist(artist_id):
    """Render the edit form pre-populated from the stored artist."""
    # TODO: populate form with fields from artist with ID <artist_id>
    artist = Artist.query.get(artist_id)
    form = ArtistForm(obj=artist)
    return render_template('forms/edit_artist.html', form=form, artist=artist)
#edit artist submit fields
@app.route('/artists/<int:artist_id>/edit', methods=['POST'])
def edit_artist_submission(artist_id):
# TODO: take values from the form submitted, and update existing
# artist record with ID <artist_id> using the new attributes
form = ArtistForm(request.form)
artist = Artist.query.get(artist_id)
if form.validate_on_submit():
error = False
try:
artist.name=form.name.data
artist.city=form.city.data
artist.state=form.state.data
artist.phone=form.phone.data
artist.genres=form.genres.data
artist.website=form.website.data
artist.facebook_link=form.facebook_link.data
artist.image_link=form.image_link.data
artist.seeking_venue=form.seeking_venue.data
artist.seeking_description=form.seeking_description.data
artist.update()
except Exception as e:
error = True
db.session.rollback()
print(f'Error ==> {e}')
finally:
db.session.close()
if error:
flash('Artist ' + request.form['name'] + ' was not updated.')
else:
flash('Artist ' +request.form['name'] + ' was successfully updated.')
else:
errors_list = []
for error in form.errors.values():
errors_list.append(error[0])
flash('Invalid submission: \n' + ', '.join(errors_list))
return render_template('forms/edit_artist.html', form=form, artist=artist)
return redirect(url_for('show_artist', artist_id=artist_id))
#edit venue show fields
@app.route('/venues/<int:venue_id>/edit', methods=['GET'])
def edit_venue(venue_id):
    """Render the edit form pre-populated from the stored venue."""
    # TODO: populate form with values from venue with ID <venue_id>
    venue = Venue.query.get(venue_id)
    form = VenueForm(obj=venue)
    return render_template('forms/edit_venue.html', form=form, venue=venue)
#edit venue submit
@app.route('/venues/<int:venue_id>/edit', methods=['POST'])
def edit_venue_submission(venue_id):
# TODO: take values from the form submitted, and update existing
# venue record with ID <venue_id> using the new attributes
error = False
venue = Venue.query.get(venue_id)
form = VenueForm(request.form)
if form.validate_on_submit():
try:
venue.name=form.name.data
venue.city=form.city.data
venue.state=form.state.data
venue.address=form.address.data
venue.phone=form.phone.data
venue.genres=form.genres.data
venue.facebook_link=form.facebook_link.data
venue.image_link=form.image_link.data
venue.website=form.website.data
venue.seeking_description=form.seeking_description.data
if venue.seeking_description:
venue.seeking_talent = True
else:
venue.seeking_talent = False
venue.update()
except Exception as e:
error = True
db.session.rollback()
print(f'Error ==> {e}')
finally:
db.session.close()
if error:
flash('Error! Venue ' + request.form['name'] + ' was not updated.')
else:
flash( 'Venue ' + request.form['name'] + ' was successfully updated.')
else:
errors_list = []
for error in form.errors.values():
errors_list.append(error[0])
flash('Invalid submission: \n' + ', '.join(errors_list))
return render_template('forms/edit_venue.html', form=form)
return redirect(url_for('show_venue', venue_id=venue_id))
# Create Artist
# ----------------------------------------------------------------
@app.route('/artists/create', methods=['GET'])
def create_artist_form():
    """Render the empty new-artist form."""
    form = ArtistForm()
    return render_template('forms/new_artist.html', form=form)
@app.route('/artists/create', methods=['POST'])
def create_artist_submission():
    """Validate and persist a new artist from the submitted ArtistForm."""
    # called upon submitting the new artist listing form
    # TODO: insert form data as a new Venue record in the db, instead
    # TODO: modify data to be the data object returned from db insertion
    form = ArtistForm()
    if form.validate_on_submit():
        error = False
        try:
            name = form.name.data
            city = form.city.data
            state = form.state.data
            phone = form.phone.data
            genres = form.genres.data
            website = form.website.data
            image_link = form.image_link.data
            facebook_link = form.facebook_link.data
            seeking_description = form.seeking_description.data
            # A non-empty description implies the artist is seeking a venue.
            if seeking_description:
                seeking_venue = True
            else:
                seeking_venue = False
            artist = Artist(name=name, city=city, state=state,
                            phone=phone, genres=genres, image_link=image_link, website=website,
                            facebook_link=facebook_link, seeking_venue=seeking_venue,
                            seeking_description=seeking_description)
            artist.create()
        except Exception as e:
            error = True
            db.session.rollback()
            print(f'Error ==> {e}')
        finally:
            db.session.close()
        if error:
            # TODO: on unsuccessful db insert, flash an error instead.
            flash('An error occurred. Artist ' + request.form['name'] + ' could not be listed.')
        else:
            flash('Artist ' + request.form['name'] + ' was successfully listed!')
    else:
        # Surface the first validation error for each failing field.
        errors_list = []
        for error in form.errors.values():
            errors_list.append(error[0])
        flash('Invalid submission: \n' + ', '.join(errors_list))
        return render_template('forms/new_artist.html', form=form)
    return render_template('pages/home.html')
# Shows
# ----------------------------------------------------------------
#list of all shows
@app.route('/shows')
def shows():
    """List every show, ordered by start time.

    One venue and one artist lookup per show — the previous code queried
    the artist row twice (name, then image_link).
    """
    data = []
    for show in Show.query.order_by('start_time').all():
        venue = Venue.query.filter_by(id=show.venue_id).first()
        artist = Artist.query.filter_by(id=show.artist_id).first()
        data.append({
            "venue_id": show.venue_id,
            "venue_name": venue.name,
            "artist_id": show.artist_id,
            "artist_name": artist.name,
            "artist_image_link": artist.image_link,
            "start_time": format_datetime(str(show.start_time))
        })
    return render_template('pages/shows.html', shows=data)
@app.route('/shows/create')
def create_shows():
    """Render the empty new-show form."""
    # renders form. do not touch.
    form = ShowForm()
    return render_template('forms/new_show.html', form=form)
#create show
@app.route('/shows/create', methods=['POST'])
def create_show_submission():
# called to create new shows in the db, upon submitting new show listing form
# TODO: insert form data as a new Show record in the db, instead
form = ShowForm()
if form.validate_on_submit():
error = False
try:
artist_id = form.artist_id.data
venue_id = form.venue_id.data
start_time = form.start_time.data
show = Show(artist_id=artist_id, venue_id=venue_id, start_time=start_time)
show.create()
except Exception as e:
error = True
db.session.rollback()
print(f'Error ==> {e}')
finally:
db.session.close()
if error:
# TODO: on unsuccessful db insert, flash an error instead.
flash('An error occurred. Show could not be saved.')
else:
flash('Show was successfully saved.')
else:
errors_list = []
for error in form.errors.values():
errors_list.append(error[0])
flash('Invalid submission: \n' + ', '.join(errors_list))
return render_template('forms/new_show.html', form=form)
return render_template('pages/home.html')
@app.errorhandler(404)
def not_found_error(error):
    """Custom 404 page."""
    return render_template('errors/404.html'), 404
@app.errorhandler(500)
def server_error(error):
    """Custom 500 page."""
    return render_template('errors/500.html'), 500
# Outside debug mode, mirror INFO+ log records to error.log.
if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
    # Run the Flask development server on the default port (5000).
    app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
|
{"/models.py": ["/app.py"], "/app.py": ["/models.py"]}
|
30,465
|
Haifasm/Fyyur
|
refs/heads/master
|
/migrations/versions/1b677a6dac86_.py
|
"""empty message
Revision ID: 1b677a6dac86
Revises: 56d782f53a45
Create Date: 2020-10-05 12:14:09.802014
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '1b677a6dac86'
down_revision = '56d782f53a45'
branch_labels = None
depends_on = None
def upgrade():
    """Rename Show columns to snake_case and add ON DELETE CASCADE FKs."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('Show', sa.Column('artist_id', sa.Integer(), nullable=False))
    op.add_column('Show', sa.Column('start_time', sa.DateTime(), nullable=False))
    op.add_column('Show', sa.Column('venue_id', sa.Integer(), nullable=False))
    op.drop_constraint('Show_artistId_fkey', 'Show', type_='foreignkey')
    op.drop_constraint('Show_venueId_fkey', 'Show', type_='foreignkey')
    op.create_foreign_key(None, 'Show', 'Venue', ['venue_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'Show', 'Artist', ['artist_id'], ['id'], ondelete='CASCADE')
    op.drop_column('Show', 'venueId')
    op.drop_column('Show', 'start')
    op.drop_column('Show', 'artistId')
    # ### end Alembic commands ###
def downgrade():
    """Restore camelCase Show columns and non-cascading foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('Show', sa.Column('artistId', sa.INTEGER(), autoincrement=False, nullable=False))
    op.add_column('Show', sa.Column('start', postgresql.TIMESTAMP(), autoincrement=False, nullable=False))
    op.add_column('Show', sa.Column('venueId', sa.INTEGER(), autoincrement=False, nullable=False))
    # NOTE(review): dropping by constraint name None relies on the dialect
    # resolving the auto-generated FK names — confirm on your database.
    op.drop_constraint(None, 'Show', type_='foreignkey')
    op.drop_constraint(None, 'Show', type_='foreignkey')
    op.create_foreign_key('Show_venueId_fkey', 'Show', 'Venue', ['venueId'], ['id'])
    op.create_foreign_key('Show_artistId_fkey', 'Show', 'Artist', ['artistId'], ['id'])
    op.drop_column('Show', 'venue_id')
    op.drop_column('Show', 'start_time')
    op.drop_column('Show', 'artist_id')
    # ### end Alembic commands ###
|
{"/models.py": ["/app.py"], "/app.py": ["/models.py"]}
|
30,510
|
WSnettverksprog/simple_ws
|
refs/heads/master
|
/ws_example.py
|
from simple_ws import WebSocket
class WSHandler(WebSocket):
    """Demo handler: echoes every incoming message to all open clients."""

    def on_message(self, msg, client):
        # Broadcast to everyone currently connected (including the sender).
        for peer in self.clients:
            if peer.is_open():
                peer.write_message(msg)

    def on_open(self, client):
        print("Client connected!")

    def on_close(self, client):
        print("Client left...")

    def on_ping(self, client):
        print("Recieved ping!")

    def on_pong(self, client):
        print("Recieved pong!")
# Listen on all interfaces, port 8080, with permessage-deflate enabled and
# server-initiated pings disabled.  Constructing the handler starts the
# asyncio event loop and blocks.
host = ''
port = 8080
ws = WSHandler(host, port, compression=True, ping=False)
|
{"/ws_example.py": ["/simple_ws/__init__.py"], "/test/test_ws_frame.py": ["/simple_ws/__init__.py"], "/simple_ws/__init__.py": ["/simple_ws/WebSocket.py"], "/test/test_request_parser.py": ["/simple_ws/__init__.py"], "/test/test_frame_reader.py": ["/simple_ws/__init__.py"]}
|
30,511
|
WSnettverksprog/simple_ws
|
refs/heads/master
|
/test/test_ws_frame.py
|
import unittest
from simple_ws import WebSocketFrame
class WebSocketFrameTestMethods(unittest.TestCase):
    """Round-trip test: a constructed frame parses back to the same data."""

    def test_construct_parse(self):
        frame = WebSocketFrame(opcode=WebSocketFrame.TEXT, payload="Test", max_frame_size=8192)
        data = frame.construct()
        # Should only be 1 frame with max_frame_size=8192
        if len(data) != 1:
            self.fail("More than 1 frame")
        data = data[0]
        # ignore_mask: server-built frames are unmasked, so skip mask checks.
        decoded_frame = WebSocketFrame(raw_data=data,ignore_mask=True)
        self.assertEqual(frame.opcode, decoded_frame.opcode)
        self.assertEqual(frame.payload, bytes(decoded_frame.payload))
|
{"/ws_example.py": ["/simple_ws/__init__.py"], "/test/test_ws_frame.py": ["/simple_ws/__init__.py"], "/simple_ws/__init__.py": ["/simple_ws/WebSocket.py"], "/test/test_request_parser.py": ["/simple_ws/__init__.py"], "/test/test_frame_reader.py": ["/simple_ws/__init__.py"]}
|
30,512
|
WSnettverksprog/simple_ws
|
refs/heads/master
|
/simple_ws/__init__.py
|
from .WebSocket import *
|
{"/ws_example.py": ["/simple_ws/__init__.py"], "/test/test_ws_frame.py": ["/simple_ws/__init__.py"], "/simple_ws/__init__.py": ["/simple_ws/WebSocket.py"], "/test/test_request_parser.py": ["/simple_ws/__init__.py"], "/test/test_frame_reader.py": ["/simple_ws/__init__.py"]}
|
30,513
|
WSnettverksprog/simple_ws
|
refs/heads/master
|
/test/test_request_parser.py
|
import unittest
from simple_ws import RequestParser
class RequestParserTestMethods(unittest.TestCase):
    """Checks RequestParser splits upgrade-request headers correctly."""

    def test_valid_request(self):
        rp = RequestParser()
        input_head = "GET / HTTP/1.1\r\n" \
                     "Host: localhost:8080\r\n" \
                     "Connection: Upgrade\r\n" \
                     "Pragma: no-cache\r\n" \
                     "Cache-Control: no-cache\r\n" \
                     "Upgrade: websocket\r\n"
        rp.parse_request(input_head)
        print(rp.headers)
        self.assertEqual(rp.headers["Host"], "localhost:8080", "Asserting correct host")
        self.assertEqual(rp.headers["Connection"], "Upgrade", "Asserting correct connection")
        self.assertEqual(rp.headers["Pragma"], "no-cache", "Asserting correct pragma")
        self.assertEqual(rp.headers["Cache-Control"], "no-cache", "Asserting correct cache-control")
        self.assertEqual(rp.headers["Upgrade"], "websocket", "Asserting correct upgrade")
|
{"/ws_example.py": ["/simple_ws/__init__.py"], "/test/test_ws_frame.py": ["/simple_ws/__init__.py"], "/simple_ws/__init__.py": ["/simple_ws/WebSocket.py"], "/test/test_request_parser.py": ["/simple_ws/__init__.py"], "/test/test_frame_reader.py": ["/simple_ws/__init__.py"]}
|
30,514
|
WSnettverksprog/simple_ws
|
refs/heads/master
|
/simple_ws/WebSocket.py
|
import asyncio
import hashlib
import base64
import struct
import time
import zlib
# Module-wide asyncio event loop shared by the server and all client tasks.
loop = asyncio.get_event_loop()
class RequestParser:
    """Parses a client's HTTP upgrade request and builds the 101 response.

    Parsed headers land in ``self.headers``; everything after the first
    blank line is kept verbatim in ``self.body``.
    """

    # Fixed GUID from RFC 6455, mixed into Sec-WebSocket-Accept.
    ws_const = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

    def __init__(self, req=None):
        self.headers = {}
        self.body = ""
        if req is not None:
            self.parse_request(req)

    def parse_request(self, req):
        """Populate self.headers / self.body from the raw request *req*."""
        data = req.split("\r\n\r\n")
        headers = data[0]
        self.body = "\r\n\r\n".join(data[1:])
        for line in headers.split("\r\n"):
            try:
                header_line = line.split(":")
                if (len(header_line) < 2):
                    raise Exception
                key = header_line[0].strip()
                if key == "Sec-WebSocket-Extensions":
                    # Normalize the offered extensions to a lowercase list.
                    l = header_line[1].split(";")
                    extensions = list(map(lambda line: line.strip().lower(), l))
                    self.headers[key] = extensions
                else:
                    # BUGFIX: this assignment used to run unconditionally,
                    # clobbering the extensions list built above with the raw
                    # header string.
                    self.headers[key] = ":".join(header_line[1:]).strip()
            except Exception:
                # Either the request line ("GET ...") or a malformed header.
                if "GET" in line:
                    self.headers["HTTP"] = line.lower()
                else:
                    self.headers[line] = None

    def does_support_compression(self):
        """True when the client offered permessage-deflate (RFC 7692)."""
        try:
            extensions = self.headers["Sec-WebSocket-Extensions"]
            if "permessage-deflate" in extensions:
                return True
        except KeyError:
            pass
        return False

    def is_valid_request(self, header):
        """Assert *header* is a well-formed WebSocket upgrade request.

        Raises AssertionError when a required field is missing or wrong;
        returns True otherwise.
        """
        try:
            assert "get" in header["HTTP"].lower()
            assert header["Host"] is not None
            assert header["Upgrade"].lower() == "websocket"
            assert header["Connection"].lower() == "upgrade"
            assert header["Sec-WebSocket-Key"] is not None
            assert int(header["Sec-WebSocket-Version"]) == 13
        except KeyError as e:
            raise AssertionError(str(e.args) + " is missing from upgrade request")
        return True

    @staticmethod
    def create_update_header(key, compression=False):
        """Build the '101 Switching Protocols' handshake response for *key*."""
        const = RequestParser.ws_const
        # Sec-WebSocket-Accept = base64(sha1(client key + GUID)).
        m = hashlib.sha1()
        m.update(str.encode(key))
        m.update(str.encode(const))
        hashed = m.digest()
        key = base64.b64encode(hashed)
        header = "HTTP/1.1 101 Switching Protocols\r\n"
        header += "Upgrade: websocket\r\n"
        header += "Connection: Upgrade\r\n"
        if compression:
            header += "Sec-WebSocket-Extensions: permessage-deflate; client_max_window_bits=8\r\n"
        header += "Sec-WebSocket-Accept: " + key.decode("utf-8") + "\r\n\r\n"
        return header
class Decompressor:
    """Incremental raw-DEFLATE inflater for permessage-deflate messages."""

    def __init__(self):
        # Negative wbits => raw deflate stream (no zlib header/checksum).
        self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)

    def decompress(self, message):
        """Inflate one complete message; mutates *message* by re-appending
        the 0x00 0x00 0xff 0xff flush tail the sender stripped (RFC 7692)."""
        message.extend(b'\x00\x00\xff\xff')
        return self.decompressor.decompress(message)
class Compressor:
    """Incremental raw-DEFLATE compressor for permessage-deflate messages."""

    def __init__(self):
        # Raw deflate (negative wbits), level 6, small 8-bit window.
        self.compressor = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS, 8)

    def compress(self, message):
        """Deflate *message*, dropping the trailing flush marker (RFC 7692).

        BUGFIX: the return value of ``compressor.compress()`` was previously
        discarded, losing any output the compressor emitted before the
        flush — larger messages came out truncated/corrupt.
        """
        data = self.compressor.compress(message)
        data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
        assert data.endswith(b'\x00\x00\xff\xff')
        return data[:-4]
class WebSocketFrame:
    """One WebSocket frame: builds outgoing frames and parses raw bytes.

    Wire format per RFC 6455; optional permessage-deflate (RFC 7692) when
    ``compression=True``.  Integer comparisons use ``==`` rather than
    ``is`` — the old identity checks only worked via CPython's small-int
    caching and emit SyntaxWarning on modern Pythons.
    """

    # RFC-specific opcodes
    CONTINUOUS = 0x0
    TEXT = 0x1
    BINARY = 0x2
    CLOSE = 0x8
    PING = 0x9
    PONG = 0xA

    def has_mask(self):
        return self.mask is not None

    def __init__(self, opcode=TEXT, payload="", mask=None, raw_data=None, max_frame_size=8192, compression=False,
                 ignore_mask=False):
        self.opcode = opcode
        # Text payloads arrive as str; encode once so construct() sees bytes.
        if opcode == WebSocketFrame.TEXT and payload:
            self.payload = str.encode(payload)
        else:
            self.payload = payload
        self.mask = mask
        self.incomplete_message = False
        self.frame_size = 0
        self.max_frame_size = max_frame_size
        self.__compression = compression
        self.compressor = Compressor()
        self.__ignore_mask = ignore_mask  # Used for unit test
        # Parse message if raw_data isn't None
        if raw_data is not None:
            self.__parse(raw_data)

    """
    Desc: Creates a list of frames with data to send
    Input:
        - opcode: int: 0 = Continous message, 1 = Msg is text, 2 = Msg is binary, 8 = Close, 9 = ping, 10 = pong
        - fin: bool: True = last message, False = more messages to come
        - msg: data to be sendt
    """
    def construct(self):
        frames = []
        if self.__compression and self.payload:
            self.payload = self.compressor.compress(self.payload)
        l = len(self.payload)
        frame_num = 0
        # Split the payload into max_frame_size chunks; only the last chunk
        # carries the FIN bit, later chunks use the CONTINUOUS opcode.
        while l >= 0:
            finbit = 128 if (l <= self.max_frame_size) else 0
            opcode = self.opcode if (frame_num == 0) else WebSocketFrame.CONTINUOUS
            start = self.max_frame_size * frame_num
            end = min(self.max_frame_size + start, l + start)
            payload = self.payload[start:end]
            frames.append(self.__make_frame(finbit, opcode, payload))
            frame_num += 1
            l -= self.max_frame_size
        return frames

    def __make_frame(self, finbit, opcode, payload):
        # RSV1 marks a compressed message; only data frames may carry it.
        rsv1_compress = 0x0
        if self.__compression and (opcode == 0x0 or opcode == 0x1 or opcode == 0x2):
            rsv1_compress = 0x40
        frame = bytearray(struct.pack("B", opcode | finbit | rsv1_compress))
        # 7-bit, 16-bit or 64-bit payload-length encoding per RFC 6455.
        l = len(payload)
        if l < 126:
            length = struct.pack("B", l)
        elif l < 65536:
            l_code = 126
            length = struct.pack("!BH", l_code, l)
        else:
            l_code = 127
            length = struct.pack("!BQ", l_code, l)
        frame.extend(length)
        frame.extend(payload)
        return frame

    def __unmask(self, bit_tuple):
        if self.__ignore_mask:
            return bit_tuple
        # Client payloads are XOR-masked with a rotating 4-byte key.
        res = []
        c = 0
        for byte in bit_tuple:
            res.append(byte ^ self.mask[c % 4])
            c += 1
        return bytes(res)
        # return bytes(res).decode()

    def __parse(self, raw_data):
        offset = 0
        head, payload_len = struct.unpack_from("BB", raw_data)
        offset += 2
        self.fin = head & 0x80 == 0x80
        self.compressed = head & 0x40 == 0x40
        self.opcode = head & 0xF
        has_mask = payload_len & 0x80 == 0x80
        if not has_mask and not self.__ignore_mask:
            raise Exception("Frame without mask")
        l = payload_len & 0x7F
        try:
            if l < 126:
                if not self.__ignore_mask:
                    self.mask = struct.unpack_from("BBBB", raw_data, offset=offset)
                    offset += 4
                self.frame_size = l + offset
                self.payload = self.__unmask(struct.unpack_from("B" * l, raw_data, offset=offset))
            elif l == 126:
                # Extended 16-bit length.
                l = struct.unpack_from("!H", raw_data, offset=offset)[0]
                offset += 2
                if not self.__ignore_mask:
                    self.mask = struct.unpack_from("BBBB", raw_data, offset=offset)
                    offset += 4
                self.frame_size = l + offset
                # Not all bytes of this frame have arrived yet.
                if l > len(raw_data) - offset:
                    self.incomplete_message = True
                    return
                self.incomplete_message = False
                self.payload = self.__unmask(struct.unpack_from("B" * l, raw_data, offset=offset))
            else:
                # Extended 64-bit length.
                l = struct.unpack_from("!Q", raw_data, offset=offset)[0]
                offset += 8
                if not self.__ignore_mask:
                    self.mask = struct.unpack_from("BBBB", raw_data, offset=offset)
                    offset += 4
                self.frame_size = l + offset
                self.payload = self.__unmask(struct.unpack_from("B" * l, raw_data, offset=offset))
        except:
            raise Exception("Frame does not follow protocol")
        # if self.opcode == WebSocketFrame.TEXT:
        #     self.payload = self.payload.decode('utf-8')
class FrameReader:
    """Reassembles complete WebSocket messages from fragmented TCP reads."""

    def __init__(self):
        # Payload accumulated across continuation frames.
        self.current_message = bytearray()
        # Raw bytes buffered until a whole frame is available.
        self.recieved_data = bytearray()
        self.opcode = -1
        self.frame_size = 0
        self.compressed = False
        self.messages = []
        self.decompresser = Decompressor()

    def read_message(self, data, compression=False):
        """Feed *data* in; return a list of finished (opcode, payload) tuples."""
        message_rest = bytearray()
        self.recieved_data.extend(data)
        # Wait for more bytes while the pending frame is known to be bigger.
        if len(self.recieved_data) < self.frame_size:
            return []
        frame = WebSocketFrame(raw_data=self.recieved_data, compression=compression)
        self.frame_size = frame.frame_size
        if frame.incomplete_message:
            return []
        else:
            # Any bytes beyond this frame belong to the next one.
            if len(self.recieved_data) > frame.frame_size:
                message_rest = self.recieved_data[frame.frame_size:]
                self.recieved_data = bytearray()
            # Continuation frames keep the opcode of the frame they continue.
            if frame.opcode is not WebSocketFrame.CONTINUOUS:
                self.opcode = frame.opcode
                self.compressed = frame.compressed
            self.current_message.extend(frame.payload)
            if frame.fin:
                if compression and self.compressed:
                    self.current_message = self.decompresser.decompress(self.current_message)
                out = (frame.opcode, self.current_message)
                self.messages.append(out)
                self.current_message = bytearray()
                self.recieved_data = bytearray()
                self.frame_size = 0
            if len(message_rest) > 0:
                # Recurse on leftover bytes (they may hold further frames).
                return self.read_message(message_rest, compression=compression)
            else:
                messages = self.messages
                self.messages = []
                return messages
        return []
class WebSocket:
    """Minimal asyncio WebSocket server; subclass and override the on_* hooks.

    NOTE: constructing an instance starts the event loop and blocks
    (``loop.run_forever()``).
    """

    def __init__(self, host, port, ping=True, ping_interval=5, buffer_size=8192, max_frame_size=8192,
                 max_connections=10, compression=True):
        self.clients = []
        self.host = host
        self.port = port
        self.ping = ping
        self.ping_interval = ping_interval
        self.buffer_size = buffer_size
        self.max_frame_size = max_frame_size
        self.compression = compression
        # NOTE(review): max_connections is accepted but never enforced here.
        self.server = asyncio.start_server(client_connected_cb=self.__client_connected, host=host, port=port,
                                           loop=loop)
        loop.run_until_complete(self.server)
        loop.run_forever()

    async def __client_connected(self, reader, writer):
        # Each accepted connection gets a Client that schedules its own tasks.
        client = Client(server=self, reader=reader, writer=writer, buffer_size=self.buffer_size)
        self.clients.append(client)

    def on_disconnect(self, client):
        """Internal: drop *client* from the roster, then invoke on_close."""
        self.clients.remove(client)
        self.on_close(client)

    def on_open(self, client):
        # Override to handle connections on open
        return None

    def on_message(self, msg, client):
        # Override to handle messages from client
        return None

    def on_error(self, err, client):
        # Override to handle error
        return None

    def on_close(self, client):
        # Override to handle closing of client
        return None

    def on_ping(self, client):
        # Runs when ping is sent
        return None

    def on_pong(self, client):
        # Runs when pong is received
        return None
class Client:
CONNECTING = 0
OPEN = 1
CLOSED = 2
    def __init__(self, server: WebSocket, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, buffer_size: int):
        """Per-connection state; schedules the read loop (and optional pings)."""
        self.server = server
        self.reader = reader
        self.writer = writer
        self.buffer_size = buffer_size
        self.status = Client.CONNECTING
        self.sending_continuous = False
        self._close_sent = False
        self.__close_received = False
        self.__frame_reader = FrameReader()
        self.__pong_received = False
        self.__last_frame_received = time.time()
        self.rec = 0
        # Create async task to handle client data
        loop.create_task(self.__wait_for_data())
        # Create async task to send pings
        if self.server.ping:
            loop.create_task(self.__send_ping())
    def __send_frames(self, frames):
        """Write each prepared frame to the transport, in order."""
        for f in frames:
            self.__send_bytes(f)
    def __send_bytes(self, data):
        """Queue raw bytes on the stream writer (no explicit drain)."""
        self.writer.write(data)
    def write_message(self, msg, binary=False):
        """Send *msg* to this client as one (possibly fragmented) message."""
        opcode = WebSocketFrame.BINARY if binary else WebSocketFrame.TEXT
        frame = WebSocketFrame(opcode=opcode, payload=msg, max_frame_size=self.server.max_frame_size, compression=self.server.compression)
        self.__send_frames(frame.construct())
    def is_open(self):
        """True once the handshake completed and the socket is not closed."""
        return self.status == Client.OPEN
    def __upgrade(self, key, compression=False):
        """Send the 101 handshake response and mark this client OPEN."""
        if self.status == Client.OPEN:
            return
        update_header = RequestParser.create_update_header(key, compression=compression)
        self.__send_bytes(str.encode(update_header))
        self.status = Client.OPEN
        self.server.on_open(self)
    def __close_socket(self):
        """Close the transport once and notify the server (idempotent)."""
        if self.status == Client.CLOSED:
            return
        self.status = Client.CLOSED
        self.writer.close()
        self.server.on_disconnect(self)
    async def __send_ping(self):
        """Ping loop: drops the connection when no pong arrives in time."""
        # Sends ping if more than 5 seconds since last message received
        while self.status != Client.CLOSED:
            # Skip pinging while traffic has been seen within the last 5 s.
            if (time.time() - self.__last_frame_received) * 1000 < 5000:
                await asyncio.sleep(self.server.ping_interval)
                continue
            self.__pong_received = False
            frame = WebSocketFrame(opcode=WebSocketFrame.PING)
            self.__send_frames(frame.construct())
            await asyncio.sleep(self.server.ping_interval)
            if not self.__pong_received:
                # NOTE(review): close() is defined beyond this excerpt.
                self.close(1002, "Pong not recieved")
    def __send_pong(self):
        """Reply to a received ping with a pong control frame."""
        frame = WebSocketFrame(opcode=WebSocketFrame.PONG, max_frame_size=self.server.max_frame_size)
        self.__send_frames(frame.construct())
async def __wait_for_data(self):
while self.status != Client.CLOSED:
data = await self.reader.read(self.buffer_size)
if len(data) == 0:
self.__close_socket()
return
if self.status == Client.CONNECTING:
req = RequestParser()
try:
data = data.decode('utf-8')
req.parse_request(data)
except Exception as e:
raise UnicodeDecodeError(
"Error when decoding upgrade request to unicode ( " + str(e) + " )") from None
try:
req.is_valid_request(req.headers)
if self.server.compression and req.does_support_compression():
self.server.compression = True
self.__upgrade(req.headers["Sec-WebSocket-Key"], compression=True)
else:
self.server.compression = False
self.__upgrade(req.headers["Sec-WebSocket-Key"])
except AssertionError as a:
self.__close_socket()
raise Exception("Upgrade request does not follow protocol ( " + str(a) + " )") from None
elif self.status == Client.OPEN:
try:
messages = self.__frame_reader.read_message(data, compression=self.server.compression)
for data in messages:
self.__process_frame(data[0], data[1])
except Exception as e:
self.close(1002, "Received invalid frame")
raise Exception("Invalid frame received, closing connection (" + str(e) + ")")
else:
raise Exception("Recieved message from client who was not open or connecting")
    def __process_frame(self, opcode, message):
        """Dispatch one complete websocket frame by opcode.

        TEXT payloads are decoded to str before reaching the server
        callback; BINARY payloads are passed through as bytes.
        """
        # Any frame counts as liveness for the ping task.
        self.__last_frame_received = time.time()
        if opcode <= WebSocketFrame.CONTINUOUS:
            # Continuation fragments are presumably merged by FrameReader
            # before reaching this method — nothing left to do here.
            return
        elif opcode == WebSocketFrame.TEXT:
            message = message.decode('utf-8')
            self.server.on_message(message, self)
        elif opcode == WebSocketFrame.BINARY:
            self.server.on_message(message, self)
        elif opcode == WebSocketFrame.CLOSE:
            self.__close_received = True
            self.__close_conn_res()
        elif opcode == WebSocketFrame.PING:
            self.__send_pong()
            self.server.on_ping(self)
        elif opcode == WebSocketFrame.PONG:
            self.__pong_received = True
            self.server.on_pong(self)
    # Close-handshake helpers. __close_conn_res must be called whenever a
    # close frame is sent or received: it replies with a close frame if we
    # have not sent one yet, and closes the socket once a close frame has
    # been both sent and received.
    async def __async_force_close(self, timeout):
        # Wait `timeout` seconds for the peer's close frame; if it never
        # arrives, tear the socket down anyway.
        await asyncio.sleep(timeout)
        if not self.__close_received:
            self.__close_socket()
    def __force_close(self, timeout):
        # Schedule the forced teardown; `loop` is presumably the
        # module-level asyncio event loop — TODO confirm.
        loop.create_task(self.__async_force_close(timeout))
# Call this class to respond to a close connection request
def __close_conn_res(self):
if not self._close_sent:
frame = WebSocketFrame(opcode=WebSocketFrame.CLOSE, max_frame_size=self.server.max_frame_size)
self.__send_frames(frame.construct())
self._close_sent = True
self.__close_socket()
else:
self.__close_socket()
# Call class to request closing of connection to client
def close(self, status, reason):
# Status and reason not implemented
if not self._close_sent:
frame = WebSocketFrame(opcode=WebSocketFrame.CLOSE, max_frame_size=self.server.max_frame_size)
self.__send_frames(frame.construct())
self.__force_close(1)
|
{"/ws_example.py": ["/simple_ws/__init__.py"], "/test/test_ws_frame.py": ["/simple_ws/__init__.py"], "/simple_ws/__init__.py": ["/simple_ws/WebSocket.py"], "/test/test_request_parser.py": ["/simple_ws/__init__.py"], "/test/test_frame_reader.py": ["/simple_ws/__init__.py"]}
|
30,515
|
WSnettverksprog/simple_ws
|
refs/heads/master
|
/setup.py
|
from distutils.core import setup

# Convert the markdown README to reST for PyPI when pypandoc is available;
# fall back to an empty long description otherwise.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
    long_description = long_description.replace("\r", "")
except (IOError, ImportError):
    # BUG FIX: the original fallback read `long_description='',` — the
    # trailing comma assigned the tuple ('',) instead of a string.
    long_description = ''

setup(
    name='simple_ws',
    packages=['simple_ws'],
    version='0.3.0',
    description='Simple websocket implementation in python using asyncio',
    license="MIT",
    long_description=long_description,
    author='Ole Kristian Aune, Even Dalen, Audun Wigum Arbo',
    author_email='even.dalen@live.no',
    url='https://github.com/WSnettverksprog/simple_ws',
    download_url='https://github.com/WSnettverksprog/simple_ws/archive/0.1.tar.gz',
    keywords=['websocket', 'ws', 'asyncio', 'simple'],
)
|
{"/ws_example.py": ["/simple_ws/__init__.py"], "/test/test_ws_frame.py": ["/simple_ws/__init__.py"], "/simple_ws/__init__.py": ["/simple_ws/WebSocket.py"], "/test/test_request_parser.py": ["/simple_ws/__init__.py"], "/test/test_frame_reader.py": ["/simple_ws/__init__.py"]}
|
30,516
|
WSnettverksprog/simple_ws
|
refs/heads/master
|
/test/test_frame_reader.py
|
import unittest
from simple_ws import FrameReader
class FrameReaderTestMethods(unittest.TestCase):
    """Unit tests for FrameReader's fragment reassembly."""

    def test_continuation_frame(self):
        # A TEXT frame without FIN followed by a CONTINUOUS frame with FIN
        # must be reassembled into a single payload.
        reader = FrameReader()
        first_fragment = b'\x01\x83\x00\x00\x00\x00hei'
        final_fragment = b'\x80\x83\x00\x00\x00\x00 du'
        partial = reader.read_message(first_fragment)[1]
        complete = reader.read_message(final_fragment)[1]
        self.assertIsNone(partial)
        self.assertEqual("hei du", complete.decode('utf-8'))
|
{"/ws_example.py": ["/simple_ws/__init__.py"], "/test/test_ws_frame.py": ["/simple_ws/__init__.py"], "/simple_ws/__init__.py": ["/simple_ws/WebSocket.py"], "/test/test_request_parser.py": ["/simple_ws/__init__.py"], "/test/test_frame_reader.py": ["/simple_ws/__init__.py"]}
|
30,519
|
justhinkdp/zhxg_qg
|
refs/heads/master
|
/LGB_KEY_ZQJ_3_1.py
|
# encoding:utf-8
# 训练分类器.
import lightgbm as lgb
import numpy as np
import vsm
path = './'
def lgb_key_train():
    """Train five LightGBM multiclass boosters with 5-fold cross-validation.

    Builds the VSM feature matrix (scene label in the last column) via
    vsm.vsmbuild, shuffles it, then for each fold trains a booster with
    early stopping, saves it to ./models/key_cv<i>.m and prints per-class
    precision on the held-out fold.
    """
    clabel = 1
    data = vsm.vsmbuild(clabel)
    np.random.shuffle(data)  # shuffle the sample order
    print('data', data.shape)
    params = {
        'task': 'train',
        'boosting_type': 'gbdt',
        'objective': 'multiclass',
        'num_classes': 5,
        'metric': 'multi_error',
        # BUG FIX: this key was misspelled 'max_depths', which LightGBM
        # ignores, so the trees grew with unlimited depth.
        'max_depth': 6,
        'num_leaves': 60,
        'learning_rate': 0.01,
        'feature_fraction': 0.7,
        'bagging_fraction': 0.9,
        'bagging_freq': 5,
        'verbose': 1,
        # 'num_threads': 4,
    }
    acc_list = [0, 0, 0, 0, 0]
    for i in range(5):
        print(i)
        # Fold i is validation; the remaining four folds are training.
        train_data = np.concatenate([data[0:i * len(data) // 5], data[(i + 1) * len(data) // 5:]])
        valid_data = data[i * len(data) // 5:(i + 1) * len(data) // 5]
        train_d = lgb.Dataset(train_data[:, :-1], train_data[:, -1])
        valid_d = lgb.Dataset(valid_data[:, :-1], valid_data[:, -1])
        lis = {}
        clf = lgb.train(params, train_d, evals_result=lis, num_boost_round=200000,
                        valid_sets=[valid_d], early_stopping_rounds=100, verbose_eval=10)
        clf.save_model(path + "models/key_cv" + str(i) + ".m")
        clf = lgb.Booster(model_file=path + "models/key_cv" + str(i) + ".m")
        r = clf.predict(valid_data[:, :-1])
        # Per-class precision: ct0 = samples predicted as class k,
        # ct1 = of those, samples truly of class k.
        for k in range(5):
            ct0 = 0
            ct1 = 0
            for j, v in enumerate(r):
                if np.where(v == max(v))[0] == k:
                    ct0 += 1
                    if valid_data[j, -1] == k:
                        ct1 += 1
            if ct0 != 0:
                print(k, ct0, ct1, ct1 * 1.0 / ct0)
                acc_list[k] += ct1 * 1.0 / ct0
            else:
                print(k, ct0, ct1, 0)
        print('\n\n')
    print(acc_list)
# FIX: guard the training run so importing this module does not start it.
if __name__ == "__main__":
    lgb_key_train()
|
{"/LGB_KEY_ZQJ_3_1.py": ["/vsm.py"]}
|
30,520
|
justhinkdp/zhxg_qg
|
refs/heads/master
|
/vsm.py
|
# encoding:utf-8
import numpy as np
import jieba
# vsm.py负责构建文本向量,用来训练模型/进行预测
def vsmbuild(clabel):
    """Build the VSM training matrix for classifier level ``clabel``.

    Returns an ndarray where each row represents one training sentence:
    per-keyword hit counts, then five per-scene hit counts, then the
    scene label (0-4) in the last column.
    """
    # Handle the training data of the first-level scenes.
    # word2id maps keyword -> column index (starting at 0).
    # word2id_cat maps keyword -> the scene id the keyword belongs to.
    if clabel == 1:
        word2id = {}
        word2id_cat = {}
        path = './data/'
        # First pass: number the TF-IDF keywords consecutively 0, 1, 2...
        for line in open(path + "keywords_single_250.txt", encoding='UTF-8'):
            for w in line.split():
                word2id[w.strip()] = len(word2id)
        ct = 0
        counts = [0, 0, 0, 0, 0]
        # Second pass: each line of the keyword file is one scene; record
        # which scene (0-4) every keyword belongs to.
        for line in open(path + "keywords_single_250.txt", encoding='UTF-8'):
            for w in line.split():
                word2id_cat[w.strip()] = ct
                counts[ct] += 1
            ct += 1
        # Convert the labelled training sentences into vectors.
        data = []
        paths = [path + "level2new.txt", path + "level1new.txt", path + "level0new.txt", path + "level-1new.txt", path + "level-2new.txt"]
        for i in range(5):
            for line in open(paths[i], 'rb'):
                # tp layout: keyword hit counts, then five per-scene hit
                # counts (slots -6..-2), then the scene label (slot -1).
                tp = [0] * (len(word2id) + 6)
                for w in jieba.cut(line):
                    # NOTE(review): `line` is bytes (file opened 'rb'), so
                    # comparing it with the str '\n' is always False — dead
                    # code; it also looks like it was meant to guard the
                    # outer loop, not this one.
                    if line == '\n':
                        continue
                    # Count w against every keyword it is a substring of.
                    for key in word2id:
                        if w in key:
                            tp[word2id[key]] += 1
                    # Also bump the per-scene hit counter of the keyword.
                    for key in word2id_cat:
                        if w in key:
                            tp[-(word2id_cat[key] + 2)] += 1
                # The last slot carries the scene label of this sentence.
                tp[-1] = i
                data.append(tp)
        data = np.array(data)
        return data
|
{"/LGB_KEY_ZQJ_3_1.py": ["/vsm.py"]}
|
30,521
|
justhinkdp/zhxg_qg
|
refs/heads/master
|
/predict_ZQJ_3_1.py
|
# -*- coding:utf-8 -*-
import lightgbm as lgb
import jieba
import re
import numpy as np
path = './data/'
# sentence是词典,存储要预测是语句
def key_cv(sentence):
    """Classify each sentence with the five cross-validation boosters.

    ``sentence`` is a dict mapping id -> sentence text. The function
    rebuilds the same keyword feature vectors used for training, sums the
    predictions of the five models and writes '<id>:<scene>' lines to
    result.txt.
    """
    word2id = {}
    word2id_cat = {}
    word2id_cat_m = {}
    # TF-IDF keyword -> column index.
    for line in open(path + "keywords_single_250.txt", encoding='UTF-8'):
        for w in line.split():
            word2id[w.strip()] = len(word2id)
    ct = 0
    counts = [0, 0, 0, 0, 0]
    # keyword -> scene id (one keyword line per scene in the file).
    for line in open(path + "keywords_single_250.txt", encoding='UTF-8'):
        for w in line.split():
            word2id_cat[w.strip()] = ct
            counts[ct] += 1
        ct += 1
    data = []
    d1 = sentence
    kdr = []
    for s in d1:
        if d1[s] == '':
            continue
        # s is the dict key; content is the sentence to classify.
        content = d1[s]
        # Strip wav-file markers and numeric prefixes from the text.
        m = re.findall('[\d]+\.wav[\d|!|_|。]+', content)
        for mm in m:
            content = content.replace(mm, '')
        content = re.sub('[\d]+_[\d]+_', '', content)
        tp = [0] * (len(word2id) + 5)
        # Five sets meant to record which keywords of each scene were hit
        # (currently never filled in).
        kd = [set(), set(), set(), set(), set()]
        for w in jieba.cut(content):
            for key in word2id:
                if w in key:
                    tp[word2id[key]] += 1
            for key in word2id_cat:
                if w in key:
                    tp[-(word2id_cat[key] + 1)] += 1
        data.append(tp)
        kdr.append(kd)
    # Predict with the five CV models and sum their class probabilities.
    data = np.array(data)
    r = []
    for i in range(5):
        clf = lgb.Booster(model_file="./models/key_cv" + str(i) + ".m")
        if len(r) == 0:
            r = clf.predict(data)
        else:
            r += clf.predict(data)
    rr = ['2', '1', '0', '-1', '-2']
    tow = open('result.txt', 'w', encoding='UTF-8')
    # Zip predictions with the original dict keys; v[1] is the dict key.
    for v in list(zip(r, d1, kdr)):
        # Index of the highest summed probability -> predicted scene.
        tpr = np.where(v[0][:] == max(v[0][:]))[0][0]
        b = np.argsort(np.array(list(v[0])))
        value = rr[tpr]
        write_str = str(v[1]) + ':' + value + "\n"
        tow.write(write_str)
    tow.flush()
    tow.close()
    print("预测完成")
|
{"/LGB_KEY_ZQJ_3_1.py": ["/vsm.py"]}
|
30,533
|
fatwookie/netcrawl
|
refs/heads/master
|
/netcrawl.py
|
#!/usr/bin/env python3
#
"""
This script contacts a network device via SNMP and tries to download
the contents of the MAC address table. This should normally be available
at the OID 1.3.6.1.2.1.17.4.3.1 from the BRIDGE-MIB, using object
dot1dTpFdbEntry.
See also: http://www.cisco.com/en/US/tech/tk648/tk362/technologies_tech_note09186a0080094a9b.shtml
SNMP MIB-2 SYSTEM
system => 1.3.6.1.2.1.1
ifDescr => 1.3.6.1.2.1.2.2.1.2
ifName => 1.3.6.1.2.1.31.1.1.1.1
dot1dTpFdbEntry => 1.3.6.1.2.1.17.4.3.1
g = getCmd(SnmpEngine(),CommunityData('public'),UdpTransportTarget(('demo.snmplabs.com', 161)),ContextData(),ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
g = getCmd(SnmpEngine(),CommunityData('public'),UdpTransportTarget(('demo.snmplabs.com', 161)),ContextData(),ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
"""
import argparse
import re
from ncrawl import *
oid = "1.3.6.1.2.1.17.4.3.1"
def main():
    """Parse the CLI arguments and probe the given range over SNMP."""
    arg_parser = argparse.ArgumentParser(description='Scan the switch TCAM')
    arg_parser.add_argument('ip_range', help='IP range to scan')
    cli = arg_parser.parse_args()
    det_snmp_up(cli.ip_range, community)
if __name__ == "__main__":
    main()
    # BUG FIX: removed the bare `exit` expression that followed main();
    # referencing the builtin without calling it was a no-op.
|
{"/netcrawl.py": ["/ncrawl.py"]}
|
30,534
|
fatwookie/netcrawl
|
refs/heads/master
|
/ncrawl.py
|
import pyasn1
# Import dependencies one at a time so the user gets a precise hint about
# what is missing instead of a raw traceback.
# BUG FIX: narrowed the bare `except:` clauses to ImportError — the bare
# form also swallowed unrelated failures (e.g. a SyntaxError in config.py)
# and misreported them as missing packages.
try:
    from config import *
except ImportError:
    print("Import error: config.py. Make sure to view README")
    exit(2)
try:
    import ipaddress
except ImportError:
    print("Import error: ipaddress. Make sure to run pip3 install -r requirements.txt")
    exit(2)
try:
    from pysnmp.hlapi import *
except ImportError:
    print("Import error: pysnmp. Make sure to run pip3 install -r requirements.txt")
    exit(2)
def det_scan_targets(ip_range):
    """Return the usable host addresses of ``ip_range`` as strings.

    Args:
        ip_range: an IPv4/IPv6 network in CIDR notation, e.g. "10.0.0.0/24".

    Returns:
        list[str]: one entry per usable host address.

    Raises:
        ValueError: if ``ip_range`` is not a valid network.
    """
    network = ipaddress.ip_network(ip_range)
    # hosts() already excludes the network and broadcast addresses.
    return [str(host) for host in network.hosts()]
def det_snmp_up(ip_range, community):
    """Probe every host in ``ip_range`` with an SNMP GET of sysDescr.

    Prints the system description for hosts that answer, an error line
    for SNMP-level errors, and a 'No response' line for silent hosts.

    Args:
        ip_range: network in CIDR notation, expanded via det_scan_targets.
        community: SNMP v1/v2c community string.
    """
    for target in det_scan_targets(ip_range):
        # One blocking GET per host; next() pulls the single response
        # tuple out of the pysnmp generator.
        errorIndication, errorStatus, errorIndex, varBinds = next(
            getCmd(SnmpEngine(),
                   CommunityData(community),
                   UdpTransportTarget((target, 161)),
                   ContextData(), ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
        )
        if errorIndication:
            # Transport-level failure (timeout, host unreachable, ...).
            print('No response from: {}'.format(str(target)))
        elif errorStatus:
            # SNMP error-status reported by the agent itself.
            print('{} at {}'.format(str(errorStatus), str(errorIndex) and varBinds[int(errorIndex) - 1][0] or '?'))
        else:
            for varBind in varBinds:
                print(' = '.join([x.prettyPrint() for x in varBind]))
|
{"/netcrawl.py": ["/ncrawl.py"]}
|
30,566
|
phenal-projects/biokg_rgcn
|
refs/heads/master
|
/training_utils.py
|
from itertools import product
import torch
from torch import nn
from torch.nn import functional as F
from torch_sparse import SparseTensor
from sklearn.metrics import roc_auc_score, average_precision_score
def drop_edges(mat, p=0.3):
    """Randomly drop a fraction ``p`` of the edges of a SparseTensor.

    Returns:
        (pruned, keep): the pruned SparseTensor and the boolean mask of
        the edges that survived.
    """
    keep = torch.rand((mat.storage.row().shape[0],)) > p
    pruned = SparseTensor(
        row=mat.storage.row()[keep],
        col=mat.storage.col()[keep],
        value=mat.storage.value()[keep],
        sparse_sizes=mat.storage.sparse_sizes(),
    )
    return pruned, keep
def test(z, decoder, entity_types, pos_edge_index, neg_edge_index):
    """Score positive and negative edges and return (ROC-AUC, AP)."""
    labels = torch.cat(
        (z.new_ones(pos_edge_index.size(1)), z.new_zeros(neg_edge_index.size(1))),
        dim=0,
    )
    scores = torch.cat(
        (decoder(z, pos_edge_index, entity_types),
         decoder(z, neg_edge_index, entity_types)),
        dim=0,
    )
    labels = labels.detach().cpu().numpy()
    scores = scores.detach().cpu().numpy()
    return roc_auc_score(labels, scores), average_precision_score(labels, scores)
def negative_sample(
    positive_sample,
    start_head_index,
    stop_head_index,
    start_tail_index,
    stop_tail_index,
    size,
):
    """Build ``size`` corrupted edges from a batch of positive edges.

    Half the output keeps a sampled true head paired with a random tail
    from [start_tail_index, stop_tail_index); the other half keeps a
    sampled true tail paired with a random head from
    [start_head_index, stop_head_index).

    Returns:
        LongTensor of shape (2, 2 * (size // 2)): head row, tail row.
    """
    half = size // 2
    pos_heads, pos_tails = positive_sample
    sampled_heads = pos_heads[torch.randint(0, len(pos_heads), size=(half,))]
    sampled_tails = pos_tails[torch.randint(0, len(pos_tails), size=(half,))]
    corrupt_heads = torch.randint(start_head_index, stop_head_index, size=(half,))
    corrupt_tails = torch.randint(start_tail_index, stop_tail_index, size=(half,))
    head_row = torch.cat((sampled_heads, corrupt_heads))
    tail_row = torch.cat((corrupt_tails, sampled_tails))
    return torch.stack((head_row, tail_row))
def logloss(pos_scores, neg_scores, adversarial_temperature=1.0):
pos_loss = -F.logsigmoid(pos_scores).sum()
neg_loss = -(
F.softmax(neg_scores * adversarial_temperature, dim=0).detach()
* F.logsigmoid(-neg_scores)
).sum()
return (pos_loss + neg_loss), float(len(pos_scores) + len(neg_scores))
def train_step(
    model,
    optimizer,
    train_adj_t,
    pos_val,
    neg_val,
    entity_type_dict,
    relation_to_entity,
    edge_types_to_train,
    neg_sample_size,
    device,
):
    """Run one optimisation step of the link-prediction model.

    Drops ~30% of the training edges, encodes the remaining graph, scores
    the dropped edges of each relation as positives plus sampled
    corrupted negatives, applies the self-adversarial log loss, and
    finally evaluates ROC-AUC/AP on the validation edges.

    Returns:
        (model, val_auc, val_ap, mean_loss)
    """
    train_pos_adj, dropmask = drop_edges(train_adj_t)
    model.train()
    optimizer.zero_grad()
    z = model.encode(train_pos_adj)
    pos_scores = list()
    neg_scores = list()
    for edge_type in edge_types_to_train:
        # The dropped edges (~dropmask) of this relation act as positives.
        pos_edges = torch.stack(
            (
                train_adj_t.storage.row()[~dropmask][
                    train_adj_t.storage.value()[~dropmask] == edge_type
                ],
                train_adj_t.storage.col()[~dropmask][
                    train_adj_t.storage.value()[~dropmask] == edge_type
                ],
            )
        )
        if pos_edges.shape[-1] != 0:
            pos_scores.append(
                model.decoder(
                    z, pos_edges.to(device), edge_type, sigmoid=False
                )
            )
            # Corrupt heads/tails only within the entity-type id interval
            # that is valid for this relation.
            possible_tail_nodes = entity_type_dict[
                relation_to_entity["tail"][edge_type]
            ]
            possible_head_nodes = entity_type_dict[
                relation_to_entity["head"][edge_type]
            ]
            for _ in range(neg_sample_size):
                neg_edges = negative_sample(
                    pos_edges,
                    *possible_head_nodes,
                    *possible_tail_nodes,
                    int(len(pos_edges[0]))
                )
                neg_scores.append(
                    model.decoder(
                        z, neg_edges.to(device), edge_type, sigmoid=False
                    )
                )
    l, w = logloss(torch.cat(pos_scores), torch.cat(neg_scores))
    l.backward()
    nn.utils.clip_grad_norm_(model.parameters(), 1)
    optimizer.step()
    model.eval()
    with torch.no_grad():
        auc, ap = test(
            z, model.decoder, 0, pos_val.to(device), neg_val.to(device),
        )
    return model, auc, ap, l.item() / w
def ft_inference(
    model,
    cl_head_1,
    cl_head_2,
    train_adj_t,
    protein_bounds,
    disease_bounds,
    df,
    device,
):
    """Forward pass of the clinical-trial fine-tuning classifier heads.

    For every row of ``df`` (assumes list-valued 'protein' and 'disease'
    columns of node ids — TODO confirm against caller), averages the node
    embeddings of its proteins and diseases (falling back to the mean
    embedding of the whole entity type when a list is empty), adds the
    min/mean/max decoder score over all protein-disease pairs, and feeds
    the concatenation through the two classifier heads.

    Returns:
        Tensor of shape (len(df), 1) with the classifier logits.
    """
    z = model.encode(train_adj_t)
    embs_protein = torch.zeros((len(df), z.shape[1])).to(device)
    embs_disease = torch.zeros((len(df), z.shape[1])).to(device)
    min_mean_max = torch.zeros((len(df)), 3).to(device)
    # Type-level mean embeddings used when a trial lists no entity.
    neutral_protein = z[protein_bounds[0] : protein_bounds[1]].mean(0)
    neutral_disease = z[disease_bounds[0] : disease_bounds[1]].mean(0)
    for i, (_, idx) in enumerate(df.iterrows()):
        if len(idx["protein"]) > 0:
            embs_protein[i] = z[idx["protein"]].mean(0)
        else:
            embs_protein[i] = neutral_protein
        if len(idx["disease"]) > 0:
            embs_disease[i] = z[idx["disease"]].mean(0)
        else:
            embs_disease[i] = neutral_disease
        if (len(idx["protein"]) > 0) and (len(idx["disease"]) > 0):
            # Decoder scores (relation 0) over every protein-disease pair.
            prod = torch.LongTensor(
                list(product(idx["protein"], idx["disease"]))
            ).T
            d = model.decoder(z, prod, 0, sigmoid=False)
            min_mean_max[i, 0] = d.min()
            min_mean_max[i, 1] = d.mean()
            min_mean_max[i, 2] = d.max()
        else:
            min_mean_max[i, 0] = 0.0
            min_mean_max[i, 1] = 0.0
            min_mean_max[i, 2] = 0.0
    z1 = cl_head_1(torch.cat((embs_protein, embs_disease), 1))
    probas = cl_head_2(torch.cat((z1, min_mean_max), 1))
    return probas
|
{"/run.py": ["/models.py", "/data.py", "/training_utils.py"]}
|
30,567
|
phenal-projects/biokg_rgcn
|
refs/heads/master
|
/models.py
|
import torch
from torch import nn
from torch.nn import functional as F
from torch_geometric import nn as gnn
from torch_geometric.nn.inits import glorot_orthogonal
class RGCNStack(nn.Module):
    """Three stacked R-GCN layers whose outputs are concatenated.

    The final embedding is [conv3 | conv2 | conv1 | initial], so
    ``output_size`` must exceed
    initial_size + middle_size_1 + middle_size_2. Computation runs on
    ``device1``; the concatenated result is moved to ``device2``.
    """

    def __init__(
        self,
        initial_size,
        output_size,
        middle_size_1,
        middle_size_2,
        num_nodes,
        num_relations,
        device1,
        device2,
    ):
        super().__init__()
        self.device1 = device1
        self.device2 = device2
        # NOTE(review): calling .to(device1) on an nn.Parameter returns a
        # plain (possibly non-leaf) tensor when a device copy is made,
        # which can break parameter registration/optimisation — confirm
        # behaviour on multi-device runs.
        self.emb = nn.parameter.Parameter(
            torch.ones((num_nodes, initial_size)), requires_grad=True
        ).to(device1)
        glorot_orthogonal(self.emb, 1)
        self.conv1 = gnn.RGCNConv(
            initial_size, middle_size_1, num_relations, num_bases=12
        ).to(device1)
        self.conv2 = gnn.RGCNConv(
            middle_size_1, middle_size_2, num_relations, num_bases=12
        ).to(device1)
        # The last layer produces only the slice still missing from the
        # concatenated output.
        self.conv3 = gnn.RGCNConv(
            middle_size_2,
            output_size - middle_size_2 - middle_size_1 - initial_size,
            num_relations,
            num_bases=12,
        ).to(device1)
        self.drop = nn.Dropout(0.2)

    def forward(self, adj_t, edge_types=None):
        """Calculates embeddings"""
        adj_t = adj_t.to(self.device1)
        if edge_types is not None:
            edge_types = edge_types.to(self.device1)
        x1 = F.relu(self.conv1(self.emb, adj_t, edge_types))
        x2 = F.relu(self.conv2(x1, adj_t, edge_types))
        x3 = F.relu(self.conv3(x2, adj_t, edge_types))
        # Move every part to the second device before concatenation.
        emb = self.emb.to(self.device2)
        x1 = x1.to(self.device2)
        x2 = x2.to(self.device2)
        x3 = x3.to(self.device2)
        x3 = torch.cat((x3, x2, x1, emb), 1)
        x3 = self.drop(x3)
        return x3

    def change_devices(self, device1, device2):
        # Re-home the module (used after reloading a checkpoint).
        self.device1 = device1
        self.device2 = device2
        self.to(device1)
class Lookup(nn.Module):
    """Plain trainable embedding table, interface-compatible with RGCNStack.

    The graph structure is ignored entirely; forward simply returns the
    dropout-regularised embedding matrix.
    """

    def __init__(
        self, initial_size, num_nodes, num_relations, *args, **kwargs
    ):
        super().__init__()
        weight = nn.parameter.Parameter(
            torch.ones((num_nodes, initial_size)), requires_grad=True
        )
        self.emb = weight
        glorot_orthogonal(self.emb, 1)
        self.drop = nn.Dropout(0.2)

    def forward(self, adj_t, edge_types=None):
        """Return node embeddings; ``adj_t``/``edge_types`` are unused."""
        return self.drop(self.emb)
class DistMult(nn.Module):
    """DistMult edge decoder: score = sum(h * r * t) per edge."""

    def __init__(self, input_size, num_relations):
        super().__init__()
        # One learnable relation vector per relation type.
        self.rel = nn.parameter.Parameter(
            torch.ones((num_relations, input_size)), requires_grad=True
        )
        glorot_orthogonal(self.rel, 1)

    def forward(self, z, edge_index, relation_id, sigmoid=True):
        """Score the edges in ``edge_index`` under ``relation_id``.

        Args:
            z: node embeddings of shape (num_nodes, input_size).
            edge_index: (2, num_edges) head/tail node ids.
            relation_id: index of the relation vector to use.
            sigmoid: squash scores to (0, 1) when True.
        """
        heads = z[edge_index[0]]
        tails = z[edge_index[1]]
        scores = (heads * self.rel[relation_id] * tails).sum(dim=1)
        return torch.sigmoid(scores) if sigmoid else scores
|
{"/run.py": ["/models.py", "/data.py", "/training_utils.py"]}
|
30,568
|
phenal-projects/biokg_rgcn
|
refs/heads/master
|
/run.py
|
import argparse
from collections import defaultdict
from itertools import chain
import mlflow
import numpy as np
import pandas as pd
import torch
import torch.optim as opt
from ogb.linkproppred import Evaluator
from sklearn.metrics import average_precision_score, roc_auc_score
from torch import nn
from torch_geometric import nn as gnn
from torch_sparse import SparseTensor
import models
from data import load_biokg, load_dataset
from training_utils import train_step, ft_inference
# Setup parser
# The node embedding is the concatenation of four parts; size1..size4 are
# the widths of those parts (see models.RGCNStack).
parser = argparse.ArgumentParser()
parser.add_argument("--seed", help="set a seed for PRNG", type=int, default=0)
parser.add_argument(
    "--size1",
    help="set the size of the initial embeddings",
    type=int,
    default=52,
)
parser.add_argument(
    "--size2",
    help="set the size of the middle embeddings",
    type=int,
    default=52,
)
parser.add_argument(
    "--size3",
    help="set the size of the last part of the embeddings",
    type=int,
    default=52,
)
parser.add_argument(
    "--size4",
    help="set the size of the last part of the embeddings",
    type=int,
    default=52,
)
# Number of negative samples drawn per positive edge.
parser.add_argument("--negsize", help="negsize/possize", type=int, default=1)
parser.add_argument(
    "--adv",
    help="set the adversarial temperature for the negative part of the loss",
    type=float,
    default=1.0,
)
parser.add_argument(
    "--lr", help="set the learning rate", type=float, default=0.005
)
parser.add_argument(
    "--wd", help="set the weight decay", type=float, default=0.0001
)
parser.add_argument(
    "--epochs", help="set the number of epochs to train", type=int, default=400
)
# The encoder is split across two devices (see RGCNStack).
parser.add_argument(
    "--device1", help="the device to train on, part 1", type=str, default="cpu"
)
parser.add_argument(
    "--device2", help="the device to train on, part 2", type=str, default="cpu"
)
parser.add_argument(
    "--data",
    help="'biokg' or a path to directory with datasets",
    type=str,
    default="biokg",
)
parser.add_argument(
    "--target_relation",
    help="an id of target relation. Increases its weight in the loss",
    type=int,
    default=0,
)
parser.add_argument(
    "--mlflow",
    help="URI of the mlflow instance for logging",
    type=str,
    default="http://localhost:12345",
)
parser.add_argument(
    "--finetuning_dataset",
    help="a path to a hdf file with disease-target pairs for CTOP finetuning",
    type=str,
    default="None",
)
parser.add_argument(
    "--finetuning_model",
    help="a path to a model to finetune",
    type=str,
    default="None",
)
args = parser.parse_args()
# Reproducibility
torch.set_deterministic(True)
torch.manual_seed(args.seed)
mlflow.set_tracking_uri(args.mlflow)
# Load the dataset and split edges
if args.data == "biokg":
    train_edge, valid_edge, test_edge, entity_type_dict = load_biokg()
else:
    train_edge, valid_edge, test_edge, entity_type_dict = load_dataset(
        args.data
    )
head = train_edge["head"]
tail = train_edge["tail"]
# Some useful values
num_relations = train_edge["relation"].max() + 1
# entity_type_dict maps entity type -> (start_id, stop_id) interval.
num_nodes = max(entity_type_dict.values())[1] + 1
# For each relation, record the entity type of its head and tail
# (taken from the first training edge of that relation).
relation_to_entity = defaultdict(dict)
for i in range(num_relations):
    relation_to_entity["head"][i] = np.array(train_edge["head_type"])[
        train_edge["relation"] == i
    ][0]
    relation_to_entity["tail"][i] = np.array(train_edge["tail_type"])[
        train_edge["relation"] == i
    ][0]
# Prepare training data: sparse adjacency with the relation id as value.
train_adj_t = SparseTensor(
    row=head,
    col=tail,
    value=train_edge["relation"],
    sparse_sizes=(num_nodes, num_nodes),
)
# Prepare validation data (only for relation == 0, entailment)
pos_val = torch.stack((valid_edge["head"], valid_edge["tail"]))[
    :, valid_edge["relation"] == args.target_relation
]
# Negatives reuse the positive heads with the first precomputed
# corrupted tail.
neg_val = torch.stack(
    (
        pos_val[0],
        valid_edge["tail_neg"][
            valid_edge["relation"] == args.target_relation, 0
        ],
    )
)
# Build a fresh model unless a checkpoint was given for fine-tuning.
if args.finetuning_model == "None":
    # Model
    encoder = models.RGCNStack(
        args.size1,
        args.size1 + args.size2 + args.size3 + args.size4,
        args.size2,
        args.size3,
        num_nodes,
        num_relations,
        args.device1,
        args.device2,
    )
    decoder = models.DistMult(
        args.size1 + args.size2 + args.size3 + args.size4, num_relations
    ).to(args.device2)
    model = gnn.GAE(encoder, decoder)
else:
    model = torch.load(args.finetuning_model)
    model.encoder.change_devices(args.device1, args.device2)
    model.decoder.to(args.device2)
optimizer = opt.Adam(
    model.parameters(), args.lr, weight_decay=args.wd, amsgrad=True
)
# Checkpointing thresholds for the training loop below.
best_loss = 0.5
best_auc = 0.0
with mlflow.start_run():
    # --- Link-prediction training ---
    for epoch in range(args.epochs):
        model, auc, ap, loss = train_step(
            model,
            optimizer,
            train_adj_t,
            pos_val,
            neg_val,
            entity_type_dict,
            relation_to_entity,
            list(range(num_relations)),
            args.negsize,
            args.device2,
        )
        # Checkpoint on best validation AUC and best training loss.
        if auc > best_auc:
            torch.save(model, "best_auc.pt")
            best_auc = auc
        if loss < best_loss:
            torch.save(model, "best_loss.pt")
            best_loss = loss
        mlflow.log_metric(key="balanced_roc_auc", value=auc, step=epoch)
        mlflow.log_metric(key="balanced_ap", value=ap, step=epoch)
        mlflow.log_metric(key="loss", value=loss, step=epoch)
    model = torch.load("best_auc.pt")
    mlflow.log_artifact("best_auc.pt")
    # Link-prediction validation
    evaluator = Evaluator(name="ogbl-biokg")
    with torch.no_grad():
        model.eval()
        z = model.encode(train_adj_t)
        results = []
        for et in range(num_relations):
            # Row 0: positive scores; rows 1..500: scores of the
            # precomputed corrupted tails.
            subresults = []
            pos_val = torch.stack(
                (
                    test_edge["head"][test_edge["relation"] == et],
                    test_edge["tail"][test_edge["relation"] == et],
                )
            )
            subresults.append(
                model.decoder(z, pos_val.to(args.device2), et)
                .detach()
                .cpu()
                .numpy()
            )
            for i in range(500):
                tail_neg = test_edge["tail_neg"][
                    test_edge["relation"] == et, i
                ]
                subresults.append(
                    model.decoder(
                        z,
                        torch.stack((pos_val[0], tail_neg)).to(args.device2),
                        et,
                    )
                    .detach()
                    .cpu()
                    .numpy()
                )
            results.append(np.stack(subresults))
        scores = np.concatenate(results, 1).T
        eval_results = evaluator.eval(
            {"y_pred_pos": scores[:, 0], "y_pred_neg": scores[:, 1:]}
        )
        # NOTE(review): `et` leaks from the loop above, so the metric key
        # always uses the last relation id although the MRR covers all
        # relations — confirm whether this was intended.
        mlflow.log_metric(
            key="test_lp_mrr_{}".format(et),
            value=eval_results["mrr_list"].mean(),
        )
    # CTOP validation
    best_auc = 0.0
    # Data
    ctop_ds = pd.read_hdf(args.finetuning_dataset, "ctop")
    train = ctop_ds[ctop_ds["subset"] == "train"]
    train_y = torch.tensor(train["result"].values).reshape(-1, 1)
    val = ctop_ds[ctop_ds["subset"] == "test"]
    val_y = val["result"].values.reshape(-1, 1)
    # Models
    cl_head_1 = nn.Sequential(
        nn.Linear(
            2 * (args.size1 + args.size2 + args.size3 + args.size4), 128
        ),
        nn.ReLU(),
        nn.Linear(128, 13),
        nn.ReLU(),
    ).to(args.device2)
    # Input = 13 features from cl_head_1 + 3 min/mean/max decoder scores.
    cl_head_2 = nn.Sequential(
        nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 1)
    ).to(args.device2)
    # Optim and loss
    optimizer = opt.Adam(
        chain(
            model.parameters(), cl_head_1.parameters(), cl_head_2.parameters()
        ),
        args.lr,
        amsgrad=True,
    )
    ls = nn.BCEWithLogitsLoss()
    # ids bounds for neutral embeddings
    if args.data == "biokg":
        protein_bounds = (
            entity_type_dict["protein"][0],
            entity_type_dict["protein"][1],
        )
        disease_bounds = (
            entity_type_dict["disease"][0],
            entity_type_dict["disease"][1],
        )
    else:
        protein_bounds = (entity_type_dict[0][0], entity_type_dict[0][1])
        disease_bounds = (entity_type_dict[1][0], entity_type_dict[1][1])
    # --- CTOP fine-tuning loop ---
    for epoch in range(300):
        model.train()
        optimizer.zero_grad()
        probas = ft_inference(
            model,
            cl_head_1,
            cl_head_2,
            train_adj_t,
            protein_bounds,
            disease_bounds,
            train,
            args.device2,
        )
        loss = ls(probas, train_y.to(args.device2))
        loss.backward()
        mlflow.log_metric(
            key="ft_loss", value=loss.item(), step=epoch + args.epochs
        )
        optimizer.step()
        # validation
        with torch.no_grad():
            model.eval()
            probas = ft_inference(
                model,
                cl_head_1,
                cl_head_2,
                train_adj_t,
                protein_bounds,
                disease_bounds,
                val,
                args.device2,
            )
            # Rows with NaN results are excluded from the AUC.
            auc = roc_auc_score(
                val["result"][~val["result"].isna()],
                probas.cpu().numpy()[~val["result"].isna()].reshape(-1),
            )
            mlflow.log_metric(
                key="ft_auc_val", value=auc, step=epoch + args.epochs
            )
            if auc > best_auc:
                torch.save(model, "best_auc_ft.pt")
                best_auc = auc
    # Testing
    model = torch.load("best_auc_ft.pt")
    with torch.no_grad():
        for subset in ctop_ds["subset"].unique():
            if subset != "train":
                test = ctop_ds[ctop_ds["subset"] == subset]
                probas = ft_inference(
                    model,
                    cl_head_1,
                    cl_head_2,
                    train_adj_t,
                    protein_bounds,
                    disease_bounds,
                    test,
                    args.device2,
                )
                if len(test["result"][~test["result"].isna()]) > 0:
                    auc, ap = (
                        roc_auc_score(
                            test["result"][~test["result"].isna()],
                            probas.cpu()
                            .numpy()[~test["result"].isna()]
                            .reshape(-1),
                        ),
                        average_precision_score(
                            test["result"][~test["result"].isna()],
                            probas.cpu()
                            .numpy()[~test["result"].isna()]
                            .reshape(-1),
                        ),
                    )
                    mlflow.log_metric(
                        key="ft_auc_{}".format(subset), value=auc
                    )
                    mlflow.log_metric(key="ft_ap_{}".format(subset), value=ap)
    torch.save(model, "last.pt")
    torch.save(cl_head_1, "head1.pt")
    torch.save(cl_head_2, "head2.pt")
    mlflow.log_artifact("last.pt")
    mlflow.log_artifact("best_auc_ft.pt")
    mlflow.log_artifact("head1.pt")
    mlflow.log_artifact("head2.pt")
|
{"/run.py": ["/models.py", "/data.py", "/training_utils.py"]}
|
30,569
|
phenal-projects/biokg_rgcn
|
refs/heads/master
|
/data.py
|
from ogb.linkproppred.dataset_pyg import PygLinkPropPredDataset
import numpy as np
import torch
def load_biokg():
    """Load ogbl-biokg and remap per-type node ids to global ids.

    Each entity type is assigned a contiguous interval of global ids;
    every head/tail (and precomputed negative) index in the splits is
    shifted by the start of its type's interval.

    Returns:
        (train_edge, valid_edge, test_edge, entity_type_dict) where
        entity_type_dict maps type name -> (start_id, stop_id).
    """
    biokg = PygLinkPropPredDataset(name="ogbl-biokg", root="./datasets")
    split_edge = biokg.get_edge_split()
    train_edge = split_edge["train"]
    valid_edge = split_edge["valid"]
    test_edge = split_edge["test"]
    # Assign each entity type a contiguous global-id interval.
    entity_type_dict = dict()
    offset = 0
    for etype, count in biokg[0]["num_nodes_dict"].items():
        entity_type_dict[etype] = (offset, offset + count)
        offset += count

    def _offsets(split, type_col):
        # Per-edge start offset of the entity type in `type_col`.
        return torch.tensor(
            [entity_type_dict[t][0] for t in split[type_col]]
        )

    for split in (train_edge, valid_edge, test_edge):
        split["head"] = _offsets(split, "head_type") + split["head"]
        split["tail"] = _offsets(split, "tail_type") + split["tail"]
    # Only the valid/test splits carry precomputed negatives.
    for split in (valid_edge, test_edge):
        split["head_neg"] = (
            _offsets(split, "head_type").reshape(-1, 1) + split["head_neg"]
        )
        split["tail_neg"] = (
            _offsets(split, "tail_type").reshape(-1, 1) + split["tail_neg"]
        )
    return train_edge, valid_edge, test_edge, entity_type_dict
def min_or_inf(array):
    """Minimum of ``array``, or +inf when it is empty."""
    return float("inf") if len(array) == 0 else array.min()
def max_or_inf(array):
    """Maximum of ``array``, or -inf when it is empty."""
    return -float("inf") if len(array) == 0 else array.max()
def _split_edges(triples, split_id):
    """Extract one split (0=train, 1=valid, 2=test) as a dict of tensors."""
    mask = triples[5] == split_id
    return {
        "head": torch.tensor(triples[0, mask]),
        "relation": torch.tensor(triples[1, mask]),
        "tail": torch.tensor(triples[2, mask]),
        "head_type": torch.tensor(triples[3, mask]),
        "tail_type": torch.tensor(triples[4, mask]),
    }


def _negative_samples(type_column, entity_type_dict, num_samples=500):
    """For each edge, draw num_samples random entity ids of the matching type.

    NOTE(review): the upper interval bound stored in entity_type_dict is the
    *inclusive* max id, and torch.randint excludes its `high` argument, so the
    highest-id entity of each type is never sampled — confirm whether that is
    intended (load_biokg uses an exclusive upper bound instead).
    """
    return torch.stack(
        [
            torch.randint(
                entity_type_dict[t.item()][0],
                entity_type_dict[t.item()][1],
                size=(num_samples,),
            )
            for t in type_column
        ]
    )


def load_dataset(path):
    """Load a knowledge graph stored as a .npy array of 6 rows
    (s, p, o, s_type, o_type, split) and split it into train/valid/test.

    path: path to the .npy file; a transposed (N, 6) layout is accepted.
    Returns (train_edge, valid_edge, test_edge, entity_type_dict); the
    valid/test dicts additionally carry random "head_neg"/"tail_neg"
    candidate matrices of shape (num_edges, 500).
    """
    # four row, (s, p, o, s_type, o_type, train/val/test)
    triples = np.load(path)
    if triples.shape[1] == 6 and triples.shape[0] != 6:
        triples = triples.T
    # nodes of the same type should have idx within one continuous interval
    entity_type_dict = dict()
    entity_types = set(triples[3]) | set(triples[4])
    for e in entity_types:
        # All ids of this type, whether they occur as head or as tail.
        # BUG FIX: the original took min(min_heads, max_tails) and
        # max(min_heads, max_tails), which produced +/-inf bounds (and a
        # torch.randint crash) for types occurring only on one side, and
        # wrong intervals otherwise.
        members = np.concatenate(
            (triples[0, triples[3] == e], triples[2, triples[4] == e])
        )
        entity_type_dict[e] = (members.min(), members.max())
    train_edge = _split_edges(triples, 0)
    valid_edge = _split_edges(triples, 1)
    test_edge = _split_edges(triples, 2)
    valid_edge["head_neg"] = _negative_samples(valid_edge["head_type"], entity_type_dict)
    valid_edge["tail_neg"] = _negative_samples(valid_edge["tail_type"], entity_type_dict)
    test_edge["head_neg"] = _negative_samples(test_edge["head_type"], entity_type_dict)
    test_edge["tail_neg"] = _negative_samples(test_edge["tail_type"], entity_type_dict)
    return train_edge, valid_edge, test_edge, entity_type_dict
|
{"/run.py": ["/models.py", "/data.py", "/training_utils.py"]}
|
30,599
|
Rookiee/Research_Related
|
refs/heads/master
|
/test.py
|
# coding: utf-8
'''
导入本目录下的hos.py
'''
import hos
import cv2

# Quick manual check of hos.GaussianFilter on a sample image.
pathName = 'C:/Users/Administrator/Desktop/result1.bmp'
imgGray = cv2.imread(pathName, 0)
# hos.GaussianFilter takes (img, N); the original call passed three
# arguments ((3, 3) and 1) and would raise TypeError.
imgGaussian = hos.GaussianFilter(imgGray, 3)
|
{"/test.py": ["/hos.py"]}
|
30,600
|
Rookiee/Research_Related
|
refs/heads/master
|
/reName.py
|
# coding: utf-8
"""
Author: Rookiee
Date: 2016-4-4
Location: Donghua University
Destription: Rename file names in the folder.
The new file names will be 0001.bmp, 0002.bmp...
The user need to provide two paramters:
absolute path: d:/test
extension name: txt
"""
import os
# Input the absolute path of a folder and the extension name of
# the files need to be changed.
# Ask the user for the folder, the extension to match, and an optional prefix.
path = raw_input("Enter the absolute path \n"
                 "(using '/' for suggestion): ")
ext = raw_input("Enter the extension name: ")
prefix = raw_input("Enter the prefix (Enter directly if you don't need a prefix): ")
# Validate the directory up front and stop on failure (the original caught
# the Windows-only WindowsError and then fell through, crashing later).
try:
    os.listdir(path)
except OSError:
    print ("######################")
    print ("Invalid path! ")
    print ("Enter a correct path! ")
    print ("######################")
    raise SystemExit(1)
# Normalise the trailing separator.  BUG FIX: the original test
# `path[-1] != ('/' or '\\')` only ever compared against '/'.
if path[-1] not in ('/', '\\'):
    path = path + '/'
# create a temp file
tmpPath = 'c:/'
# create a new temp file, and the content is 0000, 0001, ...
tmp = open(tmpPath + 'tmp.txt', 'w')
for i in range(len(os.listdir(path))):
    tmp.write("%04d\n" % i)
tmp.close()
# get the content of created temp file
tmp = open(tmpPath + 'tmp.txt', 'r')
tmpContent = tmp.read().split()
tmp.close()
print ("The temp file is located in", tmpPath)
while True:
    wait = raw_input("Would you want to save the tmp file(y/n)? ")
    if wait in ('n', 'N'):
        os.remove(tmpPath + 'tmp.txt')
        break
    elif wait in ('y', 'Y'):
        break
# create a new list to store the new file name,
# such as 0001.bmp, 0002.bmp
newNames = []
for item in tmpContent:
    newNames.append(prefix + item + '.' + ext)
# rename every file whose last extension matches (the original used
# split('.')[1], which picks the wrong part for multi-dot names and
# crashes on names without a dot)
j = 0
for file in os.listdir(path):
    if '.' in file and file.split('.')[-1] == ext:
        os.rename(path + file, path + newNames[j])
        j = j + 1
|
{"/test.py": ["/hos.py"]}
|
30,601
|
Rookiee/Research_Related
|
refs/heads/master
|
/thresh.py
|
# coding: utf-8
# 渐进显示, 打断时输出阈值
import numpy as np
import cv2
# Progressively show every threshold level; print the threshold in use
# when the user interrupts with 'q'.
img = cv2.imread("D:/Pics/boldt.jpg", 0)
cv2.namedWindow("Test")
for i in np.arange(256):
    ret, binary = cv2.threshold(img, i, 255, cv2.THRESH_BINARY)
    cv2.imshow("Test", binary)
    if cv2.waitKey(30) == ord('q'):
        # print(i) works on both Python 2 and 3; the original `print i`
        # is a SyntaxError under Python 3.
        print(i)
        break
cv2.destroyAllWindows()
|
{"/test.py": ["/hos.py"]}
|
30,602
|
Rookiee/Research_Related
|
refs/heads/master
|
/reconstruct.py
|
# coding: utf-8
import cv2
import numpy as np
# 3x3 rectangular structuring element shared by all operations below.
size = 3
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (size, size))
def myerosin(img):
    # One erosion step.
    # NOTE(review): the cv2.max result is assigned to the local `img` and
    # discarded; the plain eroded image is returned — looks like an
    # unfinished geodesic-reconstruction step, confirm intent.
    erodeImg = cv2.erode(img, kernel, iterations = 1)
    img = cv2.max(erodeImg, img)
    return erodeImg
def mydilation(img):
    # One dilation step; same discarded cv2.min as in myerosin above —
    # NOTE(review): confirm intent.
    # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
    dilationImg = cv2.dilate(img, kernel)
    img = cv2.min(dilationImg, img)
    return dilationImg
# Number of erosion/dilation iterations applied by each reconstruction.
loopNum = 2
def ReconstructionByErosion(img): #closing
    # Apply loopNum erosion steps in sequence.
    for i in np.arange(loopNum):
        img = myerosin(img)
    return img
def ReconstructionByDilation(img): #opening
    # Apply loopNum dilation steps in sequence.
    for i in np.arange(loopNum):
        img = mydilation(img)
    return img
def ClosingOpening(img):
    # Erosion-based pass followed by a dilation-based pass.
    img = ReconstructionByDilation(ReconstructionByErosion(img))
    return img
# Demo: run the closing/opening pipeline on a sample image and compare
# the result with the untouched copy.
img = cv2.imread("C:\\Users\\Administrator\\Desktop\\result.jpg", 0)
imgCopy = img.copy()
# dst = ReconstructionByErosion(img)
dst = ClosingOpening(imgCopy)
cv2.imshow("original", img)
cv2.imshow("New", dst)
# Python-3-compatible print (the original `print dst == imgCopy` is a
# Python 2 statement and a SyntaxError under Python 3).
print(dst == imgCopy)
cv2.waitKey()
|
{"/test.py": ["/hos.py"]}
|
30,603
|
Rookiee/Research_Related
|
refs/heads/master
|
/differenceBetweenTwoFrames.py
|
import cv2
import numpy as np
# Visualise the absolute difference between two frames, in colour and gray.
img1 = cv2.imread("C:/Users/Administrator/Desktop/0027.BMP")
img2 = cv2.imread("C:/Users/Administrator/Desktop/result2.BMP")
img1Gray = cv2.imread("C:/Users/Administrator/Desktop/0027.BMP", 0)
img2Gray = cv2.imread("C:/Users/Administrator/Desktop/result2.BMP",0)
# NOTE(review): this buffer is immediately overwritten below — dead store.
imgGray = np.ones(img1.shape, img1Gray.dtype)
img = np.zeros(img1.shape, np.uint8)
# cv2.absdiff(img1Gray, img2Gray, imgGray)
# NOTE(review): uint8 subtraction wraps around for negative differences;
# the commented-out absdiff above would avoid that — confirm which is
# intended.
imgGray = img1Gray - img2Gray
cv2.absdiff(img1, img2, img)
cv2.imshow("result", img)
cv2.imshow("grayResult", imgGray)
if cv2.waitKey() == ord('q'):
    cv2.destroyAllWindows()
|
{"/test.py": ["/hos.py"]}
|
30,604
|
Rookiee/Research_Related
|
refs/heads/master
|
/hos.py
|
# coding: utf-8
'''
按照文章方法计算四阶矩,函数为get4moments
'''
import cv2
import numpy as np
# Img: the image to process; N: size of the mean-filter kernel
def get4moments(Img, N):
    """Compute a local fourth-order-moment (HOS) map of *Img*,
    following the method described in the paper."""
    # Smooth the original image with an N x N mean filter.
    img1 = cv2.blur(Img, (N, N))
    # The fourth moment of a random variable X is E(X^4); here X is the
    # difference between the original and the blurred image.
    diff = (Img - img1) ** 4
    # Mean-filter the fourth powers to get the local expectation.
    img2 = cv2.blur(diff, (N, N))
    # HOSMap = img2**4
    HOSMap = img2.copy() # copies both the size and the dtype
    # Defining HOSMap the way below does not work — why?
    # HOSMap = np.zeros(img2.shape, dtype = Img.dtype)
    '''
    numpy.ndenumerate: Multidimensional index iterator.
    Example:
    >>> a = np.array([[1, 2], [3, 4]])
    >>> for index, x in np.ndenumerate(a):
    ...     print index, x
    (0, 0) 1
    (0, 1) 2
    (1, 0) 3
    (1, 1) 4
    '''
    for (x, y) in np.ndenumerate(HOSMap):
        # x: the pixel coordinate (index); y: its value.
        # Fourth powers can exceed 255.
        # NOTE(review): the original comment says "if above 255, divide by
        # 100" (100 being the value recommended in the paper), but the code
        # clamps to 255 when y/100 > 255 — confirm which is intended.
        # print x,y
        if y / 100 > 255:
            HOSMap[x] = 255
    return HOSMap
# Typically applied to the raw image before computing the moment map.
def GaussianFilter(img, N, sigma=0):
    """Gaussian-blur *img*.

    :param img: input image (numpy array).
    :param N: kernel size — either an int (used as (N, N)) or a ready-made
        (h, w) tuple.
    :param sigma: Gaussian sigma; 0 lets OpenCV derive it from the kernel
        size (the original hard-coded behaviour).
    :return: the blurred image.
    """
    # Accept both an int and a tuple so callers such as test.py's
    # GaussianFilter(img, (3, 3), 1) work alongside GaussianFilter(img, 7).
    ksize = N if isinstance(N, tuple) else (N, N)
    return cv2.GaussianBlur(img, ksize, sigma)
if __name__ == '__main__':
    # # absolute path of the sample image
    absName = "C:/Users/Administrator/Desktop/0100.bmp"
    img = cv2.imread(absName, 0)
    imgGaussian = GaussianFilter(img, 7)
    HosMap = get4moments(imgGaussian, 3)
    # Amplify the map 50x.
    HosMap *= 50
    # Clamp any pixel that exceeds 255 after amplification.
    for (x,y) in np.ndenumerate(HosMap):
        if y > 255:
            HosMap[x] = 255
    # Binarise.
    ret, HosMap = cv2.threshold(HosMap, 128, 255, cv2.THRESH_BINARY)
    cv2.imshow("result", HosMap)
    cv2.imwrite("C:/Users/Administrator/Desktop/test.bmp", HosMap)
    '''
    # imgSample = []
    # for i in range(30):
    #     imgSample.append(GaussianFilter(img, 5))
    #     imgSample[i] = get4moments(imgSample[i],3)
    #     print i
    # lastImg = np.zeros(img.shape, np.uint8)
    # index = np.arange(30)
    # i = 0
    # for singleImg in imgSample:
    #     print index[i]
    #     i = i + 1
    #     lastImg = lastImg + singleImg
    # for (x, y) in np.ndenumerate(lastImg):
    #     if y > 255:
    #         print y, 'at', x, "bigger than 255"
    #         lastImg[x] = 255
    # cv2.imshow("test", lastImg)
    '''
    # Morphological closing on the original image, then Canny edges.
    kernel = np.ones((3,3), np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    cv2.imshow("Closing", closing)
    cv2.imwrite("C:/Users/Administrator/Desktop/closing.bmp", closing)
    canny = cv2.Canny(closing, 100,200)
    cv2.imshow("Contour", canny)
    if cv2.waitKey() == 27:
        # NOTE(review): cv2.destroyWindow() requires a window name;
        # cv2.destroyAllWindows() was probably intended — confirm.
        cv2.destroyWindow()
|
{"/test.py": ["/hos.py"]}
|
30,605
|
Rookiee/Research_Related
|
refs/heads/master
|
/afterHOS.py
|
# import cv2
# import numpy as np
# img = cv2.imread("C:/Users/Administrator/Desktop/0100.BMP")
# cv2.imshow("Original", img)
# rows, cols, depth = img.shape
# M = cv2.getRotationMatrix2D((cols/2, rows/2),30, 1)
# dst = cv2.warpAffine(img, M, (cols, rows))
# cv2.imshow("Result", dst)
# print "Original: ", img.shape, img.size
# print "Result: ", dst.shape, dst.size
# if cv2.waitKey() == ord('q'):
# cv2.destroyAllWindows()
import cv2
import numpy as np
# Post-process the HOS output: morphological closing, then Canny edges.
img = cv2.imread("C:/Users/Administrator/Desktop/test.bmp")
kernel = np.ones((3,3), np.uint8)
# dilation = cv2.dilate(img, kernel, iterations = 1)
dilation = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
cv2.imshow("result", dilation)
# NOTE(review): "resule.bmp" looks like a typo for "result.bmp" — confirm
# before renaming; other scripts may read this exact path.
cv2.imwrite("c:/Users/Administrator/Desktop/resule.bmp", dilation)
canny = cv2.Canny(dilation, 100,200)
cv2.imshow("canny", canny)
if cv2.waitKey() == ord('q'):
    cv2.destroyAllWindows()
|
{"/test.py": ["/hos.py"]}
|
30,626
|
nickcxm/quote-site
|
refs/heads/master
|
/quotes/quote/urls.py
|
from django.conf.urls import url
from . import views
# URL routes for the quote app; namespaced so templates can reverse them as
# 'quote:index', 'quote:add', 'quote:tag', 'quote:search'.
app_name='quote'
urlpatterns=[
    url(r'^$',views.IndexView.as_view(),name='index'),
    url(r'^add/$',views.add,name='add'),
    url(r'^tag/(?P<pk>[0-9]+)/$',views.TagView.as_view(),name='tag'),
    url(r'^search/$',views.search,name='search')
]
|
{"/quotes/quote/templatetags/quote_tags.py": ["/quotes/quote/models.py"], "/quotes/quote/admin.py": ["/quotes/quote/models.py"], "/quotes/quote/forms.py": ["/quotes/quote/models.py"], "/quotes/quote/views.py": ["/quotes/quote/models.py", "/quotes/quote/forms.py"]}
|
30,627
|
nickcxm/quote-site
|
refs/heads/master
|
/quotes/quote/templatetags/quote_tags.py
|
from ..models import Tag
from django import template
from django.db.models.aggregates import Count
register=template.Library()
@register.simple_tag
def get_most_tags():
    # Every Tag annotated with its related-quote count, exposed to
    # templates as `num_quote`.
    return Tag.objects.annotate(num_quote=Count('quote'))
|
{"/quotes/quote/templatetags/quote_tags.py": ["/quotes/quote/models.py"], "/quotes/quote/admin.py": ["/quotes/quote/models.py"], "/quotes/quote/forms.py": ["/quotes/quote/models.py"], "/quotes/quote/views.py": ["/quotes/quote/models.py", "/quotes/quote/forms.py"]}
|
30,628
|
nickcxm/quote-site
|
refs/heads/master
|
/quotes/quote/admin.py
|
from django.contrib import admin
from .models import Tag,Quote
# Register your models here.
# Admin configuration: which columns each model shows in its changelist.
class TagAdmin(admin.ModelAdmin):
    list_display = ['name']
class QuoteAdmin(admin.ModelAdmin):
    list_display = ['text','author','created_time']
admin.site.register(Tag,TagAdmin)
admin.site.register(Quote,QuoteAdmin)
|
{"/quotes/quote/templatetags/quote_tags.py": ["/quotes/quote/models.py"], "/quotes/quote/admin.py": ["/quotes/quote/models.py"], "/quotes/quote/forms.py": ["/quotes/quote/models.py"], "/quotes/quote/views.py": ["/quotes/quote/models.py", "/quotes/quote/forms.py"]}
|
30,629
|
nickcxm/quote-site
|
refs/heads/master
|
/quotes/quote/models.py
|
from django.db import models
# Create your models here.
class Tag(models.Model):
    """A label that can be attached to any number of quotes."""
    name=models.CharField(max_length=20)
    def __str__(self):
        return self.name
class Quote(models.Model):
    """A quotation with free-text body, author name and optional tags."""
    text=models.TextField()
    author=models.CharField(max_length=20)
    created_time=models.DateTimeField(auto_now_add=True)
    tags=models.ManyToManyField(Tag,blank=True)
    def __str__(self):
        return self.text
    class Meta:
        # Newest quotes first.
        ordering=['-created_time']
|
{"/quotes/quote/templatetags/quote_tags.py": ["/quotes/quote/models.py"], "/quotes/quote/admin.py": ["/quotes/quote/models.py"], "/quotes/quote/forms.py": ["/quotes/quote/models.py"], "/quotes/quote/views.py": ["/quotes/quote/models.py", "/quotes/quote/forms.py"]}
|
30,630
|
nickcxm/quote-site
|
refs/heads/master
|
/quotes/quote/migrations/0002_auto_20180203_1258.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-02-03 04:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Quote.Tags -> Quote.tags (PEP 8 field naming)."""
    dependencies = [
        ('quote', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='quote',
            old_name='Tags',
            new_name='tags',
        ),
    ]
|
{"/quotes/quote/templatetags/quote_tags.py": ["/quotes/quote/models.py"], "/quotes/quote/admin.py": ["/quotes/quote/models.py"], "/quotes/quote/forms.py": ["/quotes/quote/models.py"], "/quotes/quote/views.py": ["/quotes/quote/models.py", "/quotes/quote/forms.py"]}
|
30,631
|
nickcxm/quote-site
|
refs/heads/master
|
/quotes/quote/forms.py
|
from django import forms
from .models import Tag,Quote
class QuoteForm(forms.ModelForm):
    """ModelForm used both to render and to validate quote submissions."""
    # tags=forms.MultipleChoiceField(Tag.objects.all())
    class Meta:
        model=Quote
        fields=['text','author','tags']
|
{"/quotes/quote/templatetags/quote_tags.py": ["/quotes/quote/models.py"], "/quotes/quote/admin.py": ["/quotes/quote/models.py"], "/quotes/quote/forms.py": ["/quotes/quote/models.py"], "/quotes/quote/views.py": ["/quotes/quote/models.py", "/quotes/quote/forms.py"]}
|
30,632
|
nickcxm/quote-site
|
refs/heads/master
|
/quotes/quote/views.py
|
from django.shortcuts import render,get_object_or_404,redirect
from django.views.generic import ListView
from .models import Tag,Quote
from django.db.models import Q
from .forms import QuoteForm
# Create your views here.
class IndexView(ListView):
    """Index page: every quote plus the add-quote form."""
    model = Quote
    template_name = 'index.html'
    context_object_name = 'quote_list'
    def get_context_data(self, **kwargs):
        # NOTE(review): overriding 'quote_list' with Quote.objects.all()
        # replaces the paginated object list, so `paginate_by` below has no
        # visible effect in the template — confirm whether pagination is
        # actually wanted here.
        context=super(IndexView,self).get_context_data(**kwargs)
        form=QuoteForm()
        quote_list=Quote.objects.all()
        context.update({
            'form':form,
            'quote_list':quote_list
        })
        return context
    paginate_by = 1
class TagView(ListView):
    """Same template as IndexView but restricted to one tag (pk from URL)."""
    model = Quote
    template_name = 'index.html'
    context_object_name = 'quote_list'
    def get_context_data(self, **kwargs):
        context=super(TagView,self).get_context_data(**kwargs)
        # 404 when the tag id in the URL does not exist.
        tag=get_object_or_404(Tag,pk=self.kwargs.get('pk'))
        form=QuoteForm()
        quote_list=Quote.objects.all().filter(tags=tag)
        context.update({
            'form':form,
            'quote_list':quote_list
        })
        return context
def add(request):
    """Handle the add-quote form: save on valid POST, re-render with field
    errors on invalid POST; plain GETs are redirected to the index."""
    if request.method=="POST":
        form=QuoteForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/')
        else:
            # Invalid submission: show the index again with the bound form
            # so validation errors are displayed.
            quote_list=Quote.objects.all()
            context={'form':form,
            'quote_list':quote_list
            }
            return render(request,'index.html',context=context)
    return redirect('/')
def search(request):
    """Case-insensitive substring search over quote text via ?q=...

    Renders index.html with the matching quotes; when q is missing or
    empty, renders an error message instead.
    """
    q = request.GET.get('q')
    error_msg = ''
    form = QuoteForm()
    if not q:
        error_msg = "input some words!"
        # CONSISTENCY FIX: also pass the form so the submission widget
        # still renders on the error page, matching the success branch.
        return render(request, 'index.html', {'error_msg': error_msg,
                                              'form': form})
    quote_list = Quote.objects.filter(Q(text__icontains=q))
    return render(request, 'index.html', {'error_msg': error_msg,
                                          'form': form,
                                          'quote_list': quote_list
                                          })
|
{"/quotes/quote/templatetags/quote_tags.py": ["/quotes/quote/models.py"], "/quotes/quote/admin.py": ["/quotes/quote/models.py"], "/quotes/quote/forms.py": ["/quotes/quote/models.py"], "/quotes/quote/views.py": ["/quotes/quote/models.py", "/quotes/quote/forms.py"]}
|
30,640
|
BrightTux/trajClassifier
|
refs/heads/master
|
/generateList.py
|
"""Generate test.csv mapping 1-based sample indices to trajectory labels."""

# (start, end, label) — both bounds inclusive.  This replaces nine
# copy-pasted loops (whose trailing `i += 1` statements were no-ops:
# `for` rebinds `i` on every iteration anyway).
_RANGES = [
    (1, 44, "NoActivity"),
    (45, 97, "Traj1"),
    (98, 117, "Traj2"),
    (118, 167, "Traj3"),
    (168, 212, "Traj5"),
    (213, 256, "Traj6"),
    (257, 286, "Traj7"),
    (287, 305, "Traj8"),
    (306, 329, "Traj9"),
]

# `with` guarantees the file is closed even on error.
with open("test.csv", "w+") as f:
    for start, end, label in _RANGES:
        for i in range(start, end + 1):
            f.write("%d;%s\r\n" % (i, label))
|
{"/feature_extract.py": ["/dataclass.py"], "/dataclass.py": ["/processor.py"]}
|
30,641
|
BrightTux/trajClassifier
|
refs/heads/master
|
/hello.py
|
# hello.py
import numpy as np
from numpy import array
np.set_printoptions(precision=2)
def rescale_list(input_list, size):
    """Downsample *input_list* to exactly *size* evenly spaced elements.

    Keeps every (len // size)-th element starting from the first, then
    truncates to *size*.  Requires len(input_list) >= size.
    """
    assert len(input_list) >= size
    # Stride between kept elements.
    step = len(input_list) // size
    sampled = input_list[::step]
    # Slicing may yield one extra element; drop it.
    return sampled[:size]
if __name__ == '__main__':
    # Smoke-test rescale_list and the self-zip duplication idiom.
    print("hello world")
    size = 4
    input_list = [1, 2, 3, 4, 5]
    # The original built a ~100-element 'train/931\\NNNN.jpg' list here and
    # immediately overwrote it with a 3-element one; neither was ever used,
    # so both dead assignments have been removed.
    print(rescale_list(input_list, size))
    # Interleave the list with itself: duplicates every element in order.
    output = [x for pair in zip(input_list, input_list) for x in pair]
    print(output)
|
{"/feature_extract.py": ["/dataclass.py"], "/dataclass.py": ["/processor.py"]}
|
30,642
|
BrightTux/trajClassifier
|
refs/heads/master
|
/feature_extract.py
|
"""
This script generates extracted features for each video, which other
models make use of.
You can change you sequence length and limit to a set number of classes
below.
class_limit is an integer that denotes the first N classes you want to
extract features from. This is useful is you don't want to wait to
extract all 101 classes. For instance, set class_limit = 8 to just
extract features for the first 8 (alphabetical) classes in the dataset.
Then set the same number when training models.
"""
import numpy as np
import glob
import os
from os import walk
# from data import DataSet
from dataclass import DataGenerator
from extractor import Extractor
from tqdm import tqdm
def patch_path(path):
    """Resolve *path* relative to the directory containing this script."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, path)
# Set defaults.
seq_length = 40
class_limit = None # Number of classes to extract. Can be 1-101 or None for all.
# Get the dataset.
target_height, target_width, channel_size = 299, 299, 3
seq_length = 40
# Parameters
# Keyword arguments forwarded to DataGenerator below.
params = {'shape_h': target_height, 'shape_w': target_width,
          'seq_length': seq_length,
          'dim': (seq_length,target_height,target_width),
          'batch_size': 1,
          'n_classes': 9,
          'n_channels': channel_size,
          'shuffle': True,
          'bool_addnoise': True
          }
# CSV rows look like "<sample_dir>;<label>" (see generateList.py).
# NOTE(review): f_train is never closed — consider a `with` block.
train_list = patch_path('train_random_small.csv')
f_train= open(train_list,"r")
fread_train = f_train.readlines()
x_train_input = []
y_train_label = []
y_classes = ["NoActivity", "Traj1", "Traj2", "Traj3", "Traj5", "Traj6", "Traj7", "Traj8", "Traj9"]
for x in fread_train:
    a,b = x.split(";")
    x_train_input.append(a)
    y_train_label.append(y_classes.index(b.strip()))
# (samples,time, rows, cols, channels)
# X: (1, 52, 15, 128, 1) means that you have only one sample that is a sequence of 52 images.
# ------- Training data: ----------------------------------------------------
input_dir = "train/"
sequences = [os.path.join(input_dir, f) for f in x_train_input]
# Collect the list of frame paths for every sample directory.
seq_train_x = []
f1_train = []
for index, i in enumerate(sequences):
    for (dirpath, dirnames, filenames) in walk(i):
        for x in filenames:
            # f1_train.extend(filenames)
            f1_train.append(os.path.join(dirpath, x))
    seq_train_x.append(f1_train)
    f1_train = []
# NOTE(review): this rebinds seq_length from the scalar 40 above to a
# per-sample length list; params already captured the scalar — confirm
# the shadowing is intentional.
seq_length = [len(f) for f in seq_train_x]
data = DataGenerator(seq_train_x, y_train_label, **params)
# data = DataSet(seq_length=seq_length, class_limit=class_limit)
# get the model.
model = Extractor()
# Loop through data.
pbar = tqdm(total=len(data.data))
# sequences = [os.path.join(input_dir, f) for f in x_train_input]
#for video in data.data:
for index, i in enumerate(sequences):
    # Get the path to the sequence for this video.
    #path = os.path.join('train', video[0]) # numpy will auto-append .npy
    path = i
    # Check if we already have it.
    if os.path.isfile(path + '.npy'):
        pbar.update(1)
        continue
    # Get the frames for this video.
    frames = sorted(glob.glob(os.path.join(path, '*jpg')))
    # Now downsample to just the ones we need.
    frames = data.rescale_list(frames, 40)
    # Now loop through and extract features to build the sequence.
    sequence = []
    for image in frames:
        features = model.extract(image)
        sequence.append(features)
    # Save the sequence.
    np.save(path, sequence)
    pbar.update(1)
pbar.close()
|
{"/feature_extract.py": ["/dataclass.py"], "/dataclass.py": ["/processor.py"]}
|
30,643
|
BrightTux/trajClassifier
|
refs/heads/master
|
/processor.py
|
"""
Process an image that we can pass to our networks.
"""
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
def process_image(image, target_shape, add_noise):
    """Load an image, apply the static ROI mask, optionally add noise, and
    return a float32 array normalised to [0, 1].

    image: path to the image file.
    target_shape: (height, width, channels); channels must be 1 or 3.
    add_noise: when truthy, add small Gaussian noise (re-masked and clipped).
    Raises ValueError for an unsupported channel count.
    """
    h, w, channels = target_shape
    # Static region-of-interest mask, applied multiplicatively.
    mask_img = './img_mask.jpg'
    if channels == 3:
        image = load_img(image, target_size=(h, w))
        mask = load_img(mask_img, target_size=(h, w))
    elif channels == 1:
        image = load_img(image, grayscale=True, target_size=(h, w))
        mask = load_img(mask_img, grayscale=True, target_size=(h, w))
    else:
        # BUG FIX: the original only printed a warning here and then crashed
        # further down with an unbound-name error; fail fast instead.
        raise ValueError("Unsupported number of channels: %r" % (channels,))
    # Turn it into numpy, normalize, mask and return.
    img_arr = img_to_array(image)
    mask_arr = img_to_array(mask)
    x = (img_arr / 255.).astype(np.float32)
    x_mask = (mask_arr / 255.).astype(np.float32)
    x = x * x_mask
    if add_noise:
        noise_factor = 0.01
        x = x + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x.shape)
        # Re-mask and clip so the noise stays inside the ROI and value range.
        x = x * x_mask
        x = np.clip(x, 0., 1.)
    return x
|
{"/feature_extract.py": ["/dataclass.py"], "/dataclass.py": ["/processor.py"]}
|
30,644
|
BrightTux/trajClassifier
|
refs/heads/master
|
/train.py
|
# https://github.com/farquasar1/ConvLSTM.git
import matplotlib
matplotlib.use('agg')
import os
from os import walk
import time
os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras import backend as K
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger
K.set_image_dim_ordering('tf')
#from processor import process_image
from keras.layers import (ConvLSTM2D, BatchNormalization, Conv3D, Conv2D, Flatten, LSTM, Reshape,
TimeDistributed, MaxPooling2D, MaxPooling3D, UpSampling2D, Input, merge, Dense, Activation, Dropout)
from keras.models import Sequential, Model
from keras import losses
import random
from keras.utils import plot_model
from dataclass import DataGenerator
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
cwd = os.getcwd()
print(cwd)
data_format='channels_last'
# clear the console.
#os.system('cls' if os.name == 'nt' else 'clear')
# Input geometry: grayscale 240x360 frames, sequences of 40.
target_height, target_width, channel_size = 240, 360, 1
seq_length = 40
# Run tag used to name logs/checkpoints.
model = '3DConv2Dpool-graylarge-withMask'
# Parameters
# Keyword arguments forwarded to DataGenerator.
params = {'shape_h': target_height, 'shape_w': target_width,
          'seq_length': seq_length,
          'dim': (seq_length,target_height,target_width),
          'batch_size': 1,
          'n_classes': 9,
          'n_channels': channel_size,
          'shuffle': True,
          'bool_addnoise': True
          }
# ------------------------------------------------------------------------------------------------------------------------
def class_convLstm_clare(input_shape):
    """Build and compile a stacked ConvLSTM2D sequence classifier.

    input_shape: per-sample shape, presumably (time, rows, cols, channels)
    — TODO confirm against DataGenerator.
    Returns a compiled Keras Model (categorical crossentropy, adadelta).
    NOTE(review): uses Keras-1-style kwargs (nb_filter/nb_row/nb_col/
    border_mode), so this requires an old Keras version.
    """
    c = 32
    activation_fn = 'relu'
    kernal_size = (2,2)
    num_classes = 9
    return_sequences_setting = True
    input_img = Input(input_shape, name='input')
    # ------------------- NOT USING -------------------------------------------------------------------------------------------------------
    # # normal conv network to resize the img
    # # input input_shape = batch_size x rows x cols x channel
    # # input input_shape = batch_size x 640 x 480 x 3
    #
    # x = TimeDistributed(Conv2D(128, kernal_size, activation='relu', padding='same',data_format='channels_last'))(input_img)
    # c0 = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(x)
    # # output size = (batch, new_rows, new_cols, filters)
    # # output size = (batch_size, 320, 240, 128)
    # ------------------- NOT USING -------------------------------------------------------------------------------------------------------
    # -------------------------------------------------------------------------------------------------------------------------------------
    # (samples,time, rows, cols, channels)
    # X: (1, 52, 15, 128, 1) means that you have only one sample that is a sequence of 52 images.
    # start of convlstm network
    print("input_img size: ", input_img)
    # Four ConvLSTM stages; each pair is followed by per-frame 2x2 pooling
    # and dropout.
    x = ConvLSTM2D(nb_filter=c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn, return_sequences=return_sequences_setting)(input_img)
    #x = ConvLSTM2D(nb_filter=c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    c1 = ConvLSTM2D(nb_filter=c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c1)
    x = Dropout(0.25)(x)
    x = ConvLSTM2D(nb_filter=2 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    #x = ConvLSTM2D(nb_filter=2 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    c2 = ConvLSTM2D(nb_filter=2 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c2)
    x = Dropout(0.25)(x)
    x = ConvLSTM2D(nb_filter=3 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    #x = ConvLSTM2D(nb_filter=3 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    c3 = ConvLSTM2D(nb_filter=3 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c3)
    x = Dropout(0.25)(x)
    x = ConvLSTM2D(nb_filter=4 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    #x = ConvLSTM2D(nb_filter=4 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    c4 = ConvLSTM2D(nb_filter=4 * c, nb_row=3, nb_col=3, border_mode='same', activation=activation_fn,return_sequences=return_sequences_setting)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c4)
    c5 = Dropout(0.25)(x)
    # Per-frame flatten + dense head; final Dense applies softmax over the
    # last axis for each timestep.
    x = TimeDistributed(Flatten(), name='flatten')(c5)
    x = TimeDistributed(Dense(256, activation='relu'))(x)
    c6 = Dropout(0.25)(x)
    #output = TimeDistributed(Dense(num_classes, activation='softmax'), name='output')(c6)
    output = Dense(num_classes, activation='softmax', name='output')(c6)
    model = Model(input_img, output=[output])
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    # model.summary()
    return model
# ----------------------------------------------------------------------------------------------------------------------------------
def class_3dconv_clare(input_shape):
    """Build and compile a 3D-conv + LSTM sequence classifier.

    input_shape: per-sample shape, presumably (time, rows, cols, channels)
    — TODO confirm against DataGenerator.
    Returns a compiled Keras Model (categorical crossentropy, adadelta)
    with a per-timestep softmax head.
    """
    c = 4
    activation_fn = 'relu'
    kernal_size = 2
    num_classes = 9
    dropout_val = 0.15
    return_sequences_setting = True
    input_img = Input(input_shape, name='input')
    # start of 3dconv network
    print("input_img size: ", input_img)
    # Four Conv3D stages; each is followed by per-frame 2x2 pooling and
    # dropout (spatial downsampling only — the time axis is preserved).
    x = Conv3D(kernel_size=kernal_size ,filters=4*c, padding='same', activation=activation_fn)(input_img)
    x = Conv3D(kernel_size=kernal_size ,filters=4*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=4*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=4*c, padding='same', activation=activation_fn)(x)
    c1 = Conv3D(kernel_size=kernal_size ,filters=4*c, padding='same', activation=activation_fn)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c1)
    x = Dropout(dropout_val)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=8*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=8*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=8*c, padding='same', activation=activation_fn)(x)
    c2 = Conv3D(kernel_size=kernal_size ,filters=8*c, padding='same', activation=activation_fn)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c2)
    x = Dropout(dropout_val)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    c3 = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c3)
    x = Dropout(dropout_val)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    x = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    c4 = Conv3D(kernel_size=kernal_size ,filters=16*c, padding='same', activation=activation_fn)(x)
    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c4)
    #x = MaxPooling3D()(x)
    # c5 = Dropout(0.25)(x)
    #
    # x = TimeDistributed(Flatten(), name='flatten')(c5)
    # x = TimeDistributed(Dense(256, activation='relu'))(x)
    # c6 = Dropout(0.25)(x)
    #
    # #output = TimeDistributed(Dense(num_classes, activation='softmax'), name='output')(c6)
    #
    # output = Dense(num_classes, activation='softmax', name='output')(c6)
    #
    # model = Model(input_img, output=[output])
    # model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    #
    c5 = Dropout(dropout_val)(x)
    # Per-frame flatten, then an LSTM over the time axis and a per-timestep
    # softmax head.
    x = TimeDistributed(Flatten(), name='flatten')(c5)
    #x = LSTM(512, return_sequences=True)(x)
    x = LSTM(256, return_sequences=True)(x)
    #x = TimeDistributed(Dense(128, activation='relu'))(x)
    c6 = Dropout(dropout_val)(x)
    output = TimeDistributed(Dense(num_classes, activation='softmax'), name='output')(c6)
    #reshape = Reshape((128))(c6)
    #output = Dense(num_classes, activation='softmax', name='output')(c6)
    model = Model(input_img, output=[output])
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    # model.summary()
    return model
# ------------------------------------------------------------------------------------------------------------------------
def lrcn(input_shape):
    """Build a CNN-into-RNN (LRCN) video classifier.

    Frame-wise VGG-style convolutions are applied through TimeDistributed
    wrappers, flattened per frame, and fed to an LSTM whose final output
    is classified with a softmax over the 9 trajectory classes.

    Starting version from:
    https://github.com/udacity/self-driving-car/blob/master/
    steering-models/community-models/chauffeur/models.py
    Heavily influenced by VGG-16:
    https://arxiv.org/abs/1409.1556
    Also known as an LRCN:
    https://arxiv.org/pdf/1411.4389.pdf
    """
    num_classes = 9
    frames = Input(input_shape, name='input')

    # Stem: strided 7x7 conv, then a (valid-padded) 3x3 conv, then pool.
    net = TimeDistributed(Conv2D(32, (7, 7), strides=(2, 2),
                                 activation='relu', padding='same'))(frames)
    net = TimeDistributed(Conv2D(32, (3, 3), kernel_initializer="he_normal",
                                 activation='relu'))(net)
    net = TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2)))(net)

    # Four VGG-style double-conv blocks with doubling filter counts.
    for n_filters in (64, 128, 256, 512):
        net = TimeDistributed(Conv2D(n_filters, (3, 3), padding='same',
                                     activation='relu'))(net)
        net = TimeDistributed(Conv2D(n_filters, (3, 3), padding='same',
                                     activation='relu'))(net)
        net = TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2)))(net)

    # Per-frame feature vectors -> temporal model -> class probabilities.
    net = TimeDistributed(Flatten(), name='flatten')(net)
    net = Dropout(0.5)(net)
    net = LSTM(256, return_sequences=False, dropout=0.5)(net)
    output = Dense(num_classes, activation='softmax', name='output')(net)

    model = Model(frames, output=[output])
    model.compile(loss='categorical_crossentropy', optimizer='adadelta',
                  metrics=['accuracy'])
    return model
# ------------------------------------------------------------------------------------------------------------------------
def net_summary(net):
    """Return the text of ``net.summary()`` as a string.

    Keras prints the model summary to stdout, so stdout is temporarily
    redirected into an in-memory buffer while ``summary()`` runs.

    Args:
        net: any object with a no-argument ``summary()`` method that
            prints to stdout (e.g. a Keras model).

    Returns:
        str: everything ``summary()`` wrote to stdout.
    """
    from contextlib import redirect_stdout
    from io import StringIO

    buf = StringIO()
    # redirect_stdout restores sys.stdout even if summary() raises,
    # unlike the manual save/assign/restore it replaces (which leaked
    # the redirected stdout on exception).
    with redirect_stdout(buf):
        net.summary()
    return buf.getvalue()
def train_model(data_type, image_shape, class_limit, model, batch_size, network=None, nb_epochs=100, train_list=None, test_list=None, jitter=None, output_dir=None):
    """Build train/validation generators from CSV listings and fit `network`.

    Each line of `train_list` / `test_list` (open file handles) is
    "<sequence_dir>;<class_name>"; frames for a sequence are gathered by
    walking train/<dir> (resp. test/<dir>).

    NOTE(review): `model` here is a run-name string used only in
    checkpoint/log file paths; the local name is later rebound to the
    compiled Keras model returned by `network(input_shape)`.
    NOTE(review): `class_limit`, `image_shape`, `jitter` and `output_dir`
    are accepted but never used in this body — confirm whether vestigial.
    """
    # Helper: save the best weights (by validation accuracy) each epoch.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'weights', model + '-' + data_type + \
            '.{epoch:03d}-{val_acc:.3f}.hdf5'),
        verbose=1,
        save_best_only=True, monitor='val_acc')
    # Helper: TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model + '-tensorbard.log'), write_images=True)
    # Helper: stop when val_acc has not improved for 200 epochs.
    early_stopper = EarlyStopping(monitor='val_acc',
                                  min_delta=0,
                                  patience=200,
                                  verbose=0,
                                  mode='auto')
    # Helper: save per-epoch metrics to a timestamped CSV log.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))
    fread_train = train_list.readlines()
    fread_test = test_list.readlines()
    x_train_input = []
    y_train_label = []
    # Class names in label-index order; CSV label strings map to indices.
    y_classes = ["NoActivity", "Traj1", "Traj2", "Traj3", "Traj5", "Traj6", "Traj7", "Traj8", "Traj9"]
    for x in fread_train:
        a,b = x.split(";")
        x_train_input.append(a)
        y_train_label.append(y_classes.index(b.strip()))
    x_test_input = []
    y_test_label = []
    for x in fread_test:
        a,b = x.split(";")
        x_test_input.append(a)
        y_test_label.append(y_classes.index(b.strip()))
    # (samples,time, rows, cols, channels)
    # X: (1, 52, 15, 128, 1) means that you have only one sample that is a sequence of 52 images.
    # ------- Training data: ----------------------------------------------------
    input_dir = "train/"
    sequences = [os.path.join(input_dir, f) for f in x_train_input]
    seq_train_x = []
    f1_train = []
    # Collect every frame path under each sequence directory.
    # NOTE(review): os.walk yields filenames in arbitrary OS order; if
    # frame order matters to the model, a sorted() may be missing here.
    for index, i in enumerate(sequences):
        for (dirpath, dirnames, filenames) in walk(i):
            for x in filenames:
                # f1_train.extend(filenames)
                f1_train.append(os.path.join(dirpath, x))
        seq_train_x.append(f1_train)
        f1_train = []
    # Per-sequence frame counts (currently unused — kept for debugging).
    seq_length = [len(f) for f in seq_train_x]
    # testing to see if it works:
    # print(seq_train_x[0])
    # print(y_train_label[0])
    # print(seq_length[0])
    # print(sequences[0])
    # ------- Training data: ----------------------------------------------------
    # ------- Testing data: -----------------------------------------------------
    input_dir = "test/"
    sequences_test = [os.path.join(input_dir, f) for f in x_test_input]
    seq_test_x = []
    f1_test = []
    for index, i in enumerate(sequences_test):
        for (dirpath, dirnames, filenames) in walk(i):
            for x in filenames:
                # f1_test.extend(filenames)
                f1_test.append( os.path.join(dirpath, x))
        seq_test_x.append(f1_test)
        f1_test = []
    seq_length_test = [len(f) for f in seq_test_x]
    # for i, v0 in enumerate(seq_test_x):
    #     for j, value in enumerate(seq_test_x[i]):
    #         seq_test_x[i][j] = os.path.join(input_dir, value)
    # ------- Testing data: -----------------------------------------------------
    # Generators.  `params` is a module-level dict of DataGenerator kwargs
    # — TODO confirm it matches DataGenerator.__init__'s signature.
    training_generator = DataGenerator(seq_train_x, y_train_label, **params)
    validation_generator = DataGenerator(seq_test_x, y_test_label, **params)
    # Setup model and train
    # (samples,time, rows, cols, channels)
    # X: (1, 52, 15, 128, 1) means that you have only one sample that is a sequence of 52 images.
    input_shape = (None, target_height, target_width, channel_size)
    # Rebinds `model` from the run-name string to the compiled Keras model.
    model = network(input_shape)
    print(net_summary(model))
    # print ("(---------------------------- DEBUG ----------------------------)")
    # print("generator: ", generator)
    model.fit_generator(training_generator, epochs=nb_epochs, validation_data=validation_generator, use_multiprocessing=True, workers=5,
                        callbacks=[tb, checkpointer, early_stopper, csv_logger])
# ------------------------------------------------------------------------------------------------------------------------
# TESTING FEATURES
# ------------------------------------------------------------------------------------------------------------------------
# for x in x_test_input:
# print(x)
## try to display the input
## imread = second flag 1 = normal(rgb), 0 = gray
## mypath = "train/"+x_test_input[0]
# f = []
# for (dirpath, dirnames, filenames) in walk(mypath):
# f.extend(filenames)
# break
# print(f)
#
# for filename in f:
# img = cv2.imread("train/"+x_test_input[0]+ "/" +filename,1)
# cv2.imshow('image',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# ------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------
def patch_path(path):
    """Resolve *path* relative to the directory containing this script."""
    script_dir = os.path.dirname(__file__)
    return os.path.join(script_dir, path)
if __name__ == '__main__':
    # Load data split
    train_list = patch_path('train_random.csv')
    test_list = patch_path('test_random.csv')
    # Run-name string used in checkpoint/log file paths.  Previously only
    # the commented-out assignment below existed, so every use of `model`
    # (TensorBoard log dir, CSVLogger path, train_model call) raised
    # NameError.  Named after the network selected below.
    model = '3dconv'
    # model = 'convlstm2d'
    saved_model = None  # None or weights file
    class_limit = 9  # int, can be 1-101 or None
    load_to_memory = False  # pre-load the sequences into memory
    data_type = 'images'
    # height, width, depth = 480, 640, 3  # input image size
    image_shape = (target_height, target_width, 1)
    # Helper: TensorBoard.  NOTE(review): train_model builds its own
    # callbacks; these two appear unused but are kept for parity.
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))
    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' +
                                        str(timestamp) + '.log'))
    network = class_3dconv_clare  # input_shape = (None, 96, 108, 3)
    # network = class_convLstm_clare  # input_shape = (None, 96, 108, 3)
    # plot_model(network, to_file=model+'_model.png', show_shapes=True)
    batch_size = 1
    # Context managers close the split files even if training fails
    # (the original left both handles open for the process lifetime).
    with open(train_list, "r") as f_train, open(test_list, "r") as f_test:
        train_model(data_type, image_shape, 9, model, batch_size, network,
                    nb_epochs=5000, train_list=f_train, test_list=f_test,
                    output_dir='tmp1')
|
{"/feature_extract.py": ["/dataclass.py"], "/dataclass.py": ["/processor.py"]}
|
30,645
|
BrightTux/trajClassifier
|
refs/heads/master
|
/randomize.py
|
# Randomly reorder the test split listing and persist it.
import random

# Read the ordered split, shuffle in memory, then write the shuffled copy.
# `with` guarantees both handles are closed and flushed (the original
# opened the output file and never closed it).
with open('test.csv') as f_input:
    lines = f_input.read().splitlines()
random.shuffle(lines)
# print ('\n'.join(lines))
with open("test_random.csv", "w+") as f_output:
    f_output.write('\n'.join(lines))
|
{"/feature_extract.py": ["/dataclass.py"], "/dataclass.py": ["/processor.py"]}
|
30,646
|
BrightTux/trajClassifier
|
refs/heads/master
|
/dataclass.py
|
import numpy as np
import keras
from processor import process_image
import glob
import os
import csv
from keras.preprocessing.image import img_to_array, load_img
class DataGenerator(keras.utils.Sequence):
    """Generates batched frame-sequence data for Keras.

    Each element of `list_IDs` is a list of frame file paths for one clip;
    `labels` holds the matching integer class per clip.  Batches are
    returned as (X, y) with X of shape (batch_size, *dim, n_channels) and
    y one-hot encoded over `n_classes`.
    """

    def __init__(self, list_IDs, labels, shape_h, shape_w, seq_length, batch_size=10, dim=(None,480,640), n_channels=3,
                 n_classes=9, shuffle=True, bool_addnoise=True):
        """Store configuration and build the initial (shuffled) index order.

        NOTE(review): dim[0] is consumed below as the clip length
        (rescale_list target), so the default dim=(None,480,640) looks
        unusable as-is — callers appear to pass a concrete dim via
        **params; confirm.
        """
        self.dim = dim
        self.batch_size = batch_size
        self.labels = labels
        self.list_IDs = list_IDs
        self.image_shape = (shape_h,shape_w, n_channels)
        self.seq_length = seq_length
        self.bool_addnoise = bool_addnoise
        #print ("ID and labels:" , list_IDs, labels)
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()
        self.data = self.get_data()
        # print out x and y size:
        print("X size: ", len(list_IDs))
        print("y size: ", len(labels))

    @staticmethod
    def get_data():
        """Load our data from file.

        NOTE(review): the path 'train_random.csv' is hard-coded, so a
        generator built for the test split also reads the training
        listing; `self.data` is not used elsewhere in this class —
        confirm whether this load is still needed.
        """
        with open('train_random.csv', 'r') as fin:
            reader = csv.reader(fin)
            data = list(reader)
        return data

    def on_epoch_end(self):
        """Rebuild (and optionally reshuffle) sample indexes after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __len__(self):
        """Denotes the number of batches per epoch (final partial batch dropped)."""
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch of data for batch number `index`."""
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        add_noise = self.bool_addnoise
        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        list_labels_temp = [self.labels[g] for g in indexes]
        # Generate data
        X, y = self.__data_generation(list_IDs_temp, list_labels_temp, add_noise)
        #print( "X, y shapes:", X.shape, ",", y.shape)
        return X, y

    def build_image_sequence(self, frames, add_noise):
        """Given a set of frames (filenames), build our sequence.

        NOTE(review): despite the name/docstring, `frames` is a single
        filename at the call site in __data_generation (the per-frame
        variant is the commented-out comprehension below) — confirm.
        """
        bool_addnoise = add_noise
        return [process_image(frames, self.image_shape, bool_addnoise)]
        #return [process_image(x, self.image_shape) for x in frames]

    def rescale_list(self, input_list, size):
        """Given a list and a size, return a rescaled/sampled list. For example,
        if we want a list of size 5 and we have a list of size 25, return a new
        list of size five which is every 5th element of the original list."""
        # assert len(input_list) >= size
        if (len(input_list) < size):
            #print(input_list)
            # Too few frames: duplicate each frame once.
            # NOTE(review): a single doubling still falls short of `size`
            # when len(input_list) < size/2 — confirm inputs are always at
            # least half the target length.
            output = [x for pair in zip(input_list,input_list) for x in pair]
        else:
            # Get the number to skip between iterations.
            skip = len(input_list) // size
            # Build our new output.
            output = [input_list[i] for i in range(0, len(input_list), skip)]
        # Cut off the last one if needed.
        return output[:size]

    def __data_generation(self, list_IDs_temp, list_labels_temp, add_noise):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim, self.n_channels))
        bool_addnoise = add_noise
        #print("list_IDs_temp ", list_IDs_temp)
        #print("list_labels_temp ", list_labels_temp)
        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            lst = list(self.dim)
            #lst[0] = len(ID)
            #self.dim = tuple(lst)
            #print("X (in i): ", X.shape)
            # Sample/pad the clip to exactly dim[0] frames.
            frames = self.rescale_list(ID, lst[0])
            for j, valID in enumerate(frames):
                # Store sample
                # NOTE(review): X[i,:] assigns the whole clip slot on every
                # frame iteration (j is unused) — looks like X[i, j] was
                # intended; confirm against process_image's return shape.
                X[i,:] = self.build_image_sequence(valID, bool_addnoise)
        # change to the following line if its the lstmconv2d network
        #y = np.empty((self.batch_size, lst[0]), dtype=int)
        #change to the following line if its the conv3d network
        # NOTE(review): 40 is a hard-coded per-clip timestep count for the
        # conv3d network — keep in sync with dim[0].
        y = np.empty((self.batch_size, 40), dtype=int)
        for i, ID in enumerate(list_labels_temp):
            # Store class
            y[i] = ID
        return X, keras.utils.to_categorical(y, num_classes=self.n_classes)
|
{"/feature_extract.py": ["/dataclass.py"], "/dataclass.py": ["/processor.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.