index (int64) | repo_name (string) | branch_name (string) | path (string) | content (string) | import_graph (string)
|---|---|---|---|---|---|
39,719
|
Rallstad/adventofcode_2020
|
refs/heads/main
|
/1.py
|
from utils import readFile

intNumArr = [int(line) for line in readFile('inputs/inputTask1.txt')]

def task1a(intNumArr):
    for i in range(len(intNumArr)):
        for j in range(i+1, len(intNumArr)):
            value = intNumArr[i] + intNumArr[j]
            if value == 2020:
                print("%d + %d = %d" % (intNumArr[i], intNumArr[j], value))
                print("product: %d" % (intNumArr[i] * intNumArr[j]))

def task1b(intNumArr):
    for i in range(len(intNumArr)):
        for j in range(i+1, len(intNumArr)):
            for k in range(j+1, len(intNumArr)):
                value = intNumArr[i] + intNumArr[j] + intNumArr[k]
                if value == 2020:
                    print("%d + %d + %d = %d" % (intNumArr[i], intNumArr[j], intNumArr[k], value))
                    print("product: %d" % (intNumArr[i] * intNumArr[j] * intNumArr[k]))

print("task 1a")
task1a(intNumArr)
print("task 1b")
task1b(intNumArr)
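# Illustrative addition, not in the original file: the nested-loop pair search
# in task1a is O(n^2); a set gives O(n). A minimal sketch under the same input
# assumptions (the function name is chosen here, not part of the repo):
def task1a_linear(nums, target=2020):
    seen = set()
    for n in nums:
        complement = target - n
        if complement in seen:
            return n * complement  # product of the pair summing to target
        seen.add(n)
    return None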
|
{"/6.py": ["/utils.py"], "/16.py": ["/utils.py"], "/13.py": ["/utils.py"], "/5.py": ["/utils.py"], "/10.py": ["/utils.py"], "/12.py": ["/utils.py"], "/15.py": ["/utils.py"], "/3.py": ["/utils.py"], "/2.py": ["/utils.py"], "/11.py": ["/utils.py"], "/4.py": ["/utils.py"], "/9.py": ["/utils.py"], "/14.py": ["/utils.py"], "/7.py": ["/utils.py"], "/8.py": ["/utils.py"], "/1.py": ["/utils.py"]}
|
39,734
|
arititamang/Paranli
|
refs/heads/master
|
/mysite/employee/forms.py
|
from django import forms
from django.forms.widgets import NumberInput
from captcha.fields import CaptchaField
from .models import Contact

MONTH_CHOICES = (
    ("01", "January"),
    ("02", "February"),
    ("03", "March"),
    ("04", "April"),
    ("05", "May"),
    ("06", "June"),
    ("07", "July"),
    ("08", "August"),
    ("09", "September"),
    ("10", "October"),
    ("11", "November"),
    ("12", "December"),
)

YEAR_CHOICES = [
    ('1990', '1899'),
    ('1997', '1898'),
    ('1896', '1994'),
    ('1895', '1894'),
    ('1991', '1893'),
    ('1892', '1990'),
    ('1891', '1890'),
    ('1890', '1898'),
    ('1991', '1992'),
    ('1895', '1994'),
    ('1991', '1893'),
    ('1892', '1990'),
]

class StudentForm(forms.Form):
    cpf = forms.IntegerField(label="CPF/GPF ",
                             help_text="Enter 14 digit number")
    Date_of_Birth = forms.DateField(widget=NumberInput(attrs={'type': 'date'}))
    Month = forms.ChoiceField(choices=MONTH_CHOICES)
    Year = forms.ChoiceField(choices=YEAR_CHOICES)

class Form(forms.Form):
    captcha = CaptchaField()

class ContactForm(forms.ModelForm):
    class Meta:
        model = Contact
        fields = ['cpf', 'dob', 'month', 'otp', 'year']
        widgets = {
            'cpf': forms.TextInput(attrs={'class': 'input', 'placeholder': 'Your Name'}),
            'dob': forms.DateInput(attrs={'class': 'form-control', 'type': 'date', 'required': 'required'}),
            'otp': forms.TextInput(attrs={'class': 'otp', 'placeholder': 'Your message...'}),
        }
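# Illustrative addition, not in the original file: a minimal sketch of binding
# and validating StudentForm in a view (field names taken from the class above;
# the sample values are hypothetical):
#     form = StudentForm(data={'cpf': 12345678901234,
#                              'Date_of_Birth': '2000-01-15',
#                              'Month': '01', 'Year': '1990'})
#     if form.is_valid():
#         cpf = form.cleaned_data['cpf']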
|
{"/mysite/employee/forms.py": ["/mysite/employee/models.py"]}
|
39,735
|
arititamang/Paranli
|
refs/heads/master
|
/mysite/mysite/views.py
|
from django.shortcuts import render, HttpResponse

def index(request):
    return render(request, 'home.html')

# Form
def form(request):
    return render(request, 'form.html')

# captcha
# def captcha(request):
#     return render(request, 'captcha.html')
#     # <a href='/'>back</a>")
|
{"/mysite/employee/forms.py": ["/mysite/employee/models.py"]}
|
39,736
|
arititamang/Paranli
|
refs/heads/master
|
/mysite/employee/views.py
|
# Create your views here.
from django.shortcuts import render, HttpResponse
from employee.forms import StudentForm
import random

def index(request):
    student = StudentForm()
    return render(request, "index.html", {'form': student})
|
{"/mysite/employee/forms.py": ["/mysite/employee/models.py"]}
|
39,737
|
arititamang/Paranli
|
refs/heads/master
|
/mysite/employee/models.py
|
from __future__ import unicode_literals
from django.db import models

class Student(models.Model):
    cpf = models.IntegerField()
    dob = models.DateField()

    class Meta:
        db_table = "student"

    # month = models.ChoiceField()
    # year = models.IntegerField()
|
{"/mysite/employee/forms.py": ["/mysite/employee/models.py"]}
|
39,738
|
arititamang/Paranli
|
refs/heads/master
|
/mysite/mysite/otp.py
|
import random

otp = random.randrange(100000, 1000000)
print(otp)
user = int(input("Enter the OTP: "))
if otp == user:
    print('Access granted')
else:
    print('Access denied')
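# Illustrative addition, not in the original file: random.randrange is not
# cryptographically secure; the standard-library secrets module is the usual
# choice for OTPs. A minimal sketch (function name chosen here):
import secrets

def generate_otp():
    # uniform 6-digit code in 100000..999999, drawn from a CSPRNG
    return 100000 + secrets.randbelow(900000)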
|
{"/mysite/employee/forms.py": ["/mysite/employee/models.py"]}
|
39,739
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/launchers/train_td3.py
|
"""
Created on Tue Apr 3 21:43:06 2018
@author: Wangyf
"""
'''
File Description:
The main driver file for training / evaluating the agents.
1) parses the command line arguments
2) inits the Burgers Training/Testing Environments, the RL agents
3) perform training/evaluating as specified by the command line arugments.
'''
def run_task(vv, log_dir, exp_name):
import torch
import numpy as np
import copy
import os, sys
import time
import math
import random
import json
from get_args import get_args
from DDPG.train_util import DDPG_train, DDPG_test
from DDPG.DDPG_new import DDPG
from DDPG.util import GaussNoise
from chester import logger
from BurgersEnv.Burgers import Burgers
import utils.ptu as ptu
if torch.cuda.is_available():
ptu.set_gpu_mode(True)
### dump vv
logger.configure(dir=log_dir, exp_name=exp_name)
with open(os.path.join(logger.get_dir(), 'variant.json'), 'w') as f:
json.dump(vv, f, indent=2, sort_keys=True)
### load vv
ddpg_load_epoch = None
if vv['load_path'] is not None:
solution_data_path = vv['solution_data_path']
dx = vv['dx']
test_interval = vv['test_interval']
load_path = os.path.join('data/local', vv['load_path'])
ddpg_load_epoch = str(vv['load_epoch'])
with open(os.path.join(load_path, 'variant.json'), 'r') as f:
vv = json.load(f)
vv['noise_beg'] = 0.1
vv['solution_data_path'] = solution_data_path
vv['test_interval'] = test_interval
if vv.get('dx') is None:
vv['dx'] = dx
### Important: fix numpy and torch seed!
seed = vv['seed']
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
### Initialize RL agents
ddpg = DDPG(vv, GaussNoise(initial_sig=vv['noise_beg'], final_sig=vv['noise_end']))
agent = ddpg
if ddpg_load_epoch is not None:
print("load ddpg models from {}".format(os.path.join(load_path, ddpg_load_epoch)))
agent.load(os.path.join(load_path, ddpg_load_epoch))
### Initialize training and testing encironments
env = Burgers(vv, agent=agent)
### train models
print('begining training!')
DDPG_train(vv, env, agent)
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,740
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/DDPG/torch_networks.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import normal, Normal
from torch.nn import init
import numpy as np

def process_state(s, flux, eps, mode='p'):
    s_left = s[:, :-1]
    s_right = s[:, 1:]
    if flux == 'u2':
        fs_left = s_left ** 2 / 2.
        fs_right = s_right ** 2 / 2.
    elif flux == 'u4':
        fs_left = s_left ** 4 / 16.
        fs_right = s_right ** 4 / 16.
    elif flux == 'u3':
        fs_left = s_left ** 3 / 9.
        fs_right = s_right ** 3 / 9.
    elif flux == 'BL':
        fs_left = s_left ** 2 / (s_left ** 2 + 0.5 * (1 - s_left) ** 2)
        fs_right = s_right ** 2 / (s_right ** 2 + 0.5 * (1 - s_right) ** 2)
    max_left, _ = torch.max(torch.abs(fs_left), dim=1, keepdim=True)
    max_right, _ = torch.max(torch.abs(fs_right), dim=1, keepdim=True)
    max_left += eps
    max_right += eps
    fs_left /= max_left
    fs_right /= max_right
    if mode == 'p':
        return fs_left, fs_right
    elif mode == 'q':
        return torch.cat([fs_left, fs_right], dim=-1)
def process_state_roe(s, mode, flux, eps):
    s_left = s[:, :-1]
    s_right = s[:, 1:]
    if flux == 'u2':
        fs_left = s_left ** 2 / 2.
        fs_right = s_right ** 2 / 2.
    elif flux == 'u4':
        fs_left = s_left ** 4 / 16.
        fs_right = s_right ** 4 / 16.
    elif flux == 'u3':
        fs_left = s_left ** 3 / 9.
        fs_right = s_right ** 3 / 9.
    elif flux == 'BL':
        fs_left = s_left ** 2 / (s_left ** 2 + 0.5 * (1 - s_left) ** 2)
        fs_right = s_right ** 2 / (s_right ** 2 + 0.5 * (1 - s_right) ** 2)
    elif flux.startswith('linear'):
        a = float(flux[len('linear'):])
        fs_left = s_left * a
        fs_right = s_right * a
    elif flux == 'identity':
        fs_left = s_left
        fs_right = s_right
    ### normalization, coupled with the roe speed below
    if mode == 'normalize':
        max_left, _ = torch.max(torch.abs(fs_left), dim=1, keepdim=True)
        max_right, _ = torch.max(torch.abs(fs_right), dim=1, keepdim=True)
        max_left += eps
        max_right += eps
        fs_left /= max_left
        fs_right /= max_right
    elif mode == 'mix':
        max_left, _ = torch.max(torch.abs(fs_left), dim=1, keepdim=True)
        max_right, _ = torch.max(torch.abs(fs_right), dim=1, keepdim=True)
        max_left += eps
        max_right += eps
        n_fs_left = fs_left / max_left
        n_fs_right = fs_right / max_right
        fs_left = torch.cat([fs_left, n_fs_left], dim=1)
        fs_right = torch.cat([fs_right, n_fs_right], dim=1)
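    # Illustrative note, not in the original file: for a flux f(u), the Roe
    # speed across the interface (u_i, u_{i+1}) is
    #     a_roe = (f(u_{i+1}) - f(u_i)) / (u_{i+1} - u_i),
    # which for f(u) = u^2/2 simplifies to (u_i + u_{i+1}) / 2. Only the sign
    # of a_roe is needed for upwinding, so the branches below drop any positive
    # constant factor (e.g. the 1/2) before taking the sign.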
    if flux == 'u2' or flux == 'u4':
        roe_left = (((s_left[:, 3] + s_left[:, 2]) >= 0).float() * 2 - 1).unsqueeze(1)
    elif flux == 'u3':
        roe_left = (((s_left[:, 3] ** 2 + s_left[:, 2] ** 2 + s_left[:, 2] * s_left[:, 3]) >= 0).float() * 2 - 1).unsqueeze(1)
    elif flux == 'BL':
        roe_left = 0.5 * (s_left[:, 3] + s_left[:, 2]) - s_left[:, 3] * s_left[:, 2]
        roe_left = ((roe_left >= 0).float() * 2 - 1).unsqueeze(1)
    elif flux.startswith('linear'):
        a = float(flux[len('linear'):])
        sign = 1 if a >= 0 else -1
        roe_left = torch.FloatTensor([sign]).unsqueeze(1).to(s_left.device)
        roe_left = roe_left.repeat(len(s_left), 1)
    elif flux == 'identity':
        roe_left = torch.ones_like((((s_left[:, 3] - s_left[:, 2]) >= 0).float() * 2 - 1).unsqueeze(1))
    fs_left = torch.cat((fs_left, roe_left), dim=1)
    if flux == 'u2' or flux == 'u4':
        # roe_right = torch.sign(s_right[:, 3] + s_right[:, 2]).unsqueeze(1)
        roe_right = (((s_right[:, 3] + s_right[:, 2]) >= 0).float() * 2 - 1).unsqueeze(1)
    elif flux == 'u3':
        # roe_right = torch.sign(s_right[:, 3] ** 2 + s_right[:, 2] ** 2 + s_right[:, 2] * s_right[:, 3]).unsqueeze(1)
        roe_right = (((s_right[:, 3] ** 2 + s_right[:, 2] ** 2 + s_right[:, 2] * s_right[:, 3]) >= 0).float() * 2 - 1).unsqueeze(1)
    elif flux == 'BL':
        roe_right = 0.5 * (s_right[:, 3] + s_right[:, 2]) - s_right[:, 3] * s_right[:, 2]
        roe_right = ((roe_right >= 0).float() * 2 - 1).unsqueeze(1)
    elif flux.startswith('linear'):
        a = float(flux[len('linear'):])
        sign = 1 if a >= 0 else -1
        roe_right = torch.FloatTensor([sign]).unsqueeze(1).to(s_right.device)
        roe_right = roe_right.repeat(len(s_right), 1)
    elif flux == 'identity':
        roe_right = torch.ones_like((((s_right[:, 3] + s_right[:, 2]) >= 0).float() * 2 - 1).unsqueeze(1))
    fs_right = torch.cat((fs_right, roe_right), dim=1)
    return fs_left, fs_right
class weno_coef_DDPG_policy_net_fs(nn.Module):
    '''
    DDPG policy network that consumes both u and f(u) values.
    '''
    def __init__(self, state_dim, action_dim, hidden_layers=[64]):
        super(weno_coef_DDPG_policy_net_fs, self).__init__()
        self.fc_in = nn.Linear((state_dim - 1) * 2, hidden_layers[0])  ### use f1,...,f6, s1,...,s6 as state
        self.hidden_fcs = nn.ModuleList()
        for i in range(len(hidden_layers) - 1):
            self.hidden_fcs.append(nn.Linear(hidden_layers[i], hidden_layers[i+1]))
        self.fc_out = nn.Linear(hidden_layers[-1], action_dim // 2)

    def forward(self, s):
        '''
        Here, left means left of the i-th point, i.e., the flux f_{i-1/2}, and right
        means right of the point, i.e., f_{i+1/2}.
        Left and right do not refer to the upwind direction.
        '''
        s_left = s[:, :-1]
        s_right = s[:, 1:]
        fs_left = s_left ** 2 / 2.
        fs_right = s_right ** 2 / 2.
        ### use f and u as state
        fs_left = torch.cat([s_left, fs_left], dim=1)
        fs_right = torch.cat([s_right, fs_right], dim=1)
        a_left = F.relu(self.fc_in(fs_left))
        a_right = F.relu(self.fc_in(fs_right))
        for layer in self.hidden_fcs:
            a_left = F.relu(layer(a_left))
            a_right = F.relu(layer(a_right))
        a_left = F.softmax(self.fc_out(a_left), dim=1)
        a_right = F.softmax(self.fc_out(a_right), dim=1)
        action = torch.cat((a_left, a_right), dim=1)
        return action
class weno_coef_DDPG_policy_net(nn.Module):
    '''
    DDPG policy network that outputs WENO-style combination weights.
    '''
    def __init__(self, state_dim, action_dim, hidden_layers=[64], flux='u2', mode='normalize', batch_norm=False):
        super(weno_coef_DDPG_policy_net, self).__init__()
        self.mode = mode
        self.flux = flux
        self.eps = torch.tensor([1e-10], dtype=torch.float)
        self.batch_norm = batch_norm
        print("in weno_coef_DDPG_policy_net, batch_norm is: ", self.batch_norm)
        self.fc_in = nn.Linear(state_dim, hidden_layers[0])  ### use f1,...,f6 (normalized/original) and roe_speed as state
        if batch_norm:
            self.norm_in = nn.BatchNorm1d(hidden_layers[0])
        self.hidden_fcs = nn.ModuleList()
        if batch_norm:
            self.fc_norm_layers = nn.ModuleList()
        for i in range(len(hidden_layers) - 1):
            self.hidden_fcs.append(nn.Linear(hidden_layers[i], hidden_layers[i+1]))
            if batch_norm:
                self.fc_norm_layers.append(nn.BatchNorm1d(hidden_layers[i + 1]))
        self.fc_out = nn.Linear(hidden_layers[-1], action_dim)

    def forward(self, s, flux=None):
        '''
        Here, left means left of the i-th point, i.e., the flux f_{i-1/2}, and right
        means right of the point, i.e., f_{i+1/2}.
        Left and right do not refer to the upwind direction.
        '''
        if flux is None:
            flux = self.flux
        fs_left, fs_right = process_state_roe(s, self.mode, flux, self.eps)
        a_left = F.relu(self.fc_in(fs_left))
        a_right = F.relu(self.fc_in(fs_right))
        if self.batch_norm:
            a_left = self.norm_in(a_left)
            a_right = self.norm_in(a_right)
        for idx in range(len(self.hidden_fcs)):
            layer = self.hidden_fcs[idx]
            a_left = F.relu(layer(a_left))
            a_right = F.relu(layer(a_right))
            if self.batch_norm:
                norm_layer = self.fc_norm_layers[idx]
                a_left = norm_layer(a_left)
                a_right = norm_layer(a_right)
        a_left = F.softmax(self.fc_out(a_left), dim=1)
        a_right = F.softmax(self.fc_out(a_right), dim=1)
        action = torch.cat((a_left, a_right), dim=1)
        return action

    def to(self, device):
        self.eps = self.eps.to(device)
        return super(weno_coef_DDPG_policy_net, self).to(device)
# class DDPG_critic_network(nn.Module):
#     '''
#     NIPS models.
#     '''
#     def __init__(self, state_dim, action_dim, hidden_layers):
#         super(DDPG_critic_network, self).__init__()
#         self.fc_in = nn.Linear(state_dim + action_dim, hidden_layers[0])
#         self.hidden_layers = nn.ModuleList()
#         for i in range(len(hidden_layers) - 1):
#             self.hidden_layers.append(nn.Linear(hidden_layers[i], hidden_layers[i+1]))
#         self.fc_out = nn.Linear(hidden_layers[-1], 1)
#
#     def forward(self, s, a):
#         feature = torch.cat((s, a), dim=1)
#         h = F.relu(self.fc_in(feature))
#         for layer in self.hidden_layers:
#             h = F.relu(layer(h))
#         q = self.fc_out(h)
#         return q
class DDPG_critic_network(nn.Module):
    '''
    DDPG critic; scores a (state, action) pair with a scalar Q-value.
    '''
    def __init__(self, state_dim, action_dim, hidden_layers, mode, flux, batch_norm=False):
        super(DDPG_critic_network, self).__init__()
        self.mode = mode
        self.flux = flux
        self.eps = torch.tensor([1e-10], dtype=torch.float)
        self.batch_norm = batch_norm
        print("in DDPG_critic_network, batch_norm is: ", self.batch_norm)
        self.fc_in = nn.Linear(state_dim + action_dim, hidden_layers[0])
        if batch_norm:
            self.norm_layer_in = nn.BatchNorm1d(hidden_layers[0])
        self.hidden_layers = nn.ModuleList()
        if batch_norm:
            self.fc_norm_layers = nn.ModuleList()
        for i in range(len(hidden_layers) - 1):
            self.hidden_layers.append(nn.Linear(hidden_layers[i], hidden_layers[i+1]))
            if batch_norm:
                self.fc_norm_layers.append(nn.BatchNorm1d(hidden_layers[i+1]))
        self.fc_out = nn.Linear(hidden_layers[-1], 1)

    def forward(self, s, a):
        fs_left, fs_right = process_state_roe(s, self.mode, self.flux, self.eps)
        feature = torch.cat((fs_left, fs_right, a), dim=1)
        h = F.relu(self.fc_in(feature))
        if self.batch_norm:
            h = self.norm_layer_in(h)
        for idx in range(len(self.hidden_layers)):
            layer = self.hidden_layers[idx]
            h = F.relu(layer(h))
            if self.batch_norm:
                norm_layer = self.fc_norm_layers[idx]
                h = norm_layer(h)
        q = self.fc_out(h)
        return q.squeeze()

    def to(self, device):
        self.eps = self.eps.to(device)
        return super(DDPG_critic_network, self).to(device)
class bottleblock(nn.Module):
    def __init__(self, hidden, out):
        super(bottleblock, self).__init__()
        self.fc1 = nn.Linear(out, hidden)
        self.fc2 = nn.Linear(hidden, out)

    def forward(self, x):
        x0 = x
        out = F.relu(self.fc1(x))
        out = self.fc2(out)
        out += x0
        return F.relu(out)
class weno_coef_DDPG_policy_residual_net(nn.Module):
    '''
    Residual-style DDPG policy network; learns corrections on top of the
    standard WENO weights passed in as part of the state.
    '''
    def __init__(self, state_dim, action_dim, residual_num=3, flux='u2'):
        super(weno_coef_DDPG_policy_residual_net, self).__init__()
        self.resi0_0 = nn.Linear(state_dim, 64)  ### use f1,...,f6 + weno_weight as state; size 6 + 8
        self.resi0_1 = nn.Linear(64, 4)
        self.resi_blocks = nn.ModuleList()
        for i in range(residual_num - 1):
            self.resi_blocks.append(bottleblock(64, 4))
        self.fc_out = nn.Linear(4, action_dim // 2)
        self.flux = flux
        self.eps = torch.tensor([1e-100], dtype=torch.float)

    def forward(self, s):
        '''
        Here, left means left of the i-th point, i.e., the flux f_{i-1/2}, and right
        means right of the point, i.e., f_{i+1/2}.
        Left and right do not refer to the upwind direction.
        '''
        weno_left_weights, weno_right_weights = s[:, -8:-4], s[:, -4:]
        fs_left, fs_right = process_state(s[:, :-8], self.flux, self.eps)
        fs_left = torch.cat([weno_left_weights, fs_left], dim=1)
        fs_right = torch.cat([weno_right_weights, fs_right], dim=1)
        ss = [fs_left, fs_right]
        w0 = [weno_left_weights, weno_right_weights]
        out = [None, None]
        for idx in range(2):
            s = ss[idx]
            s = F.relu(self.resi0_0(s))
            s = self.resi0_1(s)
            s += w0[idx]
            s = F.relu(s)
            for idx2 in range(len(self.resi_blocks)):
                s = self.resi_blocks[idx2](s)
            s = self.fc_out(s)
            out[idx] = s
        a_left = F.softmax(out[0], dim=1)
        a_right = F.softmax(out[1], dim=1)
        action = torch.cat((a_left, a_right), dim=1)
        return action

    def to(self, device):
        self.eps = self.eps.to(device)
        return super(weno_coef_DDPG_policy_residual_net, self).to(device)
class DDPG_critic_network_residual(nn.Module):
    '''
    Residual-style DDPG critic.
    '''
    def __init__(self, state_dim, action_dim, residual_num=3, flux='u2'):
        super(DDPG_critic_network_residual, self).__init__()
        self.flux = flux
        self.resi0_0 = nn.Linear(state_dim + action_dim, 64)  # state should be 6 + 6 + 8 + 8
        self.resi0_1 = nn.Linear(64, state_dim + action_dim)
        self.resi_blocks = nn.ModuleList()
        for i in range(residual_num - 1):
            self.resi_blocks.append(bottleblock(64, state_dim + action_dim))
        self.fc_out = nn.Linear(state_dim + action_dim, 1)
        self.eps = torch.tensor([1e-100], dtype=torch.float)

    def forward(self, s, a):
        fs = process_state(s[:, :-8], self.flux, self.eps, mode='q')  # N x 12
        x = torch.cat([fs, s[:, -8:], a], dim=1)  # N x (12 + 8 + 8)
        identity = x
        out = F.relu(self.resi0_0(x))
        out = self.resi0_1(out)
        out += identity
        out = F.relu(out)
        for block in self.resi_blocks:
            out = block(out)
        q = self.fc_out(out)
        return q

    def to(self, device):
        self.eps = self.eps.to(device)
        return super(DDPG_critic_network_residual, self).to(device)
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,741
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/scripts/make_video.py
|
import json
import os
import os.path as osp
from BurgersEnv.Burgers import Burgers
from DDPG.DDPG_new import DDPG
from DDPG.util import GaussNoise
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
import utils.ptu as ptu
import time
import argparse

Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)

def show(env, save_path=None, save_name=None, title=None, weno_solution='rk4', dx=None):
    fig = plt.figure(figsize=(15, 10))
    ax = fig.add_subplot(2, 1, 1)
    ax.set_xlim((env.x_low, env.x_high))
    ymin, ymax = np.min(env.precise_weno_solutions) - 0.1, np.max(env.precise_weno_solutions) + 0.1
    # ymin, ymax = -20, 20
    ax.set_ylim((ymin, ymax))
    lineweno, = ax.plot(env.x_grid, [0 for _ in range(env.num_x)], lw=2, marker='+', label='reference')
    linerl, = ax.plot(env.x_grid, [0 for _ in range(env.num_x)], lw=2, marker='o', label='RL-WENO')
    lineweno_coarse, = ax.plot(env.x_grid, [0 for _ in range(env.num_x)], marker='*', lw=2, label='WENO')
    # linezero, = ax.plot(env.x_grid, [0 for x in range(env.num_x)], lw=2, linestyle='dashed')
    weno_coarse_grid_euler = env.weno_coarse_grid_euler.copy()
    weno_coarse_grid_rk4 = env.weno_coarse_grid_rk4.copy()
    draw_data = np.zeros((env.num_t, 3 * env.num_x))
    draw_data[:, env.num_x:2*env.num_x] = env.RLgrid
    if weno_solution == 'euler':
        draw_data[:, env.num_x*2:env.num_x*3] = weno_coarse_grid_euler[:env.num_t, :]
    elif weno_solution == 'rk4':
        draw_data[:, env.num_x*2:env.num_x*3] = weno_coarse_grid_rk4[:env.num_t, :]
    for t in range(env.num_t):
        draw_data[t, :env.num_x] = env.get_precise_value(t * env.dt)  # when showing, use the grid values

    error_ax = fig.add_subplot(2, 1, 2)
    coarse_error = np.zeros(env.num_t)
    RL_error = np.zeros(env.num_t)
    for i in range(env.num_t):
        coarse_error[i] = env.relative_error(draw_data[i, :env.num_x], draw_data[i, 2*env.num_x:3*env.num_x])
        RL_error[i] = env.relative_error(draw_data[i, :env.num_x], draw_data[i, env.num_x:2*env.num_x])
    RL_error_line, = error_ax.plot(range(env.num_t), RL_error, 'r', lw=2, label='RLWENO_relative_error')
    weno_coarse_error_line, = error_ax.plot(range(env.num_t), coarse_error, 'b', lw=2, label='weno_relative_error')
    RL_error_point, = error_ax.plot([], [], 'ro', markersize=5)
    weno_coarse_error_point, = error_ax.plot([], [], 'bo', markersize=5)

    def init():
        linerl.set_data([], [])
        lineweno.set_data([], [])
        lineweno_coarse.set_data([], [])
        # linezero.set_data([], [])
        RL_error_point.set_data([], [])
        weno_coarse_error_point.set_data([], [])
        linerl.set_label('RL-WENO')
        lineweno.set_label('Reference')
        lineweno_coarse.set_label('WENO')
        return linerl, lineweno, lineweno_coarse, RL_error_point, weno_coarse_error_point  # , linezero

    def func(i):
        # print('make animations, step: ', i)
        x = np.linspace(env.x_low, env.x_high, env.num_x)
        yweno = draw_data[i, :env.num_x]
        yrl = draw_data[i, env.num_x:2*env.num_x]
        yweno_coarse = draw_data[i, 2*env.num_x:3*env.num_x]
        linerl.set_data(x, yrl)
        lineweno.set_data(x, yweno)
        lineweno_coarse.set_data(x, yweno_coarse)
        # linezero.set_data(x, [0 for _ in x])
        RL_error_point.set_data(i, RL_error[i])
        weno_coarse_error_point.set_data(i, coarse_error[i])
        error_ax.set_title("evolving time step: {}".format(i))
        return linerl, lineweno, lineweno_coarse, RL_error_point, weno_coarse_error_point  # , linezero

    anim = animation.FuncAnimation(fig=fig, func=func, init_func=init, frames=env.num_t, interval=50)
    plt.legend()
    plt.title(title)
    plt.tight_layout()
    if save_path is not None:
        anim.save(osp.join(save_path, save_name), writer=writer)
    else:
        plt.show()
    plt.close()

parser = argparse.ArgumentParser()
parser.add_argument('--eta', type=float, default=0)
parser.add_argument('--forcing', type=int, default=0)
parser.add_argument('--dx', type=float, default=0.02)
parser.add_argument('--flux', type=str, default='u2')
args = parser.parse_args()

path = 'data/seuss/9-20-many-64-multiple-dx-normalize-eta-0-all-break/9-20-many-64-multiple-dx-normalize-eta-0-all-break_2020_09_20_04_04_24_0006/'
epoch = '1200'
path = 'data/seuss/9-23-many-64-multiple-dx-normalize-eta-0.01/9-23-many-64-multiple-dx-normalize-eta-0.01_2020_09_25_03_49_37_0005/'
epoch = '12800'

vv = json.load(open(osp.join(path, 'variant.json')))
if vv.get('batch_norm') is None:
    vv['batch_norm'] = False
if vv.get('dx') is None:
    vv['dx'] = args.dx
if vv.get('eta') is None:
    vv['eta'] = args.eta
vv['eta'] = args.eta

if args.flux == 'u2':
    ran = range(25, 50)
    if args.eta == 0:
        # pass
        print("here")
        vv['solution_data_path'] = 'data/local/solutions/8-14-50'
    else:
        vv['solution_data_path'] = 'data/local/solutions/9-17-50-eta-{}'.format(args.eta)
    if args.forcing:
        vv['eta'] = 0.01
        vv['solution_data_path'] = 'data/local/solutions/9-24-50-eta-0.01-forcing-1'
elif args.flux == 'u4':
    vv['flux'] = 'u4'
    vv['solution_data_path'] = 'data/local/solutions/9-24-50-u4-eta-0-forcing-0'
    ran = range(0, 25)

# vv['policy_hidden_layers'] = [64, 64, 64, 64, 64, 64]
# vv['state_mode'] = 'normalize'
ddpg = DDPG(vv, GaussNoise(initial_sig=vv['noise_beg'], final_sig=vv['noise_end']))
agent = ddpg
agent.load(osp.join(path, epoch), actor_only=True)
# agent.load(osp.join('data/local', '6150'), actor_only=True)
env = Burgers(vv, agent=agent)
# ptu.set_gpu_mode(True)

dx = args.dx if not args.forcing else args.dx * np.pi
beg = time.time()
num_t = int(0.9 * 10 / dx) if not args.forcing else int(0.9 * np.pi * 10 / dx)
for solution_idx in ran:
    print("solution_idx: ", solution_idx)
    pre_state = env.reset(solution_idx=solution_idx, num_t=num_t, dx=dx)
    # pre_state = env.reset(solution_idx=solution_idx, num_t=200)
    horizon = env.num_t
    for t in range(1, horizon):
        # print(t)
        action = agent.action(pre_state, deterministic=True)  # action: (state_dim - 2, 1) batch
        next_state, reward, done, _ = env.step(action, Tscheme='rk4')
        pre_state = next_state
    error, relative_error = env.error('rk4')
    print("relative error: ", relative_error)
    save_path = osp.join(path, 'videos', "{}_{}_{}_{}".format(epoch, vv['eta'], dx, args.flux))
    if not osp.exists(save_path):
        os.makedirs(save_path, exist_ok=True)
    save_name = "{}.mp4".format(solution_idx)
    # show(env, title='{}-{}-{}'.format(solution_idx, epoch, dx), dx=dx)
    show(env, save_path=save_path, save_name=save_name, title='{}-{}'.format(solution_idx, epoch))
print("cost time: ", time.time() - beg)
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,742
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/launchers/launch_td3.py
|
from launchers.train_td3 import run_task
from chester.run_exp import run_experiment_lite, VariantGenerator
import time
import click
import numpy as np

@click.command()
@click.argument('mode', type=str, default='local')
@click.option('--debug/--no-debug', default=True)
@click.option('--dry/--no-dry', default=False)
def main(mode, debug, dry):
    vg = VariantGenerator()
    vg.add('load_path', [None])
    # vg.add('load_path', [None])
    vg.add('load_epoch', [2550])

    ### PDE settings
    vg.add('boundary_condition', ['periodic'])
    vg.add('Tscheme', ['euler'])
    vg.add('solution_data_path', ['data/local/solutions/9-24-50-eta-0.01-forcing-1'])
    vg.add('flux', ['u2'])
    vg.add('dx', [[0.02 * np.pi, 0.04 * np.pi]])
    vg.add('eta', [0.01])

    ### Training Env Settings
    vg.add('state_mode', ['normalize'])  # 'normalize', 'unnormalize', 'mix'
    vg.add('state_dim', [7])
    vg.add('action_dim', [4])
    vg.add('weno_freq', [0.5])
    vg.add('no_done', [True])
    vg.add('same_time', [True, False])

    ### Training logic settings
    vg.add('test_interval', [100])
    vg.add('save_interval', [50])
    vg.add('train_epoch', [30000])

    ### Reward Settings
    vg.add('reward_width', [0, 3])
    vg.add('reward_first_deriv_error_weight', [0])

    ### General RL Algorithm Parameters
    vg.add('gamma', [0.99])
    vg.add('actor_lr', [1e-4])
    vg.add('final_actor_lr', [1e-7])
    vg.add('critic_lr', [1e-3])
    vg.add('final_critic_lr', [1e-7])
    vg.add('batch_size', [64])
    vg.add('policy_hidden_layers', [[64, 64, 64, 64, 64, 64]])
    vg.add('critic_hidden_layers', [[64, 64, 64, 64, 64, 64, 64]])
    vg.add('max_grad_norm', [0.5])
    vg.add('clip_gradient', [0])
    vg.add('lr_decay_interval', [0, 2000])

    ### DDPG parameters
    vg.add('tau', [0.02])
    vg.add('replay_buffer_size', [1000000])
    vg.add('noise_beg', [0.2])
    vg.add('noise_end', [0.01])
    vg.add('noise_dec', [0.04])
    vg.add('noise_dec_every', [500])
    vg.add('ddpg_value_train_iter', [2])
    vg.add('batch_norm', [False])
    if not debug:
        vg.add('seed', [100])
    else:
        vg.add('seed', [100])
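    # Illustrative note, not in the original file: assuming chester's
    # VariantGenerator enumerates the cartesian product of all multi-valued
    # options (as the print below suggests), this grid yields
    # same_time (2) x reward_width (2) x lr_decay_interval (2) = 8 variants.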
    exp_prefix = '9-25-many-64-multiple-dx-forcing-eta-0.01'
    print("there are {} variants to run".format(len(vg.variants())))
    sub_process_popens = []
    for idx, vv in enumerate(vg.variants()):
        while len(sub_process_popens) >= 1:
            sub_process_popens = [x for x in sub_process_popens if x.poll() is None]
            time.sleep(10)
        compile_script = wait_compile = None
        cur_popen = run_experiment_lite(
            stub_method_call=run_task,
            variant=vv,
            mode=mode,
            dry=dry,
            use_gpu=True,
            exp_prefix=exp_prefix,
            wait_subprocess=debug,
            compile_script=compile_script,
            wait_compile=wait_compile,
        )
        if cur_popen is not None:
            sub_process_popens.append(cur_popen)
        if debug:
            break

if __name__ == '__main__':
    main()
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,743
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/BurgersEnv/Burgers.py
|
import numpy as np
import random
import math
import copy
import matplotlib
from matplotlib import pyplot as plt
from Weno.weno3_2 import Weno3
from Weno.finite_difference_weno import weno3_fd
from os import path as osp
import os
import torch
import gym
from gym.spaces import Box
from scipy import interpolate
from scripts.debug import debug
import time

class Burgers(gym.Env):
    """
    u_t + (1/2 u^2)_x = 0, i.e., u_t + u * u_x = 0.
    PDE class for RL learning, exposing the same interface as Gym.
    """
    def __init__(
        self,
        vv,
        agent=None
    ):
        '''
        parameters:
            vv: a dictionary storing all kinds of arguments
            agent: only required for the RK4 time scheme.
        '''
        self.vv = copy.deepcopy(vv)
        self.reward_width = vv['reward_width']
        self.solution_data_path = vv['solution_data_path']
        solution_path_list = os.listdir(self.solution_data_path)
        self.solution_path_list = []
        for x in solution_path_list:
            if '.pkl' in x:
                self.solution_path_list.append(x)
        self.train_flag = True
        # agent for rk4
        self.agent = agent
        if self.vv['Tscheme'] == 'rk4':
            assert self.agent is not None, 'rk4 scheme is only used at test time, so you must provide '\
                'a trained RL agent!'
        # initial and boundary conditions
        self.boundary_condition = vv['boundary_condition']
        self.weno_w = np.array([[1/3, 0, 0, 0],
                                [-7/6, -1/6, 0, 0],
                                [11/6, 5/6, 1/3, 0],
                                [0, 1/3, 5/6, 11/6],
                                [0, 0, -1/6, -7/6],
                                [0, 0, 0, 1/3]])
        self.observation_space = Box(low=np.array([-np.inf] * 7), high=np.array([np.inf] * 7))
        self.action_space = Box(low=np.array([-1] * 4), high=np.array([1] * 4))
        self.eta = self.vv['eta']  # viscosity coefficient

    def compute_weno_error(self):
        error_euler = np.zeros(self.num_t - 1)
        error_rk4 = np.zeros(self.num_t - 1)
        for i in range(1, self.num_t):
            true_values = self.get_precise_value(i * self.dt)
            error_euler[i-1] = self.relative_error(true_values, self.weno_coarse_grid_euler[i])
            error_rk4[i-1] = self.relative_error(true_values, self.weno_coarse_grid_rk4[i])
        self.weno_error_euler = np.mean(error_euler)
        self.weno_error_rk4 = np.mean(error_rk4)
        self.weno_error_all_rk4 = error_rk4
        self.weno_error_all_euler = error_euler

    ### the Burgers flux function; can be changed to other functions for future usage.
    def flux(self, val):
        if self.vv['flux'] == 'u2':
            return val ** 2 / 2.
        elif self.vv['flux'] == 'u4':
            return val ** 4 / 16.
        elif self.vv['flux'] == 'u3':
            return val ** 3 / 9.
        elif self.vv['flux'] == 'BL':
            return val ** 2 / (val ** 2 + 0.5 * (1 - val) ** 2)
        elif self.vv['flux'].startswith("linear"):
            a = float(self.vv['flux'][len('linear'):])
            return a * val
    def reset(self, solution_idx=None, num_t=None, dx=None, dt=None, weno_regenerate=False):
        """
        return the initial state and reset some parameters
        """
        solution_num = len(self.solution_path_list)
        if solution_idx is None:
            if self.train_flag:
                self.solution_idx = np.random.randint(0, solution_num // 2)
            else:
                self.solution_idx = np.random.randint(solution_num // 2, solution_num)
        else:
            self.solution_idx = solution_idx
        solution_path = self.solution_path_list[self.solution_idx]
        solution_path = osp.join(self.solution_data_path, solution_path)
        solution_data = torch.load(solution_path)
        args = solution_data['args']
        if dx is None:
            self.dx = np.random.choice(self.vv['dx'])
        else:
            self.dx = dx
        if dt is None:
            self.dt = self.dx * args.cfl
        else:
            self.dt = dt
        self.precise_dx = args.precise_dx
        self.precise_dt = self.precise_dx * args.cfl
        self.x_low = args.x_low
        self.x_high = args.x_high
        if num_t is None:
            total_num_t = int(args.T / self.dt) + 1
            if not self.vv['same_time']:
                self.num_t = np.random.randint(total_num_t // 4, total_num_t)
            else:
                self.num_t = total_num_t // 4 * 3
        else:
            self.num_t = num_t
        self.weno_coarse_grid_euler = solution_data['coarse_solution_euler'][str(round(self.dx, 2))][:self.num_t]
        if isinstance(solution_data['coarse_solution_rk4'], tuple):
            self.weno_coarse_grid_rk4 = solution_data['coarse_solution_rk4'][0][str(round(self.dx, 2))][:self.num_t]
        else:
            self.weno_coarse_grid_rk4 = solution_data['coarse_solution_rk4'][str(round(self.dx, 2))][:self.num_t]
        self.forcing = None
        if solution_data.get('forcing') is not None:
            print("get forcing!")
            self.forcing = self.get_forcing_func(solution_data['forcing'])
        self.num_x = len(self.weno_coarse_grid_rk4[0])
        self.x_grid = np.linspace(self.x_low, self.x_high, self.num_x)
        if weno_regenerate:
            self.weno_coarse_grid_rk4, self.weno_coarse_grid_euler = self.regenerate_weno_coarse_solution(solution_data)
            # debug(weno_coarse_grid_rk4, self.weno_coarse_grid_rk4)
        precise_num_t = int((self.num_t * self.dt) // self.precise_dt + 10)
        self.precise_weno_solutions = solution_data['precise_solution'][:precise_num_t]
        self.precise_weno_grid = np.linspace(args.x_low, args.x_high, len(self.precise_weno_solutions[0]))
        # first dim: t; second dim: x
        self.RLgrid = np.zeros((self.num_t, self.num_x))  # record the value at each (t, x) point
        self.initial_value = self.weno_coarse_grid_euler[0]
        self.RLgrid[0, :] = self.initial_value
        self.t_idx = 1
        self.horizon = self.num_t - 1
        self.compute_weno_error()
        state = self.get_state(self.RLgrid[0])
        return state
    def regenerate_weno_coarse_solution(self, solution_data):
        args = copy.deepcopy(solution_data['args'])
        if args.__dict__.get('eta') is None:
            args.__dict__['eta'] = 0
        a, b, c, d, e = solution_data['a'], solution_data['b'], solution_data['c'], solution_data['d'], solution_data['e']
        init_value = a + b * np.sin(c * np.pi * self.x_grid) + d * np.cos(e * np.pi * self.x_grid)
        args.Tscheme = 'rk4'
        beg = time.time()
        coarse_solver = weno3_fd(args,
            init_value=init_value, forcing=self.forcing, num_x=self.num_x, num_t=self.num_t, dt=self.dt, dx=self.dx)
        weno_coarse_grid_rk4 = coarse_solver.solve()
        self.weno_regenerate_time = time.time() - beg
        args.Tscheme = 'euler'
        coarse_solver = weno3_fd(args,
            init_value=init_value, forcing=self.forcing, num_x=self.num_x, num_t=self.num_t, dt=self.dt, dx=self.dx)
        weno_coarse_grid_euler = coarse_solver.solve()
        return weno_coarse_grid_rk4, weno_coarse_grid_euler

    ### subsample the fine grid to get the coarse-grid values.
    def get_precise_value(self, t):
        precise_t_idx = int(t / self.precise_dt)
        precise_val = self.precise_weno_solutions[precise_t_idx]
        f = interpolate.interp1d(self.precise_weno_grid, precise_val)
        ret = f(self.x_grid)
        return ret

    def get_forcing_func(self, forcing_params):
        a, omega, k, phi = forcing_params

        def func(x, t, period):
            spatial_phase = (2 * np.pi * k * x / period)
            signals = np.sin(omega * t + spatial_phase + phi)
            reference_forcing = np.sum(a * signals, axis=0)
            return reference_forcing

        return func
    ### the env moves one step forward
    def step(self, action, Tscheme=None, eval=False):
        """
        parameters
        ----------
        action: a batch of size (self.num_x, 1)

        Return
        ------
        next_state: the state after taking the action
        reward: the reward of taking the action at the current state
        done: whether all points have reached the terminal time point
        None: to fit the Gym interface
        """
        if Tscheme is None:
            Tscheme = self.vv['Tscheme']
        t_iter = self.t_idx
        if Tscheme == 'euler':
            self.RLgrid[t_iter] = self.RLgrid[t_iter - 1] + self.get_u_increment(self.RLgrid[t_iter - 1], action=action)
        elif Tscheme == 'rk4':
            self.RLgrid[t_iter] = self.rk_evolve(self.RLgrid[t_iter - 1], action)
        else:
            raise ValueError('invalid time scheme!')
        # clip to avoid numeric explosion
        self.RLgrid[t_iter] = np.clip(self.RLgrid[t_iter], a_min=-50, a_max=50)
        done = [0 for i in range(self.num_x)]
        reward = [0 for i in range(self.num_x)]
        # give reward
        if not eval:
            precise_val = self.get_precise_value(t_iter * self.dt)
            # error on u
            width = self.reward_width
            RL_neighbor = [self.RLgrid[t_iter][max(i-width, 0):min(i+width+1, self.num_x)] for i in range(self.num_x)]
            precise_neighbor = [precise_val[max(0, i-width):min(i+width+1, self.num_x)] for i in range(self.num_x)]
            errors = [np.max(np.abs(RL_neighbor[i] - precise_neighbor[i])) + 1e-300
                      for i in range(self.num_x)]
            # error on u_x
            width = 1
            RL_one_neighbor = self.expand_boundary(self.RLgrid[t_iter], width)
            precise_one_neighbor = self.expand_boundary(precise_val, width)
            RL_first_deri = [(RL_one_neighbor[i + 1] - RL_one_neighbor[i]) / self.dx for i in range(self.num_x)]
            precise_first_deri = [(precise_one_neighbor[i + 1] - precise_one_neighbor[i]) / self.dx for i in range(self.num_x)]
            first_deri_error_left = np.abs(np.array(RL_first_deri) - np.array(precise_first_deri))
            RL_first_deri = [(RL_one_neighbor[i + 2] - RL_one_neighbor[i + 1]) / self.dx for i in range(self.num_x)]
            precise_first_deri = [(precise_one_neighbor[i + 2] - precise_one_neighbor[i + 1]) / self.dx for i in range(self.num_x)]
            first_deri_error_right = np.abs(np.array(RL_first_deri) - np.array(precise_first_deri))
            reward = -np.log(errors) - \
                self.vv['reward_first_deriv_error_weight'] * np.log(first_deri_error_left) - \
                self.vv['reward_first_deriv_error_weight'] * np.log(first_deri_error_right)
        if not self.vv['no_done']:
            if t_iter == self.num_t - 1:
                done = [1 for i in range(self.num_x)]
        next_state = self.get_state(self.RLgrid[t_iter])
        self.t_idx += 1
        return next_state, reward, done, None
    ### expand the boundary
    def expand_boundary(self, val, left_width, right_width=None):
        '''
        expand the boundary points.
        '''
        if right_width is None:
            right_width = left_width
        if self.boundary_condition == 'periodic':
            tmp = list(val[-left_width - 1:-1]) + list(val) + list(val[1:right_width + 1])
        elif self.boundary_condition == 'outflow':
            tmp = list(val[:left_width]) + list(val) + list(val[-right_width:])
        else:
            raise ValueError('invalid boundary condition!')
        return tmp
    ### generate the state
    def get_state(self, u):
        u_ = np.array(u)
        u_expand = self.expand_boundary(u_, 3)
        next_state = [u_expand[i:i + 6 + 1] for i in range(self.num_x)]
        return next_state
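    # Illustrative note, not in the original file: each per-point state is a
    # sliding window of 7 values, u_{i-3}, ..., u_{i+3}, taken from the
    # boundary-expanded array; this matches state_dim = 7 in the launcher.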
    ### the rk4 time scheme
    def rk_evolve(self, u_start, action):
        k1 = np.array(self.get_u_increment(u_start, action=action))
        k2 = np.array(self.get_u_increment(u_start + 0.5 * k1))
        k3 = np.array(self.get_u_increment(u_start + 0.5 * k2))
        k4 = np.array(self.get_u_increment(u_start + k3))
        return u_start + (k1 + 2 * (k2 + k3) + k4) / 6
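    # Illustrative note, not in the original file: this is classical RK4
    # applied to the semi-discrete system,
    #     u^{n+1} = u^n + (k1 + 2*k2 + 2*k3 + k4) / 6,
    # where each k already carries the dt factor from get_u_increment. Only k1
    # uses the given action; k2..k4 re-query the agent inside get_u_increment.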
    ### compute the change of the u-values over one time step
    def get_u_increment(self, u, action=None):
        if action is None:  # rk4 sub-step: query the agent for an action
            states = self.get_state(u)
            action = self.agent.action(states, True)
        flux_derivative = self.get_derivative(u, action)
        if self.eta > 0:
            u_xx = np.zeros_like(u)
            u_xx[1:-1] = (u[2:] + u[:-2] - 2 * u[1:-1]) / (self.dx ** 2)
            u_xx[0] = (u[1] + u[-2] - 2 * u[0]) / (self.dx ** 2)
            u_xx[-1] = (u[1] + u[-2] - 2 * u[-1]) / (self.dx ** 2)
        forcing_term = 0
        if self.forcing is not None:
            period = self.x_high - self.x_low
            forcing_term = self.forcing(x=self.x_grid, t=(self.t_idx - 1) * self.dt, period=period)
        if self.eta > 0:
            increment = self.dt * (-flux_derivative + self.eta * u_xx + forcing_term)
        else:
            increment = self.dt * (-flux_derivative + forcing_term)
        return increment
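    # Illustrative note, not in the original file: the update above is the
    # conservative semi-discrete form
    #     du_i/dt = -(f_{i+1/2} - f_{i-1/2}) / dx + eta * u_xx + F(x_i, t),
    # with the numerical fluxes f_{i±1/2} assembled in get_derivative below
    # from the agent's stencil weights.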
    def get_derivative(self, u, action):
        u_expand = self.expand_boundary(u, 3)
        u_expand = np.array(u_expand)
        fu_expand = self.flux(u_expand)
        action_num = action.shape[1] // 2
        left_flux_coef = action[:, :action_num]  ### here left means i-1/2
        right_flux_coef = action[:, action_num:]  ### here right means i+1/2
        left_flux_points = np.array([fu_expand[i:i+6] for i in range(self.num_x)])
        right_flux_points = np.array([fu_expand[i+1:i+7] for i in range(self.num_x)])
        left_four_fluxes = left_flux_points.dot(self.weno_w)
        right_four_fluxes = right_flux_points.dot(self.weno_w)
        ### let RL judge the upwind direction
        left_flux = [left_four_fluxes[i].dot(left_flux_coef[i]) for i in range(self.num_x)]
        right_flux = [right_four_fluxes[i].dot(right_flux_coef[i]) for i in range(self.num_x)]
        left_flux = np.array(left_flux)
        right_flux = np.array(right_flux)
        return (right_flux - left_flux) / self.dx
    ### relative error in the given norm (default L2)
    def relative_error(self, precise, coarse, norm=2):
        return np.linalg.norm(precise - coarse, norm) / np.linalg.norm(precise, norm)

    ### compute the averaged relative error compared with the fine-grid solution
    def error(self, baseline='rk4'):
        """
        return the averaged relative l2 error, and its ratio to the WENO baseline error.
        """
        error = np.zeros(self.num_t - 1)
        for i in range(1, self.num_t):
            rl_values = self.RLgrid[i]
            true_values = self.get_precise_value(self.dt * i)
            error[i - 1] = self.relative_error(true_values, rl_values)
        error = np.mean(error)
        rel_error = error / self.weno_error_euler if baseline == 'euler' else error / self.weno_error_rk4
        return error, rel_error
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,744
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/DDPG/DDPG_new.py
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random
from DDPG.util import GaussNoise
from DDPG.torch_networks import DDPG_critic_network, weno_coef_DDPG_policy_net
import utils.ptu as ptu
import copy

def combined_shape(length, shape=None):
    if shape is None:
        return (length,)
    return (length, shape) if np.isscalar(shape) else (length, *shape)

class ReplayBuffer:
    """
    A simple FIFO experience replay buffer for TD3 agents.
    """
    def __init__(self, obs_dim, act_dim, size):
        self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.obs2_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr, self.size, self.max_size = 0, 0, size

    def store(self, obs, act, rew, next_obs, done):
        self.obs_buf[self.ptr] = obs
        self.obs2_buf[self.ptr] = next_obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.done_buf[self.ptr] = done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=32):
        idxs = np.random.randint(0, self.size, size=batch_size)
        batch = [self.obs_buf[idxs],
                 self.act_buf[idxs],
                 self.rew_buf[idxs],
                 self.done_buf[idxs],
                 self.obs2_buf[idxs]]
        return [torch.as_tensor(v, dtype=torch.float32).to(ptu.device) for v in batch]
class DDPG():
    '''
    DDPG agent with TD3-style twin critics.
    '''
    def __init__(self, vv, noise):
        self.vv = copy.deepcopy(vv)
        self.action_noise = noise
        self.replay_mem = ReplayBuffer(vv['state_dim'], vv['action_dim'] * 2, vv['replay_buffer_size'])
        policy_hidden_layers = vv['policy_hidden_layers']
        critic_hidden_layers = vv['critic_hidden_layers']
        self.actor = weno_coef_DDPG_policy_net(vv['state_dim'], vv['action_dim'],
            policy_hidden_layers, vv['flux'], vv['state_mode'], vv['batch_norm']).to(ptu.device)
        self.actor_target = copy.deepcopy(self.actor)
        self.critic = DDPG_critic_network(vv['state_dim'] * 2, vv['action_dim'] * 2, critic_hidden_layers,
            vv['state_mode'], vv['flux'], vv['batch_norm']).to(ptu.device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic2 = DDPG_critic_network(vv['state_dim'] * 2, vv['action_dim'] * 2, critic_hidden_layers,
            vv['state_mode'], vv['flux'], vv['batch_norm']).to(ptu.device)  ### twin
        self.critic_target2 = copy.deepcopy(self.critic2)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), vv['actor_lr'])
        self.critic_optimizer = optim.Adam(self.critic.parameters(), vv['critic_lr'])
        self.critic_optimizer2 = optim.RMSprop(self.critic2.parameters(), vv['critic_lr'])  ### twin
        self.global_step = 0

    def soft_update(self, target, source, tau):
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
    ### training process
    def train(self):
        if self.replay_mem.size < self.vv['batch_size']:
            return None, None, None
        ### sample batch_size samples from the replay memory and use them to train
        value_loss = 0
        value_loss2 = 0
        policy_loss = 0
        ### use the target Q networks to get the target Q value
        for _ in range(self.vv['ddpg_value_train_iter']):
            prev_state, action, reward, done, next_state = self.replay_mem.sample_batch(self.vv['batch_size'])
            # print(prev_state)
            q_pred = self.critic(prev_state, action)
            with torch.no_grad():
                pi_targ = self.actor_target(next_state)
                # Target Q-values
                q1_pi_targ = self.critic_target(next_state, pi_targ)
                q2_pi_targ = self.critic_target2(next_state, pi_targ)
                q_pi_targ = torch.min(q1_pi_targ, q2_pi_targ)
                backup = reward + self.vv['gamma'] * (1 - done) * q_pi_targ
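                # Illustrative note, not in the original file: this is the
                # clipped double-Q target of TD3,
                #     y = r + gamma * (1 - d) * min(Q1'(s', pi'(s')), Q2'(s', pi'(s'))),
                # which curbs the overestimation bias of a single critic.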
            closs = (q_pred - backup) ** 2
            closs = torch.mean(closs)
            value_loss += closs.cpu().item()
            self.critic_optimizer.zero_grad()
            closs.backward()
            if self.vv['clip_gradient']:
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.vv['max_grad_norm'])
            self.critic_optimizer.step()
            ### twin
            q_pred2 = self.critic2(prev_state, action)
            closs2 = (q_pred2 - backup) ** 2
            closs2 = torch.mean(closs2)
            value_loss2 += closs2.cpu().item()
            self.critic_optimizer2.zero_grad()
            closs2.backward()
            if self.vv['clip_gradient']:
                torch.nn.utils.clip_grad_norm_(self.critic2.parameters(), self.vv['max_grad_norm'])
            self.critic_optimizer2.step()
        value_loss /= self.vv['ddpg_value_train_iter']
        value_loss2 /= self.vv['ddpg_value_train_iter']
        aloss = -self.critic(prev_state, self.actor(prev_state))
        aloss = aloss.mean()
        self.actor_optimizer.zero_grad()
        aloss.backward()
        policy_loss += aloss.cpu().item()
        if self.vv['clip_gradient']:
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.vv['max_grad_norm'])
        self.actor_optimizer.step()
        self.global_step += 1
        self.soft_update(self.actor_target, self.actor, self.vv['tau'])
        self.soft_update(self.critic_target, self.critic, self.vv['tau'])
        self.soft_update(self.critic_target2, self.critic2, self.vv['tau'])  ### twin
        return policy_loss, value_loss, value_loss2
    def store(self, state, action, reward, next_state, done):
        self.replay_mem.store(state, action, reward, next_state, done)

    ### use the policy network to compute the action
    def action(self, s, deterministic=False, mode='agent', flux=None):
        assert mode in ['weno', 'agent'], 'only supports `weno` or `agent`'
        if mode == 'weno':
            weno_coef = self.weno_coef(np.array(s))
            return weno_coef
        s = torch.FloatTensor(s).to(ptu.device)
        with torch.no_grad():
            action = self.actor(s, flux=flux)
        action = action.cpu().numpy()
        action_num = action.shape[1] // 2
        if not deterministic:  ### add noise to each dimension of each action
            a_left, a_right = action[:, :action_num], action[:, action_num:]
            if not self.vv['batch_norm']:
                assert np.sum(np.abs(a_left[1:] - a_right[:-1])) < 1e-5
            size = a_left.shape
            noise_left = self.action_noise.noise(size=size)
            noise_right = self.action_noise.noise(size=size)
            a_left += noise_left
            a_right += noise_right
            a_left = np.clip(a=a_left, a_min=1e-20, a_max=None)
            a_right = np.clip(a=a_right, a_min=1e-20, a_max=None)
            a_left = a_left / np.sum(a_left, axis=1).reshape(-1, 1)
            a_right = a_right / np.sum(a_right, axis=1).reshape(-1, 1)
            action = np.concatenate((a_left, a_right), axis=1)
        return action
    def weno_coef(self, s):
        '''
        This function directly constructs the num_x + 1 fluxes
        f_{-1/2}, f_{1/2}, ..., f_{num_x - 1/2}, f_{num_x + 1/2}.

        param s: still assume each s has 7 elements, and in total there are num_x points (grid size).
            We add three ghost points at the boundary, so an s is, e.g., {u-3, u-2, u-1, u0, u1, u2, u3}.
        '''
        num = len(s)
        ### when computing betas, the finite-difference WENO reconstruction uses the flux values as the cell averages.
        f = s ** 2 / 2.
        dleft2, dleft1, dleft0 = 0.1, 0.6, 0.3  ### ideal weights for the reconstruction at the left cell boundary (the minus one in the book)
        dright2, dright1, dright0 = 0.3, 0.6, 0.1
        fl = np.zeros((num + 1, 5))
        fr = np.zeros((num + 1, 5))
        fl[:-1] = f[:, :5]
        fl[-1] = f[-1, 1:6]  ### needed for the flux f_{num_x + 1/2}
        fr[:-1] = f[:, 1:6]
        fr[-1] = f[-1, 2:7]  ### needed for the flux f_{num_x + 1/2}
        ### in the following coefficient variables (beta, alpha), the number indicates 'r', i.e., the shift of the leftmost point of the stencil.
        betal0 = 13 / 12 * (fl[:, 2] - 2 * fl[:, 3] + fl[:, 4]) ** 2 + 1 / 4 * (3 * fl[:, 2] - 4 * fl[:, 3] + fl[:, 4]) ** 2
        betal1 = 13 / 12 * (fl[:, 1] - 2 * fl[:, 2] + fl[:, 3]) ** 2 + 1 / 4 * (fl[:, 1] - fl[:, 3]) ** 2
        betal2 = 13 / 12 * (fl[:, 0] - 2 * fl[:, 1] + fl[:, 2]) ** 2 + 1 / 4 * (fl[:, 0] - 4 * fl[:, 1] + 3 * fl[:, 2]) ** 2
        betar0 = 13 / 12 * (fr[:, 2] - 2 * fr[:, 3] + fr[:, 4]) ** 2 + 1 / 4 * (3 * fr[:, 2] - 4 * fr[:, 3] + fr[:, 4]) ** 2
        betar1 = 13 / 12 * (fr[:, 1] - 2 * fr[:, 2] + fr[:, 3]) ** 2 + 1 / 4 * (fr[:, 1] - fr[:, 3]) ** 2
        betar2 = 13 / 12 * (fr[:, 0] - 2 * fr[:, 1] + fr[:, 2]) ** 2 + 1 / 4 * (fr[:, 0] - 4 * fr[:, 1] + 3 * fr[:, 2]) ** 2
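        # Illustrative note, not in the original file: these are the standard
        # WENO5 (Jiang-Shu) smoothness indicators beta_r; the nonlinear weights
        # below follow
        #     alpha_r = d_r / (beta_r + eps)^2,   w_r = alpha_r / sum_k alpha_k,
        # so smooth stencils (small beta) stay close to the ideal weights d_r,
        # while stencils crossing a discontinuity are suppressed.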
        eps = 1e-6
        alphal0 = dleft0 / (betal0 + eps) ** 2
        alphal1 = dleft1 / (betal1 + eps) ** 2
        alphal2 = dleft2 / (betal2 + eps) ** 2
        wl0 = alphal0 / (alphal0 + alphal1 + alphal2)
        wl1 = alphal1 / (alphal0 + alphal1 + alphal2)
        wl2 = alphal2 / (alphal0 + alphal1 + alphal2)
        alphar0 = dright0 / (betar0 + eps) ** 2
        alphar1 = dright1 / (betar1 + eps) ** 2
        alphar2 = dright2 / (betar2 + eps) ** 2
        wr0 = alphar0 / (alphar0 + alphar1 + alphar2)
        wr1 = alphar1 / (alphar0 + alphar1 + alphar2)
        wr2 = alphar2 / (alphar0 + alphar1 + alphar2)
        ### compute the roe speed; for flux f_{i+1/2} it is (f_{i+1} - f_i) / (u_{i+1} - u_i)
        roe = np.zeros(num + 1)
        # roe[:-1] = (f[:, 3] - f[:, 2]) / (s[:, 3] - s[:, 2])
        # roe[-1] = (f[-1, 4] - f[-1, 3]) / (s[-1, 4] - s[-1, 3])  ### one more roe speed for flux f_{num_x + 1/2}
        roe[:-1] = (s[:, 3] + s[:, 2])
        roe[-1] = (s[-1, 4] + s[-1, 3])  ### one more roe speed for flux f_{num_x + 1/2}
        ### keep all four possible stencils together for computation, while in weno only 3 can have positive weight at the same time.
        coef = np.zeros((num + 1, 4))
        # coef = np.zeros((num + 1, 3))
        for i in range(num + 1):
            # judge = ori_s[i][2] if i < num else ori_s[-1][3]
            judge = roe[i]
            if judge >= 0:  ### if roe speed >= 0, use the minus (left-biased) flux
                coef[i][0] = wl2[i]
                coef[i][1] = wl1[i]
                coef[i][2] = wl0[i]
            else:  ### if roe speed < 0, use the plus (right-biased) flux
                coef[i][1] = wr2[i]
                coef[i][2] = wr1[i]
                coef[i][3] = wr0[i]
        action_left = coef[:-1]
        action_right = coef[1:]
        action = np.concatenate((action_left, action_right), axis=1)
        return action
    def train_mode(self, flag):
        if flag:
            self.actor.train()
            self.actor_target.train()
            self.critic.train()
            self.critic2.train()
            self.critic_target.train()
            self.critic_target2.train()
        else:
            self.actor.eval()
            self.actor_target.eval()
            self.critic.eval()
            self.critic2.eval()
            self.critic_target.eval()
            self.critic_target2.eval()

    def save(self, save_path=None):
        path = save_path
        torch.save(self.actor.state_dict(), path + 'ddpgactor.txt')
        torch.save(self.critic.state_dict(), path + 'ddpgcritic.txt')

    def load(self, load_path, actor_only=False):
        if not actor_only:
            self.critic.load_state_dict(torch.load(load_path + 'ddpgcritic.txt', map_location=lambda storage, loc: storage))
        self.actor.load_state_dict(torch.load(load_path + 'ddpgactor.txt', map_location=lambda storage, loc: storage))
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,745
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/DDPG/train_util.py
|
'''
File Description:
This file is the driver for training the DDPG agent.
'''
import torch
import numpy as np
from threading import Thread
import random, copy
from matplotlib import pyplot as plt
from chester import logger
import os
import os.path as osp

def DDPG_train(vv, env, agent):
    '''
    Given the training args, the training/testing environment, and the DDPG agent, this function
    interacts the agent with the training environment, collects interaction samples, performs
    training updates of the agent, and records the necessary logistics in log files.
    '''
    a_lr = vv['actor_lr']
    c_lr = vv['critic_lr']
    ### training begins
    for train_iter in range(vv['train_epoch']):
        dx = np.random.choice(vv['dx'])
        if dx <= 0.02:
            weno_freq = vv['weno_freq']
        else:
            weno_freq = 0
        pre_state = env.reset(dx=dx)
        env_batch_size = env.num_x
        horizon = env.horizon
        # decay learning rate
        if train_iter > 0 and vv['lr_decay_interval'] > 0 and train_iter % vv['lr_decay_interval'] == 0:
            a_lr = max(vv['final_actor_lr'], a_lr / 2)
            c_lr = max(vv['final_critic_lr'], c_lr / 2)
            c_optimizers = [agent.critic_optimizer, agent.critic_optimizer2]
            for optim in c_optimizers:
                for param_group in optim.param_groups:
                    param_group['lr'] = c_lr
            for param_group in agent.actor_optimizer.param_groups:
                param_group['lr'] = a_lr
        ret = 0
        for t in range(1, horizon):
            # behavior policy: with probability weno_freq, take the standard WENO action
            p = np.random.rand()
            if p < weno_freq:
                action = agent.action(pre_state, mode='weno')
            else:
                action = agent.action(pre_state)
            ### next_state, reward, done are all batches
            next_state, reward, done, _ = env.step(action)
            ret += np.mean(reward)
            # TODO: change this to support storing a batch
            for i in range(env_batch_size):
                agent.store(pre_state[i], action[i], reward[i], next_state[i], done[i])
            agent.train()
            pre_state = next_state
        error, relative_error = env.error('euler')
        solution_idx = env.solution_idx
        logger.record_tabular('Train/{}-error'.format(solution_idx), error)
        logger.record_tabular('Train/{}-ret'.format(solution_idx), ret)
        logger.record_tabular('Train/{}-relative-error'.format(solution_idx), relative_error)
        ### decrease the exploration noise
        if train_iter > 0 and train_iter % vv['noise_dec_every'] == 0:
            agent.action_noise.decrease(vv['noise_dec'])
        # TODO: implement possible learning rate decay
        # if train_iter > 0 and train_iter % vv['decay_learning_rate'] == 0:
        #     update_linear_schedule(self.critic_optimizer, self.global_step, self.args.train_epoch, self.args.c_lr, self.args.final_c_lr)
        #     update_linear_schedule(self.critic_optimizer2, self.global_step, self.args.train_epoch, self.args.c_lr, self.args.final_c_lr)
        #     update_linear_schedule(self.actor_optimizer, self.global_step, self.args.train_epoch, self.args.a_lr, self.args.final_a_lr)
        #     pass
        ### test the central agent in the test environments, with both euler and rk4 errors
        if train_iter % vv['test_interval'] == 0:
            env.train_flag = False
            agent.train_mode(False)
            for dx in vv['dx']:
                print("test begin")
                errors, relative_errors = [], []
                for solution_idx in range(len(env.solution_path_list) // 2, len(env.solution_path_list)):
                    pre_state = env.reset(solution_idx=solution_idx, num_t=200, dx=dx)
                    horizon = env.num_t
                    for t in range(1, horizon):
                        action = agent.action(pre_state, deterministic=True)  # action: (state_dim - 2, 1) batch
                        next_state, reward, done, _ = env.step(action, Tscheme='rk4')
                        pre_state = next_state
                    error, relative_error = env.error('rk4')
                    errors.append(error)
                    relative_errors.append(relative_error)
                names = ['error', 'relative_error']
                all_errors = [errors, relative_errors]
                for i in range(len(names)):
                    name = names[i]
                    errors = all_errors[i]
                    logger.record_tabular(f'Test/{dx}_{name}_mean', np.mean(errors))
                    logger.record_tabular(f'Test/{dx}_{name}_max', np.max(errors))
                    logger.record_tabular(f'Test/{dx}_{name}_min', np.min(errors))
                    logger.record_tabular(f'Test/{dx}_{name}_median', np.median(errors))
                    logger.record_tabular(f'Test/{dx}_{name}_std', np.std(errors))
                print("test end")
            logger.dump_tabular()
            env.train_flag = True
            agent.train_mode(True)
        if train_iter % vv['save_interval'] == 0 and train_iter > 0:
            agent.save(osp.join(logger.get_dir(), str(train_iter)))
    return agent
def DDPG_test(agent, test_envs, args = None, Tscheme = None):
'''
This function interacts the agent with the environment without exploration noise to test its
deterministic performance.
### Arguments:
agent (class DDPG object):
the trained agent to be tested.
test_envs (list of class Burgers objects):
a list of test environments with different initial conditions, dx, dt, etc.
args (optional, python namespace):
A namespace variable that stores all necessary parameters for the whole training procedure.
Tscheme (optional, string, 'rk4' or 'euler'):
When given, specifies the temporal discretization scheme: either the Euler scheme or the Runge-Kutta 4th-order scheme.
### Return
errors (A list of floats):
A list of the relative errors at the terminal time step in each test environment.
'''
errors = []
rets = []
for env in test_envs:
env.set_args()
pre_state = env.reset()
t_range = env.num_t
ret = 0
for t in range(1, t_range):
action = agent.action(pre_state, True) # action: (state_dim -2, 1) batch
### next_state, reward, done, all batches
### when doing test, use rk4 as default, and mode == test ensures all actions are generated using the agent
next_state, reward, done, _ = env.step(action, t, Tscheme = Tscheme, mode = 'test')
ret += np.mean(reward)
pre_state = copy.copy(next_state)
rets.append(ret)
error, _ = env.error()
errors.append(error)
return errors, rets
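# --- Illustrative usage sketch (not part of the original file) ---
# A minimal sketch of how this driver is typically wired up; `vv` is the
# chester variant dict (here loaded from a saved variant.json, following
# scripts/test_trained_model_forcing.py), and `exp_path` is a hypothetical
# experiment directory.
#
# import json, os.path as osp
# from BurgersEnv.Burgers import Burgers
# from DDPG.DDPG_new import DDPG
# from DDPG.util import GaussNoise
#
# vv = json.load(open(osp.join(exp_path, 'variant.json')))
# agent = DDPG(vv, GaussNoise(initial_sig=vv['noise_beg'], final_sig=vv['noise_end']))
# env = Burgers(vv, agent=agent)
# agent = DDPG_train(vv, env, agent)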
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,746
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/Weno/weno3_2.py
|
import numpy as np
import scipy.optimize as sco
from matplotlib import pyplot as plt
"""
This file implements 5-order finite volume WENO on Burgers Equation.
See bottom for usage examples.
"""
class Weno3:
def __init__(self, left_boundary, right_boundary, ncells, flux_callback, flux_deriv_callback,
max_flux_deriv_callback, dx, dt, boundary = 'periodic',
char_speed = 1, cfl_number=0.8, eps=1.0e-6, record = True, num_t = 6000,
eta = 0, forcing=None):
"""
:rtype : Weno3
"""
a = left_boundary
b = right_boundary
self.N = ncells
# self.dx = (b - a) / (self.N + 0.0)
self.dx = dx
self.dt = dt
self.CFL_NUMBER = cfl_number
self.CHAR_SPEED = char_speed
self.t = 0.0
ORDER_OF_SCHEME = 3
self.EPS = eps
# Ideal weights for the right boundary.
self.iw_right = np.array([[3.0 / 10.0], [6.0 / 10.0], [1.0 / 10.0]])
self.iw_left = np.array([[1.0 / 10.0], [6.0 / 10.0], [3.0 / 10.0]])
self.flux = flux_callback
self.flux_deriv = flux_deriv_callback
self.max_flux_deriv = max_flux_deriv_callback
self.x_boundary = np.linspace(a, b, self.N + 1)
self.x_center = np.zeros(self.N)
self.eta = eta # viscous term coefficient
self.forcing = forcing # force term
for i in range(0, self.N):
self.x_center[i] = (self.x_boundary[i] + self.x_boundary[i + 1]) / 2.0
self.u_right_boundary_approx = np.zeros((ORDER_OF_SCHEME, self.N))
self.u_left_boundary_approx = np.zeros((ORDER_OF_SCHEME, self.N))
self.u_right_boundary = np.zeros(self.N)
self.u_left_boundary = np.zeros(self.N)
self.beta = np.zeros((ORDER_OF_SCHEME, self.N))
self.alpha_right = np.zeros((ORDER_OF_SCHEME, self.N))
self.alpha_left = np.zeros((ORDER_OF_SCHEME, self.N))
self.sum_alpha_right = np.zeros(self.N)
self.sum_alpha_left = np.zeros(self.N)
self.omega_right = np.zeros((ORDER_OF_SCHEME, self.N))
self.omega_left = np.zeros((ORDER_OF_SCHEME, self.N))
self.fFlux = np.zeros(self.N + 1)
self.rhsValues = np.zeros(self.N)
self.u_multistage = np.zeros((3, self.N))
if record:
self.u_grid = np.zeros((num_t, self.N))
self.record = record
self.boundary = boundary
# self.u_grid = []
def integrate(self, u0, time_final, num_t=None):
# self.dt = self.CFL_NUMBER * self.dx / self.CHAR_SPEED
self.T = time_final
self.u_multistage[0] = u0
if self.record:
self.u_grid[0] = u0
# self.u_grid.append(u0)
index = 1
if num_t is None:
evolve_time = int(time_final / self.dt)
else:
evolve_time = num_t
print('T is {0}, dt is {1}, evolve time is {2}'.format(time_final, self.dt, evolve_time))
# while self.t < self.T:
# if self.t + self.dt > self.T:
# self.dt = self.T - self.t
for _ in range(evolve_time):
self.u_multistage[1] = self.u_multistage[0] + self.dt * self.rhs(self.u_multistage[0])
self.u_multistage[2] = (3 * self.u_multistage[0] + self.u_multistage[1] + self.dt * self.rhs(self.u_multistage[1])) / 4.0
self.u_multistage[0] = (self.u_multistage[0] + 2.0 * self.u_multistage[2] + 2.0 * self.dt * self.rhs(self.u_multistage[2])) / 3.0
if self.record:
self.u_grid[index] = self.u_multistage[0]
# self.u_grid.append(self.u_multistage[0])
index += 1
self.t += self.dt
return self.u_multistage[0]
def rhs(self, u):
self._rhs(u)
# Numerical flux calculation.
self.fFlux[1:-1] = self.numflux(self.u_right_boundary[0:-1], self.u_left_boundary[1:])
self.fFlux[0] = self.numflux(self.u_right_boundary[self.N - 1], self.u_left_boundary[0])
self.fFlux[self.N] = self.numflux(self.u_right_boundary[self.N - 1], self.u_left_boundary[0])
# Right hand side calculation, for the u^2 flux part.
rhsValues = self.fFlux[1:] - self.fFlux[0:-1]
rhsValues = -rhsValues / self.dx
# compute the viscous term part
if self.eta > 0:
# self.fFlux[1:-1] = self.identity_numflux(self.u_right_boundary[0:-1], self.u_left_boundary[1:])
# self.fFlux[0] = self.identity_numflux(self.u_right_boundary[self.N - 1], self.u_left_boundary[0])
# self.fFlux[self.N] = self.identity_numflux(self.u_right_boundary[self.N - 1], self.u_left_boundary[0])
# u_x = self.fFlux[1:] - self.fFlux[0:-1]
# u_x = u_x / self.dx
u_x = np.zeros_like(u)
u_x[1:-1] = (u[2:] - u[:-2]) / (2 * self.dx)
u_x[0] = (u[1] - u[-2]) / (2 * self.dx)
u_x[-1] = (u[1] - u[-2]) / (2 * self.dx)
# self._rhs(u_x) # recompute the left/right boundary using u_x as the grid value
# self.fFlux[1:-1] = self.identity_numflux(self.u_right_boundary[0:-1], self.u_left_boundary[1:])
# self.fFlux[0] = self.identity_numflux(self.u_right_boundary[self.N - 1], self.u_left_boundary[0])
# self.fFlux[self.N] = self.identity_numflux(self.u_right_boundary[self.N - 1], self.u_left_boundary[0])
# u_xx = self.fFlux[1:] - self.fFlux[0:-1]
# u_xx = u_xx / self.dx
u_xx = np.zeros_like(u_x)
u_xx[1:-1] = (u_x[2:] - u_x[:-2]) / (2 * self.dx)
u_xx[0] = (u_x[1] - u_x[-2]) / (2 * self.dx)
u_xx[-1] = (u_x[1] - u_x[-2]) / (2 * self.dx)
rhsValues += self.eta * u_xx
if self.forcing is not None:
period = self.x_center[-1] - self.x_center[0]
rhsValues += self.forcing(x=self.x_center, t=self.t, period=period)
return rhsValues
def _rhs(self, u):
# WENO Reconstruction
# Approximations for inner cells 0 < i < N-1.
self.u_right_boundary_approx[0][2:-2] = 1.0 / 3.0 * u[2:-2] + 5.0 / 6.0 * u[3:-1] - 1.0 / 6.0 * u[4:]
self.u_right_boundary_approx[1][2:-2] = -1.0 / 6.0 * u[1:-3] + 5.0 / 6.0 * u[2:-2] + 1.0 / 3.0 * u[3:-1]
self.u_right_boundary_approx[2][2:-2] = 1.0 / 3.0 * u[0:-4] - 7.0 / 6.0 * u[1:-3] + 11.0 / 6.0 * u[2:-2]
self.u_left_boundary_approx[0][2:-2] = 11.0 / 6.0 * u[2:-2] - 7.0 / 6.0 * u[3:-1] + 1.0 / 3.0 * u[4:]
self.u_left_boundary_approx[1][2:-2] = 1.0 / 3.0 * u[1:-3] + 5.0 / 6.0 * u[2:-2] - 1.0 / 6.0 * u[3:-1]
self.u_left_boundary_approx[2][2:-2] = -1.0 / 6.0 * u[0:-4] + 5.0 / 6.0 * u[1:-3] + 1.0 / 3.0 * u[2:-2]
if self.boundary == 'periodic':
# Approximations for cell i = 0 (the leftmost cell).
self.u_right_boundary_approx[0][0] = 1.0 / 3.0 * u[0] + 5.0 / 6.0 * u[1] - 1.0 / 6.0 * u[2]
self.u_right_boundary_approx[1][0] = -1.0 / 6.0 * u[-2] + 5.0 / 6.0 * u[0] + 1.0 / 3.0 * u[1]
self.u_right_boundary_approx[2][0] = 1.0 / 3.0 * u[-3] - 7.0 / 6.0 * u[-2] + 11.0 / 6.0 * u[0]
self.u_left_boundary_approx[0][0] = 11.0 / 6.0 * u[0] - 7.0 / 6.0 * u[1] + 1.0 / 3.0 * u[2]
self.u_left_boundary_approx[1][0] = 1.0 / 3.0 * u[-2] + 5.0 / 6.0 * u[0] - 1.0 / 6.0 * u[1]
self.u_left_boundary_approx[2][0] = -1.0 / 6.0 * u[-3] + 5.0 / 6.0 * u[-2] + 1.0 / 3.0 * u[0]
# Approximations for cell i = 1.
self.u_right_boundary_approx[0][1] = 1.0 / 3.0 * u[1] + 5.0 / 6.0 * u[2] - 1.0 / 6.0 * u[3]
self.u_right_boundary_approx[1][1] = -1.0 / 6.0 * u[0] + 5.0 / 6.0 * u[1] + 1.0 / 3.0 * u[2]
self.u_right_boundary_approx[2][1] = 1.0 / 3.0 * u[-2] - 7.0 / 6.0 * u[0] + 11.0 / 6.0 * u[1]
self.u_left_boundary_approx[0][1] = 11.0 / 6.0 * u[1] - 7.0 / 6.0 * u[2] + 1.0 / 3.0 * u[3]
self.u_left_boundary_approx[1][1] = 1.0 / 3.0 * u[0] + 5.0 / 6.0 * u[1] - 1.0 / 6.0 * u[2]
self.u_left_boundary_approx[2][1] = -1.0 / 6.0 * u[-2] + 5.0 / 6.0 * u[0] + 1.0 / 3.0 * u[1]
# Approximations for cell i = N-2.
self.u_right_boundary_approx[0][-2] = 1.0 / 3.0 * u[-2] + 5.0 / 6.0 * u[-1] - 1.0 / 6.0 * u[1]
self.u_right_boundary_approx[1][-2] = -1.0 / 6.0 * u[-3] + 5.0 / 6.0 * u[-2] + 1.0 / 3.0 * u[-1]
self.u_right_boundary_approx[2][-2] = 1.0 / 3.0 * u[-4] - 7.0 / 6.0 * u[-3] + 11.0 / 6.0 * u[-2]
self.u_left_boundary_approx[0][-2] = 11.0 / 6.0 * u[-2] - 7.0 / 6.0 * u[-1] + 1.0 / 3.0 * u[1]
self.u_left_boundary_approx[1][-2] = 1.0 / 3.0 * u[-3] + 5.0 / 6.0 * u[-2] - 1.0 / 6.0 * u[-1]
self.u_left_boundary_approx[2][-2] = -1.0 / 6.0 * u[-4] + 5.0 / 6.0 * u[-3] + 1.0 / 3.0 * u[-2]
# Approximations for cell i = N-1 (the rightmost cell).
self.u_right_boundary_approx[0][-1] = 1.0 / 3.0 * u[-1] + 5.0 / 6.0 * u[1] - 1.0 / 6.0 * u[2]
self.u_right_boundary_approx[1][-1] = -1.0 / 6.0 * u[-2] + 5.0 / 6.0 * u[-1] + 1.0 / 3.0 * u[1]
self.u_right_boundary_approx[2][-1] = 1.0 / 3.0 * u[-3] - 7.0 / 6.0 * u[-2] + 11.0 / 6.0 * u[-1]
self.u_left_boundary_approx[0][-1] = 11.0 / 6.0 * u[-1] - 7.0 / 6.0 * u[1] + 1.0 / 3.0 * u[2]
self.u_left_boundary_approx[1][-1] = 1.0 / 3.0 * u[-2] + 5.0 / 6.0 * u[-1] - 1.0 / 6.0 * u[1]
self.u_left_boundary_approx[2][-1] = -1.0 / 6.0 * u[-3] + 5.0 / 6.0 * u[-2] + 1.0 / 3.0 * u[-1]
elif self.boundary == 'outflow':
print('outflow')
# Approximations for cell i = 0 (the leftmost cell).
self.u_right_boundary_approx[0][0] = 1.0 / 3.0 * u[0] + 5.0 / 6.0 * u[1] - 1.0 / 6.0 * u[2]
self.u_right_boundary_approx[1][0] = -1.0 / 6.0 * u[0] + 5.0 / 6.0 * u[0] + 1.0 / 3.0 * u[1]
self.u_right_boundary_approx[2][0] = 1.0 / 3.0 * u[0] - 7.0 / 6.0 * u[0] + 11.0 / 6.0 * u[0]
self.u_left_boundary_approx[0][0] = 11.0 / 6.0 * u[0] - 7.0 / 6.0 * u[1] + 1.0 / 3.0 * u[2]
self.u_left_boundary_approx[1][0] = 1.0 / 3.0 * u[0] + 5.0 / 6.0 * u[0] - 1.0 / 6.0 * u[1]
self.u_left_boundary_approx[2][0] = -1.0 / 6.0 * u[0] + 5.0 / 6.0 * u[0] + 1.0 / 3.0 * u[0]
# Approximations for cell i = 1.
self.u_right_boundary_approx[0][1] = 1.0 / 3.0 * u[1] + 5.0 / 6.0 * u[2] - 1.0 / 6.0 * u[3]
self.u_right_boundary_approx[1][1] = -1.0 / 6.0 * u[0] + 5.0 / 6.0 * u[1] + 1.0 / 3.0 * u[2]
self.u_right_boundary_approx[2][1] = 1.0 / 3.0 * u[0] - 7.0 / 6.0 * u[0] + 11.0 / 6.0 * u[1]
self.u_left_boundary_approx[0][1] = 11.0 / 6.0 * u[1] - 7.0 / 6.0 * u[2] + 1.0 / 3.0 * u[3]
self.u_left_boundary_approx[1][1] = 1.0 / 3.0 * u[0] + 5.0 / 6.0 * u[1] - 1.0 / 6.0 * u[2]
self.u_left_boundary_approx[2][1] = -1.0 / 6.0 * u[0] + 5.0 / 6.0 * u[0] + 1.0 / 3.0 * u[1]
# Approximations for cell i = N-2.
self.u_right_boundary_approx[0][-2] = 1.0 / 3.0 * u[-2] + 5.0 / 6.0 * u[-1] - 1.0 / 6.0 * u[-1]
self.u_right_boundary_approx[1][-2] = -1.0 / 6.0 * u[-3] + 5.0 / 6.0 * u[-2] + 1.0 / 3.0 * u[-1]
self.u_right_boundary_approx[2][-2] = 1.0 / 3.0 * u[-4] - 7.0 / 6.0 * u[-3] + 11.0 / 6.0 * u[-2]
self.u_left_boundary_approx[0][-2] = 11.0 / 6.0 * u[-2] - 7.0 / 6.0 * u[-1] + 1.0 / 3.0 * u[-1]
self.u_left_boundary_approx[1][-2] = 1.0 / 3.0 * u[-3] + 5.0 / 6.0 * u[-2] - 1.0 / 6.0 * u[-1]
self.u_left_boundary_approx[2][-2] = -1.0 / 6.0 * u[-4] + 5.0 / 6.0 * u[-3] + 1.0 / 3.0 * u[-2]
# Approximations for cell i = N-1 (the rightmost cell).
self.u_right_boundary_approx[0][-1] = 1.0 / 3.0 * u[-1] + 5.0 / 6.0 * u[-1] - 1.0 / 6.0 * u[-1]
self.u_right_boundary_approx[1][-1] = -1.0 / 6.0 * u[-2] + 5.0 / 6.0 * u[-1] + 1.0 / 3.0 * u[-1]
self.u_right_boundary_approx[2][-1] = 1.0 / 3.0 * u[-3] - 7.0 / 6.0 * u[-2] + 11.0 / 6.0 * u[-1]
self.u_left_boundary_approx[0][-1] = 11.0 / 6.0 * u[-1] - 7.0 / 6.0 * u[-1] + 1.0 / 3.0 * u[-1]
self.u_left_boundary_approx[1][-1] = 1.0 / 3.0 * u[-2] + 5.0 / 6.0 * u[-1] - 1.0 / 6.0 * u[-1]
self.u_left_boundary_approx[2][-1] = -1.0 / 6.0 * u[-3] + 5.0 / 6.0 * u[-2] + 1.0 / 3.0 * u[-1]
self.beta[0][2:-2] = 13.0 / 12.0 * (u[2:-2] - 2 * u[3:-1] + u[4:]) ** 2 + \
1.0 / 4.0 * (3*u[2:-2] - 4.0 * u[3:-1] + u[4:]) ** 2
self.beta[1][2:-2] = 13.0 / 12.0 * (u[1:-3] - 2 * u[2:-2] + u[3:-1]) ** 2 + \
1.0 / 4.0 * (u[1:-3] - u[3:-1]) ** 2
self.beta[2][2:-2] = 13.0 / 12.0 * (u[0:-4] - 2 * u[1:-3] + u[2:-2]) ** 2 + \
1.0 / 4.0 * (u[0:-4] - 4.0 * u[1:-3] + 3 * u[2:-2]) ** 2
if self.boundary == 'periodic':
self.beta[0][0] = 13.0 / 12.0 * (u[0] - 2 * u[1] + u[2]) ** 2 + \
1.0 / 4.0 * (3*u[0] - 4.0 * u[1] + u[2]) ** 2
self.beta[1][0] = 13.0 / 12.0 * (u[-2] - 2 * u[0] + u[1]) ** 2 + \
1.0 / 4.0 * (u[-2] - u[1]) ** 2
self.beta[2][0] = 13.0 / 12.0 * (u[-3] - 2 * u[-2] + u[0]) ** 2 + \
1.0 / 4.0 * (u[-3] - 4.0 * u[-2] + 3 * u[0]) ** 2
self.beta[0][1] = 13.0 / 12.0 * (u[1] - 2 * u[2] + u[3]) ** 2 + \
1.0 / 4.0 * (3*u[1] - 4.0 * u[2] + u[3]) ** 2
self.beta[1][1] = 13.0 / 12.0 * (u[0] - 2 * u[1] + u[2]) ** 2 + \
1.0 / 4.0 * (u[0] - u[2]) ** 2
self.beta[2][1] = 13.0 / 12.0 * (u[-2] - 2 * u[0] + u[1]) ** 2 + \
1.0 / 4.0 * (u[-2] - 4.0 * u[0] + 3 * u[1]) ** 2
self.beta[0][-2] = 13.0 / 12.0 * (u[-2] - 2 * u[-1] + u[1]) ** 2 + \
1.0 / 4.0 * (3*u[-2] - 4.0 * u[-1] + u[1]) ** 2
self.beta[1][-2] = 13.0 / 12.0 * (u[-3] - 2 * u[-2] + u[-1]) ** 2 + \
1.0 / 4.0 * (u[-3] - u[-1]) ** 2
self.beta[2][-2] = 13.0 / 12.0 * (u[-4] - 2 * u[-3] + u[-2]) ** 2 + \
1.0 / 4.0 * (u[-4] - 4.0 * u[-3] + 3 * u[-2]) ** 2
self.beta[0][-1] = 13.0 / 12.0 * (u[-1] - 2 * u[1] + u[2]) ** 2 + \
1.0 / 4.0 * (3*u[-1] - 4.0 * u[1] + u[2]) ** 2
self.beta[1][-1] = 13.0 / 12.0 * (u[-2] - 2 * u[-1] + u[1]) ** 2 + \
1.0 / 4.0 * (u[-2] - u[1]) ** 2
self.beta[2][-1] = 13.0 / 12.0 * (u[-3] - 2 * u[-2] + u[-1]) ** 2 + \
1.0 / 4.0 * (u[-3] - 4.0 * u[-2] + 3 * u[-1]) ** 2
elif self.boundary == 'outflow':
print("outflow!")
self.beta[0][0] = 13.0 / 12.0 * (u[0] - 2 * u[1] + u[2]) ** 2 + \
1.0 / 4.0 * (3*u[0] - 4.0 * u[1] + u[2]) ** 2
self.beta[1][0] = 13.0 / 12.0 * (u[0] - 2 * u[0] + u[1]) ** 2 + \
1.0 / 4.0 * (u[0] - u[1]) ** 2
self.beta[2][0] = 13.0 / 12.0 * (u[0] - 2 * u[0] + u[0]) ** 2 + \
1.0 / 4.0 * (u[0] - 4.0 * u[0] + 3 * u[0]) ** 2
self.beta[0][1] = 13.0 / 12.0 * (u[1] - 2 * u[2] + u[3]) ** 2 + \
1.0 / 4.0 * (3*u[1] - 4.0 * u[2] + u[3]) ** 2
self.beta[1][1] = 13.0 / 12.0 * (u[0] - 2 * u[1] + u[2]) ** 2 + \
1.0 / 4.0 * (u[0] - u[2]) ** 2
self.beta[2][1] = 13.0 / 12.0 * (u[0] - 2 * u[0] + u[1]) ** 2 + \
1.0 / 4.0 * (u[0] - 4.0 * u[0] + 3 * u[1]) ** 2
self.beta[0][-2] = 13.0 / 12.0 * (u[-2] - 2 * u[-1] + u[-1]) ** 2 + \
1.0 / 4.0 * (3*u[-2] - 4.0 * u[-1] + u[-1]) ** 2
self.beta[1][-2] = 13.0 / 12.0 * (u[-3] - 2 * u[-2] + u[-1]) ** 2 + \
1.0 / 4.0 * (u[-3] - u[-1]) ** 2
self.beta[2][-2] = 13.0 / 12.0 * (u[-4] - 2 * u[-3] + u[-2]) ** 2 + \
1.0 / 4.0 * (u[-4] - 4.0 * u[-3] + 3 * u[-2]) ** 2
self.beta[0][-1] = 13.0 / 12.0 * (u[-1] - 2 * u[-1] + u[-1]) ** 2 + \
1.0 / 4.0 * (3*u[-1] - 4.0 * u[-1] + u[-1]) ** 2
self.beta[1][-1] = 13.0 / 12.0 * (u[-2] - 2 * u[-1] + u[-1]) ** 2 + \
1.0 / 4.0 * (u[-2] - u[-1]) ** 2
self.beta[2][-1] = 13.0 / 12.0 * (u[-3] - 2 * u[-2] + u[-1]) ** 2 + \
1.0 / 4.0 * (u[-3] - 4.0 * u[-2] + 3 * u[-1]) ** 2
self.alpha_right = self.iw_right / ((self.EPS + self.beta) ** 2)
self.alpha_left = self.iw_left / ((self.EPS + self.beta) ** 2)
self.sum_alpha_right = self.alpha_right[0] + self.alpha_right[1] + self.alpha_right[2]
self.sum_alpha_left = self.alpha_left[0] + self.alpha_left[1] + self.alpha_left[2]
self.omega_right = self.alpha_right / self.sum_alpha_right
self.omega_left = self.alpha_left / self.sum_alpha_left
self.u_right_boundary = self.omega_right[0] * self.u_right_boundary_approx[0] + \
self.omega_right[1] * self.u_right_boundary_approx[1] + \
self.omega_right[2] * self.u_right_boundary_approx[2]
self.u_left_boundary = self.omega_left[0] * self.u_left_boundary_approx[0] + \
self.omega_left[1] * self.u_left_boundary_approx[1] + \
self.omega_left[2] * self.u_left_boundary_approx[2]
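# Worked mini-note on the weights above (illustrative): on smooth data all
# smoothness indicators beta_k are small and comparable, so
#     alpha_k = iw_k / (EPS + beta_k)^2,  omega_k = alpha_k / sum_j alpha_j
# stays close to the ideal weights iw_left / iw_right; near a discontinuity,
# a large beta_k drives the weight of any stencil crossing it toward zero.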
def identity_numflux(self, a, b):
"""
flux is identity function.
"""
maxval = 1
return 0.5 * (a + b - maxval*(b - a))
def numflux(self, a, b):
"""
Return Lax-Friedrichs numerical flux.
"""
flux = self.flux
max_flux_deriv = self.max_flux_deriv
maxval = max_flux_deriv(a, b)
return 0.5 * (flux(a) + flux(b) - maxval * (b - a))
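# Illustrative note: this is the (local) Lax-Friedrichs flux
#     h(a, b) = 0.5 * (f(a) + f(b) - max|f'| * (b - a)),
# which is monotone as long as maxval bounds the wave speed between a and b.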
def get_x_center(self):
return self.x_center
def get_x_boundary(self):
return self.x_boundary
def get_dx(self):
return self.dx
class weno3_fv():
'''
This class wraps the finite volume 5-th order WENO.
'''
def __init__(self, flux_name):
'''
Arg flux (str): specifies the flux function.
'''
self.flux_name = flux_name
### the Burgers flux function. Can be changed to any other function if needed for future usage.
def flux(self, val):
if self.flux_name == 'u2':
return val ** 2 / 2.
elif self.flux_name == 'u4':
return val ** 4 / 16.
elif self.flux_name == 'u3':
return val ** 3 / 9.
def flux_deriv(self, val):
if self.flux_name == 'u2':
return val
elif self.flux_name == 'u4':
return val ** 3 / 4.
elif self.flux_name == 'u3':
return val ** 2 / 3
def max_flux_deriv(self, a, b):
if self.flux_name == 'u2':
return np.maximum(np.abs(a), np.abs(b))
elif self.flux_name == 'u4':
return np.maximum(np.abs(a ** 3 / 4.), np.abs(b ** 3 / 4.))
elif self.flux_name == 'u3':
return np.maximum(np.abs(a ** 2 / 3.), np.abs(b ** 2 / 3.))
def solve(self, x_center, t, u0, cfl, eta=0):
'''
This function wraps the process of using finite volume WENO to evolve a grid.
Arg x_center (1d np array): the initial x grid
Arg t (float): evolving time
Arg u0 (1d np array, same shape as x_center): inital values of u on the initial x grid x_center.
Arg cfl (float): the cfl number used to determine temporal step size dt.
Return: the whole evolving grid from time 0 to t.
'''
dx = x_center[1] - x_center[0]
left_boundary = x_center[0] - dx * 0.5
right_boundary = x_center[-1] + dx * 0.5
ncells = len(x_center)
dt = dx * cfl
num_t = int(t / dt) + 1
w = Weno3(left_boundary, right_boundary, ncells, self.flux, self.flux_deriv, self.max_flux_deriv,
dx = dx, dt = dt, cfl_number = cfl, num_t=num_t, eta=eta)
w.integrate(u0, t)
return w.u_grid[:num_t,:]
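# --- Illustrative usage sketch for weno3_fv (grid and horizon are assumptions) ---
# x = np.linspace(0.0, 1.0, 101)                 # uniform grid, dx = 0.01
# u0 = 0.5 + np.sin(2 * np.pi * x)               # any smooth initial profile
# solver = weno3_fv('u2')                        # Burgers flux f(u) = u^2 / 2
# grid = solver.solve(x, t=0.5, u0=u0, cfl=0.1)  # array of shape (num_t, 101)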
if __name__ == '__main__':
"""
sample usage.
"""
flux_func = 'u2'
import argparse
args = argparse.ArgumentParser()
args.add_argument('--x_low', type = float, default = 0)
args.add_argument('--x_high', type = float, default = 1)
args.add_argument('--dx', type = float, default = 0.02)
args.add_argument('--cfl', type = float, default = 0.1)
args.add_argument('--T', type = float, default = 0.5)
args.add_argument('--save_path', type = str, default =None)
args = args.parse_args()
def get_weno_grid(init_condition, dx = 0.001, dt = 0.0001, T = 0.8, x_low = -1, x_high = 1,
boundary='periodic', eta=0.04):
"""
dx: grid size. e.g. 0.02
dt: time step size. e.g. 0.004
T: evolve time. e.g. 1
x_low: if the grid is in the range [-1,1], then x_low = -1.
x_high: if the grid is in the range [-1,1], then x_high = 1.
init_condition: a function that computes the initial values of u. e.g. the init func above.
return: a grid of size num_t x num_x
where num_t = T/dt + 1
num_x = (x_high - x_low) / dx + 1
"""
left_boundary = x_low - dx * 0.5
right_boundary = x_high + dx * 0.5
ncells = int((x_high - x_low) / dx + 1.) # need to be very small
num_t = int(T/dt + 1.)
w = Weno3(left_boundary, right_boundary, ncells, flux, flux_deriv, max_flux_deriv,
dx = dx, dt = dt, num_t = num_t + 100, boundary=boundary, eta=eta)
x_center = w.get_x_center()
u0 = init_condition(x_center)
w.integrate(u0, T)
solutions = w.u_grid[:num_t,:]
return solutions
### the Burgers flux function. Can be changed to any other function if needed for future usage.
def flux(val, flux = flux_func):
if flux == 'u2':
return val ** 2 / 2.
elif flux == 'u4':
return val ** 4 / 16.
elif flux == 'u3':
return val ** 3 / 9.
elif flux == 'BL':
return val ** 2 / (val ** 2 + 0.5 * (1-val) ** 2)
elif flux.startswith("linear"):
a = float(flux[len('linear'):])
return a * val
def flux_deriv(val, flux = flux_func):
if flux == 'u2':
return val
elif flux == 'u4':
return val ** 3 / 4.
elif flux == 'u3':
return val ** 2 / 3
elif flux == 'BL':
return val * (1 - val) / (val ** 2 + 0.5 * (1 - val) ** 2) ** 2  # analytic derivative of u^2 / (u^2 + 0.5(1-u)^2)
elif flux.startswith("linear"):
a = float(flux[len('linear'):])
return a
def max_flux_deriv(a, b, flux = flux_func):
if flux == 'u2':
return np.maximum(np.abs(a), np.abs(b))
elif flux == 'u4':
return np.maximum(np.abs(a ** 3 / 4.), np.abs(b ** 3 / 4.))
elif flux == 'u3':
return np.maximum(np.abs(a ** 2 / 3.), np.abs(b ** 2 / 3.))
elif flux == 'BL':
a = a * (1 - a) / (a ** 2 + 0.5 * (1 - a) ** 2) ** 2  # f'(a) for the BL flux
b = b * (1 - b) / (b ** 2 + 0.5 * (1 - b) ** 2) ** 2  # f'(b); note b, not the overwritten a
return np.maximum(np.abs(a), np.abs(b))
elif flux.startswith("linear"):
a = float(flux[len('linear'):])
return np.abs(a)
def show(solutions, x_grid, save_path=None):
"""
plot an animation of the evolving process.
solutions: solution array returned by get_weno_grid.
x_grid: e.g. [-1, -0.96, ..., 0.96, 1]
"""
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import animation
### you need to install ffmpeg if you want to store the animations
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
x_low = x_grid[0]
x_high = x_grid[-1]
num_x = len(x_grid)
num_t = len(solutions)
print(x_low, x_high)
fig = plt.figure(figsize = (15, 10))
ax = fig.add_subplot(1,1,1)
ax.set_xlim((x_low, x_high))
ymin = np.min(solutions[0]) - 0.1
ymax = np.max(solutions[0]) + 0.1
ax.set_ylim((ymin, ymax)) ## you might want to change these params
line, = ax.plot(x_grid, [0 for _ in range(num_x)], lw = 2, label = 'solution')
def init():
line.set_data([], [])
return line
def func(i):
# print('make animations, step: ', i)
x = np.linspace(x_low, x_high, num_x)
y = solutions[i]
line.set_data(x, y)
return line
anim = animation.FuncAnimation(fig=fig, func=func, init_func=init, frames=num_t, interval=50)
plt.legend()
plt.title('Solutions')
plt.tight_layout()
if save_path is not None:
save_name = save_path
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
anim.save("./data/video/" + save_name + ".mp4", writer=writer)
plt.show()
plt.close()
# import time
# test_time = 20
# time_costs = []
# for i in range(test_time):
# start = time.time()
# solutions = get_weno_grid(dx = 0.002, dt = 0.0004, init_condition = init)
# tmp = time.time() - start
# print('test {}, use time {}'.format(i, tmp))
# time_costs.append(tmp)
# print("good weno average time: ", np.mean(time_costs))
def init_simple(x, t=0):
return -0.8412740798850631 + 1.6933719575704096 * np.sin(6 * np.pi * x) + 1.47 * np.cos(6 * np.pi * x)
init = init_simple
dx = args.dx
dt = dx * args.cfl
x_low = args.x_low
x_high = args.x_high
T = args.T
boundary = 'periodic'
solutions = get_weno_grid(x_low = x_low, x_high = x_high, dx = dx, dt = dt, init_condition = init, T = T, boundary=boundary, eta=0.04)
# solutions = get_weno_grid(init_condition = init)
print(solutions)
num_x = int((x_high - x_low) / dx) + 1
x_grid = np.linspace(x_low, x_high, num_x)
# print(x_grid)
show(solutions, x_grid, args.save_path)
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,747
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/get_args.py
|
'''
File Description:
This file defines all the command line arguments.
'''
import argparse
import sys
import numpy as np
# from tensorboardX import SummaryWriter
def get_args():
'''
This function defines and parses the command line arguments, returns a namespace variable that
holds all the necessary arguments for the entire training procedure.
'''
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument('--load_path', type=str, default=None)
argparser.add_argument('--save_path', type=str, default=None)
argparser.add_argument('--cuda', type=int, default=0)
argparser.add_argument('--exp_id', type=str, default='debug')
argparser.add_argument('--residual', type=int, default=0)
argparser.add_argument('--supervise_weno_coef', type=float, default=0)
### PDE settings
argparser.add_argument('--dt', type=float, default=0.002)
argparser.add_argument('--dx', type=float, default=0.02)
argparser.add_argument('--precise_dx', type=float, default=0.001)
argparser.add_argument('--precise_dt', type=float, default=0.00005)
argparser.add_argument('--x_low', type=float, default=0)
argparser.add_argument('--x_high', type=float, default=1)
argparser.add_argument('--initial_t', type=float, default=0.)
argparser.add_argument('--T', type=float, default=0.8)
argparser.add_argument('--trainT', type=float, default=0.5)
argparser.add_argument('--boundary_condition', type=str, default='periodic')
argparser.add_argument('--Tscheme', type=str, default='euler')
argparser.add_argument('--init', type=str, default='sinoffset')
argparser.add_argument('--cfl', type=float, default=0.1)
argparser.add_argument('--flux', type=str, default='u2')
### Training Env Settings
argparser.add_argument('--state_mode', type=str, default='normalize', help = 'normalize, unnormalize, mix; see torch_networks.py \
in DDPG for more details.')
argparser.add_argument('--input_normalize', type=int, default=0)
argparser.add_argument('--num_train', type=int, default=6)
argparser.add_argument('--num_test', type=int, default=5)
argparser.add_argument('--state_window_size', type=int, default=2) ### v4 TODO: check
argparser.add_argument('--action_window_size', type=int, default=2) ### v4 TODO: check
argparser.add_argument('--state_dim', type=int, default=11)
argparser.add_argument('--action_dim', type=int, default=5)
argparser.add_argument('--mode', type=str, default='weno_coef_four')
argparser.add_argument('--agent', type=str, default='ddpg')
argparser.add_argument('--formulation', type=str, default='MLP') ### v4 TODO: remove
### Training logics settings
argparser.add_argument('--record_every', type=int, default=1)
argparser.add_argument('--debug', type=bool, default=False)
argparser.add_argument('--test_every', type=int, default=100)
argparser.add_argument('--save_every', type=int, default=100)
argparser.add_argument('--train_epoch', type=int, default=30000)
argparser.add_argument('--num_steps', type=int, default=1000000)
argparser.add_argument('--large_scale_train', type=int, default=0, help = '\
whether to perform large-scale training. If set to 1, randomly sample 40 initial conditions, use a smaller learning rate, \
and deeper networks')
### Reward Settings
argparser.add_argument('--reward_type', type = str, default = 'neighbor', help = 'rewarding mechanism, \
{neighbor, single, all}')
argparser.add_argument('--reward_scale', type = int, default = 0)
### about visulization
argparser.add_argument('--show_RL_weno_animation', type=int, default=0, help = 'whether to show the evolving animation')
argparser.add_argument('--save_RL_weno_animation_path', type=str, default='./data/video/',
help = 'save path of the evolving animation')
argparser.add_argument('--test', type=bool, default=False)
argparser.add_argument('--animation', type=int, default=0, help = 'whether to plot the evolving animation')
argparser.add_argument('--video_interval', type=int, default=2000)
### General RL Algorithm Parameters
argparser.add_argument('--gamma', type=float, default=0.99)
argparser.add_argument('--a_lr', type=float, default=1e-4)
argparser.add_argument('--final_a_lr', type=float, default=1e-7)
argparser.add_argument('--c_lr', type=float, default=1e-3)
argparser.add_argument('--final_c_lr', type=float, default=1e-7)
argparser.add_argument('--batch_size', type=int, default=64)
argparser.add_argument('--optimizer', type=str, default='adam')
argparser.add_argument('--hidden_layer_num', type=int, default=6)
argparser.add_argument('--max_grad_norm', type=float, default=1)
argparser.add_argument('--clip_gradient', type=int, default=0)
argparser.add_argument('--sl_lr', type=float, default=1e-4)
argparser.add_argument('--sl_train_step', type=float, default=1)
### DQN and DDPG and SAC parameter
argparser.add_argument('--tau', type=float, default=0.02, help = 'soft update target network param')
argparser.add_argument('--update_every', type=int, default=1, help = 'interval of hard copy params to the target network')
argparser.add_argument('--replay_traj_num', type=int, default=10000)
argparser.add_argument('--replay_size', type=int, default=500000)
argparser.add_argument('--noise', type=str, default='action')
argparser.add_argument('--noise_beg', type = float, default = 0.5, help = 'initial noise scale')
argparser.add_argument('--noise_end', type = float, default = 0.001, help = 'final noise scale')
argparser.add_argument('--noise_dec', type = float, default = 0.02, help = 'how much noise to decrease at a time')
argparser.add_argument('--noise_dec_every', type = int, default = 400, help = 'interval of each noise decrease')
argparser.add_argument('--num_process', type = int, default =4, help = 'number of parallel threads.')
argparser.add_argument('--ddpg_train_iter', type = int, default = 20)
argparser.add_argument('--ddpg_value_train_iter', type = int, default = 5)
argparser.add_argument('--update_mode', type = str, default = 'soft')
argparser.add_argument('--multistep_return', type = int, default = 5, help = 'bellman equation unroll steps when updating the Q-network')
argparser.add_argument('--ddpg_net', type = str, default = 'roe')
argparser.add_argument('--automatic_entropy_tuning', type = int, default = 1, help='\
whether to use sac v2')
argparser.add_argument('--alpha', type = float, default = 0.2, help='sac alpha')  # the SAC temperature is a float
argparser.add_argument('--hidden_size', type = int, default = 64, help='sac network hidden_size')
argparser.add_argument('--updates_per_step', type = int, default = 1, help='sac gradient step')
### random seeds
argparser.add_argument('--seed', type=int, default=6)
args = argparser.parse_args()
### just support weno-coef 4 now
# args.state_dim = 7 + 8 ### add weno weight as input: trying to learn a residual
args.state_dim = 7 ### nips model
args.action_dim = 8
assert np.abs(args.dx * args.cfl - args.dt) < 1e-10, "dx, dt and cfl should match!"
### for DDPG, have to carefully set the replay memory size. It should at least hold several trajectories.
if args.agent == 'ddpg':
args.batch_size = args.batch_size * args.num_process
args.replay_size = args.replay_traj_num
# evolve_step = int(args.trainT / (args.dt))
# num_x = int((args.x_high - args.x_low) / args.dx)
# pair_per_train = evolve_step * num_x * args.num_process
# args.batch_size = pair_per_train // args.ddpg_train_iter * 10
# args.replay_size = 100 * pair_per_train
return args
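# --- Illustrative usage sketch (not part of the original file) ---
# argparse reads sys.argv[1:], so overriding argv makes the parse reproducible;
# note the assert above requires dx * cfl == dt.
#
# import sys
# sys.argv = ['train.py', '--dx', '0.04', '--cfl', '0.1', '--dt', '0.004']
# args = get_args()
# print(args.state_dim, args.action_dim)  # 7, 8 (hard-coded above for the weno-coef mode)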
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,748
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/scripts/test_trained_model_forcing.py
|
'''
this file tests RL-WENO under the setting of L3D.
'''
import json
import os
import os.path as osp
from BurgersEnv.Burgers import Burgers
from DDPG.DDPG_new import DDPG
from DDPG.util import GaussNoise
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
import utils.ptu as ptu
import time
import torch
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--eta', type=float, default=0)
args = parser.parse_args()
# non-viscous model
# path = 'data/seuss/9-20-many-64-multiple-dx-normalize-eta-0-all-break/9-20-many-64-multiple-dx-normalize-eta-0-all-break_2020_09_20_04_04_24_0006/'
# epoch = '1200'
# forcing model
# path = 'data/seuss/9-25-many-64-multiple-dx-forcing-eta-0.01/9-25-many-64-multiple-dx-forcing-eta-0.01_2020_09_25_05_35_10_0006/'
# epoch = '4900'
# viscous model
path = 'data/seuss/9-23-many-64-multiple-dx-normalize-eta-0.01/9-23-many-64-multiple-dx-normalize-eta-0.01_2020_09_25_03_49_37_0005/'
epoch = '12800'
vv = json.load(open(osp.join(path, 'variant.json')))
if vv.get('batch_norm') is None:
vv['batch_norm'] = False
if vv.get('dx') is None:
vv['dx'] = 0.04
if vv.get('eta') is None:
vv['eta'] = 0
eta_list = [0.01, 0.02, 0.04]
dx_list = [0.02, 0.04, 0.05]
dx_list = [dx * np.pi for dx in dx_list]
RL_errors = np.zeros((len(eta_list), len(dx_list), 2))
weno_errors = np.zeros((len(eta_list), len(dx_list), 2))
for eta_idx, eta in enumerate(eta_list):
vv['eta'] = eta
vv['solution_data_path'] = 'data/local/solutions/9-24-50-eta-{}-forcing-1-regenerate'.format(eta)
print(vv)
ddpg = DDPG(vv, GaussNoise(initial_sig=vv['noise_beg'], final_sig=vv['noise_end']))
agent = ddpg
agent.load(osp.join(path, epoch), actor_only=True)
print("agent loaded!")
env = Burgers(vv, agent=agent)
len_dt_list = 1
RL_all_errors = []
weno_all_errors = []
for x_idx, dx in enumerate(dx_list):
dt_list = [dx * 0.1]
for t_idx, dt in enumerate(dt_list):
print("test on eta {} dx {} dt {}".format(eta, dx, dt))
num_t = int(0.9 * np.pi / dt)
RL_error = []
weno_error = []
for solution_idx in range(25, 50):
pre_state = env.reset(solution_idx=solution_idx, num_t=num_t, dx=dx, dt=dt)
horizon = env.num_t
for t in range(1, horizon):
action = agent.action(pre_state, deterministic=True) # action: (state_dim -2, 1) batch
next_state, reward, done, _ = env.step(action, Tscheme='rk4')
pre_state = next_state
error, relative_error = env.error('rk4')
weno_error_rk4 = env.weno_error_rk4
RL_error.append(error)
weno_error.append(weno_error_rk4)
print("solution_idx {} relative error {}".format(solution_idx, relative_error))
RL_all_errors.append(RL_error)
weno_all_errors.append(weno_error)
RL_errors[eta_idx][x_idx][0] = np.mean(RL_error)
weno_errors[eta_idx][x_idx][0] = np.mean(weno_error)
RL_errors[eta_idx][x_idx][1] = np.std(RL_error)
weno_errors[eta_idx][x_idx][1] = np.std(weno_error)
data_dict = {
'eta_list': eta_list,
'dx_list': dx_list,
'error_RL': RL_errors,
'error_weno': weno_errors,
'all_errors_RL': RL_all_errors,
'all_errors_weno': weno_all_errors
}
torch.save(data_dict, 'data/ComPhy/RL_and_weno_error_forcing_eta_{}.pkl'.format(epoch))
for x_idx, dx in enumerate(dx_list):
for eta_idx, eta in enumerate(eta_list):
RL_error_mean = round(RL_errors[eta_idx][x_idx][0] * 100, 2)
weno_error_mean = round(weno_errors[eta_idx][x_idx][0] * 100, 2)
RL_error_std = round(RL_errors[eta_idx][x_idx][1] * 100, 2)
weno_error_std = round(weno_errors[eta_idx][x_idx][1] * 100, 2)
print("& {} ({}) & {} ({})".format(RL_error_mean, RL_error_std,
weno_error_mean, weno_error_std), end=' ')
print('\\\\\\hline')
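# Illustrative note: the loop above prints one LaTeX table row per dx, with one
# "& RL_mean (RL_std) & WENO_mean (WENO_std)" cell pair per eta (errors scaled
# to percent), each row terminated by \\\hline.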
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,749
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/Weno/finite_difference_weno.py
|
'''
File Description:
This file contains the class weno3_fd, which implements the 5-th order finite difference WENO.
See the bottom for sample usage.
'''
import numpy as np
import copy
from matplotlib import pyplot as plt
class weno3_fd():
'''
This class implements the finite difference 5-th order WENO scheme.
Arg args (python namespace):
should contain the following domain: x_high, x_low, dx, cfl, T, flux, Tscheme
Arg init_value (np array):
specifies the initial value.
'''
def __init__(self, args, init_value=None, forcing=None, num_x=None, num_t=None, dt=None, dx=None):
self.args = copy.copy(args)
self.x_high, self.x_low = args.x_high, args.x_low
self.dx, self.dt, self.T = args.dx, args.dx * args.cfl, args.T
if dx is not None:
self.dx = dx
if dt is not None:
self.dt = dt
if num_x is None:
self.num_x = int((self.x_high - self.x_low) / self.dx + 1)
else:
self.num_x = num_x
if num_t is None:
self.num_t = int(self.T/self.dt + 1)
else:
self.num_t = num_t
self.grid = np.zeros((self.num_t, self.num_x)) # record the value at each (x,t) point
self.grid[0,:] = init_value
self.eta = args.eta # coefficient of viscous term
self.forcing = forcing # the forcing term
self.x_grid = np.linspace(self.x_low, self.x_high, self.num_x)
def flux(self, u):
if self.args.flux == 'u2':
return u ** 2 / 2.
elif self.args.flux == 'u4':
return u ** 4 / 16.
elif self.args.flux == 'u3':
return u ** 3 / 9.
elif self.args.flux == 'BL':
return u ** 2 / (u ** 2 + 0.5 * (1-u) ** 2)
elif self.args.flux.startswith("linear"):
a = float(self.args.flux[len('linear'):])
return a * u
def expand_boundary(self, val, left_width, right_width = None, mode = 'periodic'):
'''
expand the boundary points.
'''
if right_width is None:
right_width = left_width
if mode == 'periodic':
tmp = list(val[-left_width - 1:-1]) + list(val) + list(val[1:right_width + 1])
elif mode == 'outflow':
tmp = list(val[:left_width]) + list(val) + list(val[-right_width:])
else:
raise ValueError('Invalid boundary condition!')
return tmp
def get_flux(self, u):
u_expand = self.expand_boundary(u, 3)
flux_left = np.zeros(self.num_x + 1)
flux_right = np.zeros(self.num_x + 1)
flux = np.zeros(self.num_x + 1)
dleft2, dleft1, dleft0 = 0.1, 0.6, 0.3 ### ideal WENO weights for the left-biased reconstruction at the interface (the 'minus' flux in the book)
dright2, dright1, dright0 = 0.3, 0.6, 0.1
for i in range(self.num_x + 1):
left_used = u_expand[i:i+5]
right_used = u_expand[i+1:i+6]
fl = self.flux(np.array(left_used))
fr = self.flux(np.array(right_used))
betal0 = 13 / 12 * (fl[2] - 2 * fl[3] + fl[4]) ** 2 + 1 / 4 * (3 * fl[2] - 4 * fl[3] + fl[4]) ** 2
betal1 = 13 / 12 * (fl[1] - 2 * fl[2] + fl[3]) ** 2 + 1 / 4 * (fl[1] - fl[3]) ** 2
betal2 = 13 / 12 * (fl[0] - 2 * fl[1] + fl[2]) ** 2 + 1 / 4 * (fl[0] - 4 * fl[1] + 3 * fl[2]) ** 2
betar0 = 13 / 12 * (fr[2] - 2 * fr[3] + fr[4]) ** 2 + 1 / 4 * (3 * fr[2] - 4 * fr[3] + fr[4]) ** 2
betar1 = 13 / 12 * (fr[1] - 2 * fr[2] + fr[3]) ** 2 + 1 / 4 * (fr[1] - fr[3]) ** 2
betar2 = 13 / 12 * (fr[0] - 2 * fr[1] + fr[2]) ** 2 + 1 / 4 * (fr[0] - 4 * fr[1] + 3 * fr[2]) ** 2
eps = 1e-6
alphal0 = dleft0 / (betal0 + eps) ** 2
alphal1 = dleft1 / (betal1 + eps) ** 2
alphal2 = dleft2 / (betal2 + eps) ** 2
wl0 = alphal0 / (alphal0 + alphal1 + alphal2)
wl1 = alphal1 / (alphal0 + alphal1 + alphal2)
wl2 = alphal2 / (alphal0 + alphal1 + alphal2)
alphar0 = dright0 / (betar0 + eps) ** 2
alphar1 = dright1 / (betar1 + eps) ** 2
alphar2 = dright2 / (betar2 + eps) ** 2
wr0 = alphar0 / (alphar0 + alphar1 + alphar2)
wr1 = alphar1 / (alphar0 + alphar1 + alphar2)
wr2 = alphar2 / (alphar0 + alphar1 + alphar2)
fl2 = fl[0] * 1 / 3 + fl[1] * (- 7 / 6) + fl[2] * 11 / 6
fl1 = fl[1] * (-1 / 6) + fl[2] * (5 / 6) + fl[3] * 1 / 3
fl0 = fl[2] * 1 / 3 + fl[3] * (5 / 6) + fl[4] * -1 / 6
fr2 = fr[0] * -1 / 6 + fr[1] * (5 / 6) + fr[2] * 1 / 3
fr1 = fr[1] * (1 / 3) + fr[2] * (5 / 6) + fr[3] * -1 / 6
fr0 = fr[2] * 11 / 6 + fr[3] * (-7 / 6) + fr[4] * 1 / 3
flux_left[i] = wl0 * fl0 + wl1 * fl1 + wl2 * fl2
flux_right[i] = wr0 * fr0 + wr1 * fr1 + wr2 * fr2
if self.args.flux == 'u2' or self.args.flux == 'u4':
roe = (u_expand[i + 3] + u_expand[i + 2]) ### for f = u^2/2 the Roe speed is (u_i + u_{i+1})/2; only its sign is used
elif self.args.flux == 'u3':
roe = (u_expand[i + 3] ** 2 + u_expand[i + 2] ** 2 + u_expand[i + 2] * u_expand[i + 3])
judge = roe
if judge >= 0:
flux[i] = flux_left[i]
else:
flux[i] = flux_right[i]
return flux[:-1], flux[1:]
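# Illustrative note: get_flux returns the per-cell (left-interface,
# right-interface) flux pair; evolve() below uses the obtain_flux variant,
# which returns all num_x + 1 interface fluxes in a single array.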
def obtain_flux(self, u, identity=False):
u_expand = self.expand_boundary(u, 3)
flux_left = np.zeros(self.num_x + 1)
flux_right = np.zeros(self.num_x + 1)
flux = np.zeros(self.num_x + 1)
dleft2, dleft1, dleft0 = 0.1, 0.6, 0.3 ### ideal WENO weights for the left-biased reconstruction at the interface (the 'minus' flux in the book)
dright2, dright1, dright0 = 0.3, 0.6, 0.1
for i in range(self.num_x + 1):
left_used = u_expand[i:i+5]
right_used = u_expand[i+1:i+6]
if not identity:
fl = self.flux(np.array(left_used))
fr = self.flux(np.array(right_used))
else:
fl = np.array(left_used)
fr = np.array(right_used)
betal0 = 13 / 12 * (fl[2] - 2 * fl[3] + fl[4]) ** 2 + 1 / 4 * (3 * fl[2] - 4 * fl[3] + fl[4]) ** 2
betal1 = 13 / 12 * (fl[1] - 2 * fl[2] + fl[3]) ** 2 + 1 / 4 * (fl[1] - fl[3]) ** 2
betal2 = 13 / 12 * (fl[0] - 2 * fl[1] + fl[2]) ** 2 + 1 / 4 * (fl[0] - 4 * fl[1] + 3 * fl[2]) ** 2
betar0 = 13 / 12 * (fr[2] - 2 * fr[3] + fr[4]) ** 2 + 1 / 4 * (3 * fr[2] - 4 * fr[3] + fr[4]) ** 2
betar1 = 13 / 12 * (fr[1] - 2 * fr[2] + fr[3]) ** 2 + 1 / 4 * (fr[1] - fr[3]) ** 2
betar2 = 13 / 12 * (fr[0] - 2 * fr[1] + fr[2]) ** 2 + 1 / 4 * (fr[0] - 4 * fr[1] + 3 * fr[2]) ** 2
eps = 1e-6
alphal0 = dleft0 / (betal0 + eps) ** 2
alphal1 = dleft1 / (betal1 + eps) ** 2
alphal2 = dleft2 / (betal2 + eps) ** 2
wl0 = alphal0 / (alphal0 + alphal1 + alphal2)
wl1 = alphal1 / (alphal0 + alphal1 + alphal2)
wl2 = alphal2 / (alphal0 + alphal1 + alphal2)
alphar0 = dright0 / (betar0 + eps) ** 2
alphar1 = dright1 / (betar1 + eps) ** 2
alphar2 = dright2 / (betar2 + eps) ** 2
wr0 = alphar0 / (alphar0 + alphar1 + alphar2)
wr1 = alphar1 / (alphar0 + alphar1 + alphar2)
wr2 = alphar2 / (alphar0 + alphar1 + alphar2)
fl2 = fl[0] * 1 / 3 + fl[1] * (- 7 / 6) + fl[2] * 11 / 6
fl1 = fl[1] * (-1 / 6) + fl[2] * (5 / 6) + fl[3] * 1 / 3
fl0 = fl[2] * 1 / 3 + fl[3] * (5 / 6) + fl[4] * -1 / 6
fr2 = fr[0] * -1 / 6 + fr[1] * (5 / 6) + fr[2] * 1 / 3
fr1 = fr[1] * (1 / 3) + fr[2] * (5 / 6) + fr[3] * -1 / 6
fr0 = fr[2] * 11 / 6 + fr[3] * (-7 / 6) + fr[4] * 1 / 3
flux_left[i] = wl0 * fl0 + wl1 * fl1 + wl2 * fl2
flux_right[i] = wr0 * fr0 + wr1 * fr1 + wr2 * fr2
# roe = (self.flux(u_expand[i + 3]) - self.flux(u_expand[i + 2])) / (u_expand[i + 3] - u_expand[i + 2])
if self.args.flux == 'u2' or self.args.flux == 'u4':
roe = (u_expand[i + 3] + u_expand[i + 2]) ### last line reduces to this line with f = 1/2 u ** 2
elif self.args.flux == 'u3':
# print('enter here')
roe = (u_expand[i + 3] ** 2 + u_expand[i + 2] ** 2 + u_expand[i + 2] * u_expand[i + 3])
elif self.args.flux == 'BL':
roe = 0.5 * (u_expand[i + 3] + u_expand[i + 2]) - u_expand[i + 2] * u_expand[i + 3]
elif self.args.flux.startswith("linear"):
roe = float(self.args.flux[len('linear'):])
if identity:
roe = 1
judge = roe
if judge >= 0:
flux[i] = flux_left[i]
else:
flux[i] = flux_right[i]
return flux
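# Illustrative note on the upwinding above: the sign of the Roe speed
#     s = (f(u_{i+1}) - f(u_i)) / (u_{i+1} - u_i)
# selects the reconstruction: s >= 0 takes the left-biased flux, s < 0 the
# right-biased one; only the sign of s is ever used.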
def evolve(self, u, i):
### below implements weno
### here, left and right means the minus and plus (i.e., upwind direction) when computing the flux at the same location.
def rhs(u):
flux = self.obtain_flux(u)
rhs = -(flux[1:] - flux[:-1]) / self.dx
if self.eta > 0:
# identity_flux = self.obtain_flux(u, identity=True)
# u_x = (identity_flux[1:] - identity_flux[:-1]) / self.dx
# identity_flux = self.obtain_flux(u_x, identity=True)
# u_xx = (identity_flux[1:] - identity_flux[:-1]) / self.dx
# u_x = np.zeros_like(u)
# u_x[1:-1] = (u[2:] - u[:-2]) / (2 * self.dx)
# u_x[0] = (u[1] - u[-2]) / (2 * self.dx)
# u_x[-1] = (u[1] - u[-2]) / (2 * self.dx)
# u_xx = np.zeros_like(u_x)
# u_xx[1:-1] = (u_x[2:] - u_x[:-2]) / (2 * self.dx)
# u_xx[0] = (u_x[1] - u_x[-2]) / (2 * self.dx)
# u_xx[-1] = (u_x[1] - u_x[-2]) / (2 * self.dx)
u_xx = np.zeros_like(u)
u_xx[1:-1] = (u[2:] + u[:-2] - 2 * u[1:-1]) / (self.dx ** 2)
u_xx[0] = (u[1] + u[-2] - 2 * u[0]) / (self.dx ** 2)
u_xx[-1] = (u[1] + u[-2] - 2 * u[-1]) / (self.dx ** 2)
rhs += self.eta * u_xx
if self.forcing is not None:
period = self.x_high - self.x_low
rhs += self.forcing(x=self.x_grid, t=i*self.dt, period=period)
return rhs
### three-stage SSP Runge-Kutta (TVD RK3) time stepping; the codebase labels it 'rk4'
u_next_1 = u + self.dt * rhs(u)
u_next_2 = (3 * u + u_next_1 + self.dt * rhs(u_next_1)) / 4
u_next = (u + 2 * u_next_2 + 2 * self.dt * rhs(u_next_2)) / 3
if self.args.Tscheme == 'rk4':
return u_next
elif self.args.Tscheme == 'euler':
return u_next_1
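# Illustrative note: the three stages above form the Shu-Osher SSP-RK3
# (TVD RK3) update,
#     u1 = u + dt * L(u)
#     u2 = 3/4 * u + 1/4 * (u1 + dt * L(u1))
#     u  = 1/3 * u + 2/3 * (u2 + dt * L(u2)),
# and returning u_next_1 alone is the plain forward-Euler step.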
def solve(self):
for i in range(1, self.num_t):
self.grid[i] = self.evolve(self.grid[i-1], i-1)
return self.grid
if __name__ == '__main__':
import argparse, sys
args = argparse.ArgumentParser(sys.argv[0])
args.add_argument('--x_low', type = float, default = 0)
args.add_argument('--x_high', type = float, default = 1)
args.add_argument('--dx', type = float, default = 0.01)
args.add_argument('--cfl', type = float, default = 0.1)
args.add_argument('--T', type = float, default = 0.07)
args.add_argument('--Tscheme', type = str, default = 'euler')
args.add_argument('--flux', type = str, default = 'u2')
args.add_argument('--eta', type = float, default = 0)
args.add_argument('--save_path', type = str, default =None)
args = args.parse_args()
def init_simple(x, t=0):
return 0.5 + np.sin(2 * np.pi * x)
def show(solutions, x_grid, save_path=None):
"""
plot an animation of the evolving process.
solutions: solution array returned by get_weno_grid.
x_grid: e.g. [-1, -0.96, ..., 0.96, 1]
"""
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import animation
### you need to install ffmpeg if you want to store the animations
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
x_low = x_grid[0]
x_high = x_grid[-1]
num_x = len(x_grid)
num_t = len(solutions)
fig = plt.figure(figsize = (15, 10))
ax = fig.add_subplot(1,1,1)
ax.set_xlim((x_low, x_high))
ymin = np.min(solutions[0]) - 0.1
ymax = np.max(solutions[0]) + 0.1
ax.set_ylim((ymin, ymax)) ## you might want to change these params
line, = ax.plot(x_grid, [0 for _ in range(num_x)], lw = 2, label = 'solution')
def init():
line.set_data([], [])
return line
def func(i):
# print('make animations, step: ', i)
x = np.linspace(x_low, x_high, num_x)
y = solutions[i]
line.set_data(x, y)
return line
anim = animation.FuncAnimation(fig=fig, func=func, init_func=init, frames=num_t, interval=200)
plt.legend()
plt.title('Solutions')
plt.tight_layout()
if save_path is not None:
save_name = save_path
anim.save("./data/video/" + save_name + ".mp4", writer=writer)
# you need to install "ffmpeg" for storing animations.
plt.show()
plt.close()
init = init_simple
num_x = int((args.x_high - args.x_low) / args.dx + 1)
x_grid = np.linspace(args.x_low, args.x_high, num_x)
init_value = init(x_grid)
# plt.plot(range(len(x_grid)), x_grid)
# plt.plot(x_grid, init_value)
# plt.show()
# import time
# test_time = 20
# fd_weno_solver = weno3_fd(args, init_value = init_value)
# timecosts = []
# for i in range(test_time):
# start = time.time()
# solutions = fd_weno_solver.solve()
# tmp = time.time() - start
# print("test {}, use time {}".format(i, tmp))
# timecosts.append(tmp)
# print("poor weno average time: ", np.mean(timecosts))
fd_weno_solver = weno3_fd(args, init_value = init_value)
solutions = fd_weno_solver.solve()
show(solutions, x_grid, args.save_path)
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,750
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/scripts/generate_solutions.py
|
'''
File Description:
randomly generate initial conditions in the form
''a + b * sin(c * \\pi * x) + d * cos(e * \\pi * x)'', with c, e in {4, 6}
pre-compute and store the solutions in the dir ../weno_solutions/.
Name conventions for precise solutions:
'../weno_solutions/{}-precise-{}-{}.npy'.format(init_condition_name, flux_name, cfl_value)
Name conventions for weno solutions under coarse grid:
'../weno_solutions/{}-coarse-{}-{}-{}-{}'.format(init_condition_name, Tscheme, dx,
flux_name, cfl_value)
'''
import numpy as np
import time
import math
import copy
from Weno.weno3_2 import Weno3, weno3_fv
from Weno.finite_difference_weno import weno3_fd
import os
import os.path as osp
import torch
from matplotlib import pyplot as plt
from matplotlib import animation
from scipy import interpolate
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
def random_init_condition():
'''
randomly generate initial conditions in the following form
a + b * np.sin(c * pi * x) + d * np.cos(e * pi * x).
c, e sampled from {4, 6}
|a| + |b| + |d| = 4
'''
a = np.random.uniform(-1.2, 1.2)
b = np.random.uniform(-3 + np.abs(a), 3 - np.abs(a))
abs_d = 4 - np.abs(a) - np.abs(b)
if np.random.rand() > 0.5:
d = abs_d
else:
d = -abs_d
# print(a, b, d)
c = np.random.choice([4, 6])
e = np.random.choice([4, 6])
return a, b, c, d, e
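# Illustrative sanity check (the sampling keeps |a| + |b| + |d| = 4):
# a, b, c, d, e = random_init_condition()
# assert abs(abs(a) + abs(b) + abs(d) - 4) < 1e-12 and c in (4, 6) and e in (4, 6)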
def get_precise_value(reference_solution, precise_dt, T, precise_x_grid, corase_x_grid):
t_idx = int(T / precise_dt)
precise_val = reference_solution[t_idx]
f = interpolate.interp1d(precise_x_grid, precise_val)
ret = f(corase_x_grid)
return ret
def relative_error(ref, sol, norm=2):
return np.linalg.norm(ref - sol, norm) / np.linalg.norm(ref, norm)
def show(args, weno_coarse_rk4, reference_solution, dx=None, save_path=None, save_name=None, title=None):
fig = plt.figure(figsize = (15, 10))
ax = fig.add_subplot(2,1,1)
ax.set_xlim((args.x_low ,args.x_high))
# ymin, ymax = np.min(weno_coarse_rk4[0]) - 0.1, np.max(weno_coarse_rk4[0]) + 0.1
ymin, ymax = np.min(reference_solution) - 0.1, np.max(reference_solution) + 0.1
ax.set_ylim((ymin, ymax))
num_x = len(weno_coarse_rk4[0])
x_grid = np.linspace(args.x_low, args.x_high, num_x, dtype = np.float64)
lineweno, = ax.plot(x_grid, [0 for _ in range(num_x)] ,lw=2, label = 'reference')
lineweno_coarse, = ax.plot(x_grid, [0 for _ in range(num_x)], lw = 2, label = 'WENO')
dt = dx * args.cfl
num_t = int(args.T / dt) + 1 - 10
draw_data = np.zeros((num_t, 2*num_x))
draw_data[:, num_x:num_x*2] = weno_coarse_rk4[:num_t, :]
# factor = int(dx / args.precise_dx)
# print(f"dt {dt} factor {factor} precise_dx {args.precise_dx} cfl {args.cfl}")
precise_x_grid = np.linspace(args.x_low, args.x_high, len(reference_solution[0]))
for t in range(num_t):
draw_data[t, :num_x] = get_precise_value(reference_solution, args.precise_dx * args.cfl, t * dt, precise_x_grid, x_grid) # interpolate the reference solution onto the coarse grid for plotting
error_ax = fig.add_subplot(2,1,2)
coarse_error = np.zeros(num_t)
for i in range(num_t):
coarse_error[i] = relative_error(draw_data[i, :num_x], draw_data[i, num_x:2*num_x])
weno_coarse_error_line, = error_ax.plot(range(num_t), coarse_error, 'b', lw = 2, label = 'weno_relative_error')
weno_coarse_error_point, = error_ax.plot([], [], 'bo', markersize = 5)
def init():
lineweno.set_data([],[])
lineweno_coarse.set_data([], [])
weno_coarse_error_point.set_data([],[])
lineweno.set_label('Reference')
lineweno_coarse.set_label('WENO')
return lineweno, lineweno_coarse, weno_coarse_error_point
def func(i):
# print('make animations, step: ', i)
x = np.linspace(args.x_low, args.x_high, num_x)
yweno = draw_data[i,:num_x]
yweno_coarse = draw_data[i, num_x:2*num_x]
lineweno.set_data(x, yweno)
lineweno_coarse.set_data(x, yweno_coarse)
weno_coarse_error_point.set_data(i, coarse_error[i])
return lineweno, lineweno_coarse, weno_coarse_error_point
ax.grid()
anim = animation.FuncAnimation(fig=fig, func=func, init_func=init, frames=num_t, interval=50)
plt.legend()
plt.title(title)
plt.tight_layout()
if save_path is not None:
anim.save(osp.join(save_path, save_name), writer=writer)
else:
plt.show()
plt.close()
def get_weno_grid(init_condition, dx = 0.02, dt = 0.004, T = 0.8, x_low = -1, x_high = 1,
boundary='periodic', eta=0, forcing=None, ncells=None, num_t=None, flux_name='u2'):
"""
dx: grid size. e.g. 0.02
dt: time step size. e.g. 0.004
T: evolve time. e.g. 1
x_low: if the grid is in the range [-1,1], then x_low = -1.
x_high: if the grid is in the range [-1,1], then x_high = 1.
init_condition: a function that computes the initial values of u. e.g. the init func above.
return: a grid of size num_t x num_x
where num_t = T/dt + 1
num_x = (x_high - x_low) / dx + 1
"""
left_boundary = x_low - dx * 0.5
right_boundary = x_high + dx * 0.5
if ncells is None:
ncells = int((x_high - x_low) / dx + 1.) # need to be very small
if num_t is None:
num_t = int(T/dt + 1.)
w = Weno3(left_boundary, right_boundary, ncells,
lambda x: flux(val=x, flux=flux_name),
lambda x: flux_deriv(val=x, flux=flux_name),
lambda a, b: max_flux_deriv(a=a, b=b, flux=flux_name),
dx = dx, dt = dt, num_t = num_t + 100, boundary=boundary,
eta=eta, forcing=forcing)
x_center = w.get_x_center()
u0 = init_condition(x_center)
w.integrate(u0, T, num_t=num_t)
solutions = w.u_grid[:num_t,:]
return solutions
### the Burgers flux function. Can be changed to any other function if needed for future usage.
def flux(val, flux='u2'):
if flux == 'u2':
return val ** 2 / 2.
elif flux == 'u4':
return val ** 4 / 16.
elif flux == 'u3':
return val ** 3 / 9.
elif flux == 'BL':
return val ** 2 / (val ** 2 + 0.5 * (1-val) ** 2)
elif flux.startswith("linear"):
a = float(flux[len('linear'):])
return a * val
def flux_deriv(val, flux='u2'):
if flux == 'u2':
return val
elif flux == 'u4':
return val ** 3 / 4.
elif flux == 'u3':
return val ** 2 / 3
elif flux == 'BL':
return val * (1 - val) / (val ** 2 + 0.5 * (1 - val) ** 2) ** 2  # analytic derivative of u^2 / (u^2 + 0.5(1-u)^2)
elif flux.startswith("linear"):
a = float(flux[len('linear'):])
return a
def max_flux_deriv(a, b, flux='u2'):
if flux == 'u2':
return np.maximum(np.abs(a), np.abs(b))
elif flux == 'u4':
return np.maximum(np.abs(a ** 3 / 4.), np.abs(b ** 3 / 4.))
elif flux == 'u3':
return np.maximum(np.abs(a ** 2 / 3.), np.abs(b ** 2 / 3.))
    elif flux == 'BL':
        # evaluate the BL flux derivative at both states before taking the maximum
        da = (a - a ** 2) / (a ** 2 + 0.5 * (1 - a) ** 2) ** 2
        db = (b - b ** 2) / (b ** 2 + 0.5 * (1 - b) ** 2) ** 2
        return np.maximum(np.abs(da), np.abs(db))
elif flux.startswith("linear"):
a = float(flux[len('linear'):])
return np.abs(a)
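# Editor's note: a quick symbolic sanity check of the Buckley-Leverett flux
# derivative used above (an editor's addition, not original code; assumes
# sympy is installed).
def _check_bl_flux_deriv():
    import sympy as sp
    u = sp.symbols('u')
    f = u ** 2 / (u ** 2 + sp.Rational(1, 2) * (1 - u) ** 2)
    expected = (u - u ** 2) / (u ** 2 + sp.Rational(1, 2) * (1 - u) ** 2) ** 2
    # simplifies to zero, confirming the closed form in flux_deriv
    assert sp.simplify(sp.diff(f, u) - expected) == 0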
def get_init_func(a, b, c, d, e):
def func(x, t=0):
return a + b * np.sin(c * np.pi * x) + d * np.cos(np.pi * e * x)
return func
def get_forcing():
nparams = 20
rs = np.random
k_min = 3
k_max = 6
    a = 0.5 * rs.uniform(-1, 1, size=(nparams, 1))
omega = rs.uniform(-0.4, 0.4, size=(nparams, 1))
k_values = np.arange(k_min, k_max + 1)
k = rs.choice(np.concatenate([-k_values, k_values]), size=(nparams, 1))
phi = rs.uniform(0, 2 * np.pi, size=(nparams, 1))
def func(x, t, period):
spatial_phase = (2 * np.pi * k * x / period)
signals = np.sin(omega * t + spatial_phase + phi)
reference_forcing = np.sum(a * signals, axis=0)
return reference_forcing
forcing_params = [a, omega, k, phi]
return func, forcing_params
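# Editor's note: a small illustration of the spectral forcing above (an
# editor's sketch, not original code). Each of the nparams terms is a
# travelling sine wave with random amplitude, temporal frequency, integer
# wavenumber and phase; summing them yields one forcing profile per call.
def _example_forcing():
    forcing_fn, _ = get_forcing()
    x = np.linspace(0, 2 * np.pi, 101)
    # the period argument should match the domain length so all modes stay periodic
    print(forcing_fn(x, t=0.0, period=2 * np.pi).shape)  # -> (101,)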
if __name__ == '__main__':
import argparse, sys
args = argparse.ArgumentParser(sys.argv[0])
args.add_argument('--prefix', type = str, default = '9-15-eta-0')
args.add_argument('--x_low', type = float, default = -1)
args.add_argument('--x_high', type = float, default = 1)
args.add_argument('--dx', type = float, default = 0.02)
args.add_argument('--cfl', type = float, default = 0.1)
args.add_argument('--T', type = float, default = 1.0)
args.add_argument('--precise_dx', type = float, default = 0.002)
args.add_argument('--initial_t', type = float, default = 0.)
args.add_argument('--num', type = int, default = 20)
args.add_argument('--flux', type = str, default = 'u2')
args.add_argument('--Tscheme', type = str, default = 'None')
args.add_argument('--eta', type = float, default = 0)
args.add_argument('--forcing', type = int, default = 0)
args = args.parse_args()
if args.forcing:
args.x_low = 0
args.x_high = 2 * np.pi
args.precise_dx = args.x_high / 1000.
args.T = np.pi
precise_num_t = 5000
if args.flux == 'u2':
np.random.seed(666)
else:
np.random.seed(555)
for i in range(53, args.num + 53):
print('init condition {}'.format(i))
a, b, c, d, e = random_init_condition()
print(a, b, c, d, e)
forcing = None
if args.forcing:
forcing, forcing_params = get_forcing()
            a, b, d = 0, 0, 0  # make the initial condition identically zero
init_func = get_init_func(a, b, c, d, e)
### compute and store precise solutions
fine_num_x = 1001
        fine_x_grid = np.linspace(args.x_low, args.x_high, fine_num_x, dtype = np.float64)  # fine reference grid (fine_num_x points)
fine_solution = get_weno_grid(init_func, dx=args.precise_dx, dt=args.precise_dx * args.cfl,
T=args.T, x_low=args.x_low, x_high=args.x_high, eta=args.eta, forcing=forcing,
ncells=fine_num_x, num_t=precise_num_t, flux_name=args.flux)
### compute and store weno solutions under coarse grids
dx_list = [0.05, 0.04, 0.02]
        coarse_num_x_list = [41, 51, 101]
        coarse_num_t_list = [200, 250, 500]
weno_coarse_solutions = {'rk4': {}, 'euler': {}}
for dx_idx, dx in enumerate(dx_list):
if args.forcing:
dx *= np.pi
print('dx: ', dx)
args.dx = dx
for tscheme in ['rk4', 'euler']:
args.Tscheme = tscheme
                coarse_num_x = coarse_num_x_list[dx_idx]
                coarse_num_t = coarse_num_t_list[dx_idx]
                coarse_x_grid = np.linspace(args.x_low, args.x_high, coarse_num_x, dtype = np.float64)  # coarse grid for this dx
                init_value = a + b * np.sin(c * np.pi * coarse_x_grid) + d * np.cos(e * np.pi * coarse_x_grid)
                coarse_solver = weno3_fd(args, init_value=init_value, forcing=forcing, num_x=coarse_num_x, num_t=coarse_num_t)
                coarse_solution = coarse_solver.solve()
                weno_coarse_solutions[tscheme][str(round(dx, 2))] = coarse_solution
# num_t = int(args.T / (dx * args.cfl)) - 10
# factor = int(dx / args.precise_dx)
# for idx_t in range(num_t):
# fig = plt.figure(figsize=(15, 7))
# plt.grid()
# plt.plot(coarse_x_grid, weno_coarse_solutions[1][idx_t], 'bo-')
# plt.plot(fine_x_grid, fine_solution[idx_t * int(dx / args.precise_dx)], 'r')
# plt.show()
### store the computed solutions
prefix = args.prefix
if not osp.exists('./data/local/solutions/{}'.format(prefix)):
os.makedirs('data/local/solutions/{}'.format(prefix), exist_ok=True)
# for dx in dx_list:
# if args.forcing:
# dx = dx * np.pi
# show(args, weno_coarse_solutions['rk4'][str(round(dx, 2))], fine_solution, dx=dx, title=str(i),
# save_path='data/local/solutions/{}'.format(prefix), save_name="{}-{}.mp4".format(i, dx))
data = {
'precise_solution': fine_solution,
'coarse_solution_rk4': weno_coarse_solutions['rk4'],
'coarse_solution_euler': weno_coarse_solutions['euler'],
'a': a,
'b': b,
'c': c,
'd': d,
'e': e,
'args': args
}
if args.forcing:
data['forcing'] = forcing_params
torch.save(data, 'data/local/solutions/{}/{}.pkl'.format(prefix, i))
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,751
|
simonat2011/L2D-learning-to-discretize
|
refs/heads/master
|
/scripts/test_trained_model_mesh.py
|
'''
This file tests RL-WENO on the inviscid Burgers equation, without forcing, with different initial conditions.
'''
import json
import os
import os.path as osp
from BurgersEnv.Burgers import Burgers
from DDPG.DDPG_new import DDPG
from DDPG.util import GaussNoise
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
import utils.ptu as ptu
import time
import torch
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--flux', type=str, default='u2')
args = parser.parse_args()
path = 'data/seuss/9-20-many-64-multiple-dx-normalize-eta-0-all-break/9-20-many-64-multiple-dx-normalize-eta-0-all-break_2020_09_20_04_04_24_0006/'
epoch = '1200'
vv = json.load(open(osp.join(path, 'variant.json')))
if vv.get('batch_norm') is None:
vv['batch_norm'] = False
if vv.get('dx') is None:
vv['dx'] = 0.04
if vv.get('eta') is None:
vv['eta'] = 0
vv['flux'] = args.flux
if args.flux == 'u2':
vv['solution_data_path'] = 'data/local/solutions/8-14-50'
elif args.flux == 'u4':
vv['solution_data_path'] = 'data/local/solutions/9-24-50-u4-eta-0-forcing-0'
else:
raise NotImplementedError
print(vv)
ddpg = DDPG(vv, GaussNoise(initial_sig=vv['noise_beg'], final_sig=vv['noise_end']))
agent = ddpg
agent.load(osp.join(path, epoch), actor_only=True)
print("agent loaded!")
env = Burgers(vv, agent=agent)
dt_list = [0.002, 0.003, 0.004, 0.005, 0.006]
dx_list = [0.02, 0.04, 0.05]
len_dt_list = len(dt_list)
RL_errors = np.zeros((len(dx_list), len_dt_list, 2))
weno_errors = np.zeros((len(dx_list), len_dt_list, 2))
RL_all_errors = []
weno_all_errors = []
for x_idx, dx in enumerate(dx_list):
for t_idx, dt in enumerate(dt_list):
print("test on dt {} dx {}".format(dt, dx))
num_t = int(0.9 / dt)
RL_error = []
weno_error = []
for solution_idx in range(25):
pre_state = env.reset(solution_idx=solution_idx, num_t=num_t, dx=dx, dt=dt, weno_regenerate=True)
horizon = env.num_t
for t in range(1, horizon):
action = agent.action(pre_state, deterministic=True) # action: (state_dim -2, 1) batch
next_state, reward, done, _ = env.step(action, Tscheme='rk4')
pre_state = next_state
error, relative_error = env.error('rk4')
weno_error_rk4 = env.weno_error_rk4
RL_error.append(error)
weno_error.append(weno_error_rk4)
print("solution_idx {} relative error {}".format(solution_idx, relative_error))
RL_all_errors.append(RL_error)
weno_all_errors.append(weno_error)
RL_errors[x_idx][t_idx][0] = np.mean(RL_error)
weno_errors[x_idx][t_idx][0] = np.mean(weno_error)
RL_errors[x_idx][t_idx][1] = np.std(RL_error)
weno_errors[x_idx][t_idx][1] = np.std(weno_error)
# data_dict = {
# 'dt_list': dt_list,
# 'dx_list': dx_list,
# 'error_RL': RL_errors,
# 'error_weno': weno_errors,
# 'all_errors_RL': RL_all_errors,
# 'all_errors_weno': weno_all_errors
# }
# torch.save(data_dict, 'data/ComPhy/RL_and_weno_error_mesh_and_flux_{}.pkl'.format(args.flux))
for t_idx, dt in enumerate(dt_list):
print(dt, end = ' ')
for x_idx, dx in enumerate(dx_list):
RL_error_mean = round(RL_errors[x_idx][t_idx][0] * 100, 2)
weno_error_mean = round(weno_errors[x_idx][t_idx][0] * 100, 2)
RL_error_std = round(RL_errors[x_idx][t_idx][1] * 100, 2)
weno_error_std = round(weno_errors[x_idx][t_idx][1] * 100, 2)
print("\& {} ({}) \& {} ({})".format(RL_error_mean, RL_error_std,
weno_error_mean, weno_error_std), end=' ')
print('\\\hline')
# for x_idx, dx in enumerate(dx_list):
# print("({}, {})".format(dx, dx * 0.1), end='')
# RL_error = round(RL_errors[x_idx] * 100, 2)
# weno_error = round(weno_errors[x_idx] * 100, 2)
# print("\& {} \& {}".format(RL_error, weno_error), end=' ')
# print('\\\hline')
|
{"/launchers/train_td3.py": ["/get_args.py", "/DDPG/train_util.py", "/DDPG/DDPG_new.py", "/BurgersEnv/Burgers.py"], "/scripts/make_video.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/launchers/launch_td3.py": ["/launchers/train_td3.py"], "/BurgersEnv/Burgers.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/DDPG/DDPG_new.py": ["/DDPG/torch_networks.py"], "/scripts/test_trained_model_forcing.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"], "/scripts/generate_solutions.py": ["/Weno/weno3_2.py", "/Weno/finite_difference_weno.py"], "/scripts/test_trained_model_mesh.py": ["/BurgersEnv/Burgers.py", "/DDPG/DDPG_new.py"]}
|
39,766
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/models/knn.py
|
import numpy as np
from sklearn.linear_model import HuberRegressor
from sklearn.neighbors import NearestNeighbors
from scipy import stats
from smnsr.models import FROM, TO, PTID, FORECAST_Y
from sklearn.preprocessing import StandardScaler
from sklearn.base import clone
import seaborn as sns
sns.set(style="darkgrid")
import matplotlib.pyplot as plt
import pandas as pd
import re
FORECAST = "forecast"
SIMILARITIES = "similarities"
STD = "std"
SKEW = "skew"
FUTURE_MIN = "min"
FUTURE_MAX = "max"
BASELINE = "value"
def weightedL2(a,b,**kwargs):
weight = kwargs["w"]
q = a-b
return np.sqrt((weight*(q*q)).sum())
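# Editor's note: a minimal sketch of how weightedL2 is consumed (editor's
# addition, not original code). sklearn's NearestNeighbors forwards the weight
# vector through metric_params, exactly as done in KNNSR.fit below; the toy
# weights here are illustrative only.
def _example_weighted_l2():
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.0, 0.0, 3.0])
    w = np.array([1.0, 5.0, 1.0])
    print(weightedL2(a, b, w=w))  # sqrt(5 * 2**2) ~= 4.4721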
class KNNSR:
def __init__(self, tadpole_data,
n_neighbors=400,
aligning_n_neighbors=30,
modality="cognitive2",
target_value='ADAS13',
n_jobs = 16,
column_weights = [['AGE', 5], ['DX', 5], ['ADAS13', 5]]):
self.tadpole_data = tadpole_data
self.n_neighbors = n_neighbors
self.aligning_n_neighbors = aligning_n_neighbors
self.column_weights = column_weights
self.modality = modality
self.target_value = target_value
self.n_jobs = n_jobs
self.scaler = StandardScaler()
self.fitted = False
#self.regression_models = {"rf_forecast":RandomForestRegressor(n_estimators=10, n_jobs=4), "hr_forecast":HuberRegressor(fit_intercept=True, max_iter=200)}
self.regression_models = {"hr_forecast":HuberRegressor(fit_intercept=True, max_iter=200)}
self.training_ptids = []
def __create_distance_weights(self, data):
weight_vector = np.ones((1, len(data.columns)))
for c in self.column_weights:
p = re.compile('^'+c[0])
column_indexes = [ i for i,f in enumerate(data.columns) if p.match(f) ]
weight_vector[0, column_indexes] = c[1]
return weight_vector
def fit(self, x, y=0):
self.training_ptids = x
self.training_x,self.training_y, self.training_meta = self.tadpole_data.getXY(x,self.modality,self.target_value,retrieve_meta=True,include_target=True)
weights = self.__create_distance_weights(self.training_x)
self.scaler.fit(self.training_x)
self.training_x = self.scaler.transform(self.training_x)
knn = NearestNeighbors(n_neighbors=self.n_neighbors,
n_jobs=self.n_jobs, metric=weightedL2,
metric_params={ 'w' : weights}
)
self.knn_model = knn.fit(self.training_x)
self.fitted = True
def predict(self, x,y=None):
"""
For compatibility with sklearn, perform predictions on an input matrix of
        two columns consisting of ptids and time points
"""
forecast_list = {}
similarities_list = {}
std_list = {}
skew_list = {}
min_list = {}
max_list = {}
value_list = {}
if isinstance(x,np.ndarray):
n_samples = x.shape[0]
predicted_timepoints = x[:,2]
else:
n_samples = x[0].shape[0]
predicted_timepoints = [ x[1] for i in range(0,n_samples)]
x = x[0]
if y is None:
y = np.zeros(predicted_timepoints[0].shape[0])
for pt in range(0, n_samples):
assert self.tadpole_data.has_modality(x[pt, 0], self.modality)
forecasted, similarities, std, skew, v_min, v_max, v_value = self.predict_slope(x[pt, :],
predicted_time_points=
predicted_timepoints[pt],
d3=False)
forecast_list[x[pt, 0]] = forecasted
similarities_list[x[pt, 0]] = similarities
std_list[x[pt, 0]] = std
skew_list[x[pt, 0]] = skew
min_list[x[pt, 0]] = v_min
max_list[x[pt, 0]] = v_max
value_list[x[pt, 0]] = v_value
prediction = {FORECAST:forecast_list,
SIMILARITIES:similarities_list,
STD:std_list,
SKEW:skew_list,
FUTURE_MIN:min_list,
FUTURE_MAX:max_list,
BASELINE:value_list
}
return self.__to_list(x, y, prediction,predicted_timepoints)
def predict_slope(self, pt_data, predicted_time_points, d3=False):
ptid = str(pt_data[0])
start_point = int(float(pt_data[1]))
if predicted_time_points.shape == ():
predicted_time_points = np.expand_dims(predicted_time_points,axis=0)
predicted_time_points = predicted_time_points.astype(float)
starting_y_values = []
y_values = []
future_y_values = []
time_points = []
ptids = []
query_features, _ = self.tadpole_data.getXY([ptid], self.modality, target=self.target_value,time_points=[start_point],include_target=True,impute_ptids=self.training_ptids)
dist, ind = self.knn_model.kneighbors(self.scaler.transform(query_features))
dist = dist[0]
ind = ind[0]
neighbor_time_points = self.training_meta[self.tadpole_data.C_MONTH].tolist()
neighbor_ptids = self.training_meta[self.tadpole_data.PTID].tolist()
unique_ptids = []
matching_time_points = []
unique_ptid_count = 0
#Select n patients from the neighborhood
for i in range(0, ind.shape[0]):
            if neighbor_ptids[ind[i]] in unique_ptids or neighbor_ptids[ind[i]] == ptid:
continue
if unique_ptid_count == self.aligning_n_neighbors:
break
unique_ptids.append(neighbor_ptids[ind[i]])
matching_time_points.append(neighbor_time_points[ind[i]])
unique_ptid_count += 1
neighbor_ptids = unique_ptids
target_x, target_y = self.tadpole_data.getXY( [ptid],self.modality, target=self.target_value,time_points=[start_point],impute_ptids=self.training_ptids)
target_y = target_y.values
for i,n_ptid in enumerate(neighbor_ptids):
queried_time_point = matching_time_points[i]
x_measurement, y_measurement, metadata = self.tadpole_data.getXY([n_ptid],self.modality,target=self.target_value,retrieve_meta=True,impute_ptids=self.training_ptids)
matching_y_value = y_measurement[ metadata[self.tadpole_data.C_MONTH] == queried_time_point].values[0]
xy_timepoints = metadata[self.tadpole_data.C_MONTH]
xy_timepoints -= queried_time_point
if xy_timepoints.shape[0] == 0:
print('No features for patient: %s'%(str(neighbor_ptids[i])))
continue
y_values.append(y_measurement.values)
future_y_values.append( y_measurement[ xy_timepoints >= 0].values)
time_points.append(xy_timepoints.values)
starting_y_values.append(np.tile(matching_y_value, y_measurement.shape[0]))
ptids.append(str(neighbor_ptids[i]))
# Stack the lists
y_values = np.hstack(y_values)
future_y_values = np.hstack(future_y_values)
starting_y_values = np.hstack(starting_y_values)
time_points = np.hstack(time_points)
# Change dtypes
y_values = y_values.astype('float64')
time_points = time_points.astype('float64')
# Sort the stacked lists
sorted_indexes = np.argsort(time_points)
starting_y_values = starting_y_values[sorted_indexes]
y_values = y_values[sorted_indexes]
time_points = time_points[sorted_indexes]
# Calculate population statistics
value_min = np.min(future_y_values)
value_max = np.max(future_y_values)
value_current = target_y
untransformed_train_features = np.hstack(
(np.expand_dims(time_points, axis=1), np.expand_dims(starting_y_values, axis=1)))
features = untransformed_train_features
regression_models = {}
for _,(k,model) in enumerate(self.regression_models.items()):
regression_models[k] = clone(model)
regression_models[k].fit(features, y_values.ravel())
untransformed_features = np.hstack((np.expand_dims( np.hstack((0,predicted_time_points-start_point)), axis=1),np.tile(target_y, (predicted_time_points.shape[0]+1, 1))))
features = untransformed_features.astype(float)
predictions = {}
for _,(key,model) in enumerate(regression_models.items()):
regression_forecast = np.expand_dims(model.predict(features), axis=1)
            regression_forecast = regression_forecast + target_y - regression_forecast[0]
regression_forecast = regression_forecast[1:,:]
predictions[key] = regression_forecast
similarities = np.mean(dist)
std = np.std(dist)
skew = stats.skew(dist)
return predictions, similarities, std, skew, value_min, value_max, value_current[0]
def __to_list(self,x,y, prediction,forecast_timepoints):
column_names = []
predictions = []
for i in range(0,x.shape[0]):
ptid = x[i,0]
from_time_point = x[i,1]
if len(column_names) == 0:
column_names = [PTID, FORECAST_Y,FROM, TO]
column_names += list(prediction.keys())
column_names += prediction["forecast"][ptid].keys()
column_names.remove("forecast")
values = []
for key in prediction.keys():
if key != "forecast":
values.append([prediction[key][ptid]][0])
for t in range(0, len(forecast_timepoints[i])):
to_time_point = forecast_timepoints[i][t]
regression_values = []
                # iterate over the regression models without shadowing the outer index i
                for k, f in prediction["forecast"][ptid].items():
                    regression_values.append(float(f[t, 0]))
predictions.append([ptid, float(y[t]),float(from_time_point), float(to_time_point)] + values + regression_values)
return (predictions,column_names)
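# Editor's note: a hedged end-to-end sketch of KNNSR (not original code).
# `tadpole_data` stands for the TADPOLEData-like object this class expects,
# and the ptids and forecast months are placeholders.
def _example_knnsr(tadpole_data, train_ptids):
    model = KNNSR(tadpole_data, modality="cognitive2", target_value="ADAS13")
    model.fit(train_ptids)
    # x[0]: one row per patient -> [ptid, start_month]; x[1]: months to forecast
    x = (np.array([[train_ptids[0], 0]]), np.array([6.0, 12.0, 24.0]))
    rows, columns = model.predict(x)
    print(columns)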
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,767
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/cross_validate.py
|
from smnsr.models import SMNSR, Y_HAT
from smnsr.patients import AugmentedTADPOLEData, TADPOLEData
from smnsr.models import TO
from sklearn.model_selection import KFold
from argparse import ArgumentParser
import sys
from pytictoc import TicToc
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error
import pickle
import numpy as np
def perform_cv(args):
splitter = KFold(args.folds, random_state=0, shuffle=True)
data = TADPOLEData(
data=args.modality_path + args.data_file,
modality_path=args.modality_path,
modality_k=args.modality_k,
)
# Split the ptids into n folds
ptids = data.get_ptids(min_time_points=2, target=args.target)
print("Total patients in CV: %i" % len(ptids))
t = TicToc()
print("CV mode %s" % args.mode)
sys.stdout.flush()
predictions = []
modality_ranks = []
for fold, (train_index, test_index) in enumerate(splitter.split(ptids)):
print("Fold %i/%i" % (fold, args.folds - 1))
sys.stdout.flush()
train_ptids = [ptids[i] for i in train_index]
test_ptids = [ptids[i] for i in test_index]
aug_data = AugmentedTADPOLEData(
data, args.precomputed_path + "merged_%i.p" % fold, train_ptids
)
model = SMNSR(aug_data, n_jobs=args.cpus, forecast=False, mode=args.mode)
print("Fitting model")
t.tic()
model.fit(train_ptids)
print("Trainig took %s seconds" % t.tocvalue())
print("Performing forecasting")
sys.stdout.flush()
# Fetch known target values for the patients
y = aug_data.getY(test_ptids, target=args.target)
prediction_definition = y[[TADPOLEData.PTID, TADPOLEData.C_MONTH]]
print(
"Patients with more than one measurement in fold %i: %i"
% (fold, y[TADPOLEData.PTID].unique().shape[0])
)
y_hat = model.predict(prediction_definition, target=args.target)
prediction = y.merge(
y_hat,
left_on=[TADPOLEData.PTID, TADPOLEData.C_MONTH],
right_on=[TADPOLEData.PTID, TO],
)
        predictions.append(prediction)
        modality_ranks.append(model.ranked_modalities)
predictions = pd.concat(predictions, ignore_index=True)
with open(args.output_path + args.result_file_name, "wb") as file:
        pickle.dump(predictions, file)
    evaluate_predictions(predictions, data)
    return predictions
def evaluate_predictions(predictions, data):
patient_info = data.get_features(
data.get_ptids(), [TADPOLEData.DX_BL, TADPOLEData.AGE]
)
predictions = predictions.merge(
patient_info,
left_on=[TADPOLEData.PTID, TADPOLEData.C_MONTH],
right_on=[TADPOLEData.PTID, TADPOLEData.C_MONTH],
how="left",
)
# Evaluate overall performance
y_hat = predictions[Y_HAT].values
y = predictions[AugmentedTADPOLEData.Y].values
print("Total patients: %i" % (predictions[TADPOLEData.PTID].unique().shape[0]))
    overall_mae = mean_absolute_error(y, y_hat)
    overall_rmse = mean_squared_error(y, y_hat, squared=False)
print("Overall MAE: %f" % overall_mae)
print("Overall RMSE: %f" % overall_rmse)
distances = {}
unique_months = predictions[TADPOLEData.C_MONTH].unique()
    # Per-month error summaries
def mae(data: pd.DataFrame, month: int):
y_hat = data[data[TADPOLEData.C_MONTH] == month][Y_HAT]
y = data[data[TADPOLEData.C_MONTH] == month][AugmentedTADPOLEData.Y]
if y.empty or y_hat.empty:
return np.nan
return mean_absolute_error(y_hat, y)
def rmse(data: pd.DataFrame, month: int):
y_hat = data[data[TADPOLEData.C_MONTH] == month][Y_HAT]
y = data[data[TADPOLEData.C_MONTH] == month][AugmentedTADPOLEData.Y]
if y.empty or y_hat.empty:
return np.nan
return mean_squared_error(y_hat, y, squared=False)
for m in unique_months:
distances[m] = {}
distances[m]["combined"] = (mae(predictions, m), rmse(predictions, m))
for diagnosis in predictions[TADPOLEData.DX_BL].unique():
patients_with_diagnosis = predictions[
predictions[TADPOLEData.DX_BL].isin([diagnosis])
]
distances[m][diagnosis] = (
mae(patients_with_diagnosis, m),
rmse(patients_with_diagnosis, m),
)
print("Pointwise MAE:")
for i, (group, month) in enumerate(distances.items()):
print("\t%s" % group)
for j, (k, v) in enumerate(month.items()):
print("\t\t %s: %f %f" % (k, v[0], v[1]))
sys.stdout.flush()
def parse_args(cli_args):
cli = ArgumentParser()
cli.add_argument("--folds", type=int, default=10)
cli.add_argument("--precomputed", action="store_true")
cli.add_argument("--modality_path", type=str, default="../../modalities/")
cli.add_argument(
"--precomputed_path", type=str, default="../../output/precomputed_folds/"
)
cli.add_argument("--data_file", type=str, default="TADPOLE_D1_D2.csv")
cli.add_argument("--target", type=str, default="ADAS13")
cli.add_argument("--bl_forecast", default=False, action="store_true")
cli.add_argument("--output_path", type=str, default="../../output/")
cli.add_argument("--result_file_name", type=str, default="cv_result.p")
cli.add_argument("--modality_k", type=int, default=8)
cli.add_argument("--cpus", type=int, default=None)
cli.add_argument("--mode", type=str, default="xgb")
return cli.parse_args(cli_args)
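# Editor's note: this module defines parse_args and perform_cv but no entry
# point in the excerpt; a minimal one would look like the sketch below (an
# editor's assumption about the intended usage, not original code).
if __name__ == "__main__":
    perform_cv(parse_args(sys.argv[1:]))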
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,768
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/models/regression.py
|
from smnsr.models import Y_HAT
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.dummy import DummyRegressor
from xgboost import XGBRFRegressor
import multiprocessing
from smnsr.patients import TADPOLEData
from smnsr.patients import AugmentedTADPOLEData
from smnsr.models import KNNSR
from smnsr.models.knn import BASELINE, FUTURE_MAX, FUTURE_MIN
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, make_scorer
from pytictoc import TicToc
from sklearn.base import RegressorMixin, TransformerMixin
from smnsr.models import BaseStackedModel, FROM, TO
# TODO: Add interaction_constraints
# TODO: Add early stopping using validation set
KNNSR_BYPASS = "_forecast"
class SMNSR(BaseStackedModel):
def __init__(
self,
data: AugmentedTADPOLEData,
training_cv_folds=5,
n_jobs=None,
verbosity=0,
mode="xgb",
forecast_min="baseline",
forecast_max=None,
max_modalities=1,
forecast=False,
):
super().__init__(data)
        self.training_cv_folds = training_cv_folds
self.verbosity = verbosity
self.forecast_min = forecast_min
self.forecast_max = forecast_max
self.mode = mode
self.max_modalities = max_modalities
self.forecast = forecast
if not n_jobs:
n_jobs = multiprocessing.cpu_count()
self.n_jobs = n_jobs
self.__create_pipeline()
self.fold_generator = KFold(n_splits=training_cv_folds)
self.ranked_modalities = []
self.knnsrs = {}
def __create_pipeline(self):
if self.mode == "bypass_knnsr":
pipeline = [("regression", KNNSRBypassRegression(column=KNNSR_BYPASS))]
self.pipeline_params = {}
self.n_jobs = 1
self.training_cv_folds = 2
if self.mode == "xgb":
pipeline = [
("variance_treshold", VarianceThreshold()),
("scale", StandardScaler()),
("regression", XGBRFRegressor()),
]
self.pipeline_params = {
"regression__n_estimators": [100, 200, 400, 800],
"regression__max_depth": [1, 3, 5, 7, 11],
"regression__subsample": [0.5, 1],
"regression__colsample_bylevel": [0.8, 1],
"regression__random_state": [0],
"regression__eval_metric": ["mae"],
"regression__reg_lambda": [0, 1],
"regression__reg_alpha": [0, 1],
"regression__objective": ["reg:squarederror"],
}
if self.mode == "linear":
pipeline = [
(
"filter",
FilterColumns(
columns=[BASELINE, AugmentedTADPOLEData.FORECAST_DIST]
),
),
("scale", StandardScaler()),
("polynomial_features", PolynomialFeatures()),
("regression", LinearRegression()),
]
self.pipeline_params = {}
self.n_jobs = 1
self.pipeline = Pipeline(pipeline)
def fit(self, ptids, target="ADAS13"):
ptids, _ = super()._process_x(ptids)
self.training_ptids = ptids
self.target = target
self.ranked_modalities = []
modalities = self.data.get_modalities()
tictoc = TicToc()
# Create a fall-back regressor guessing the target mean
self.fallback = DummyRegressor(strategy="mean")
target_values = self.data.getY(ptids, target, drop_baseline=False)[
AugmentedTADPOLEData.Y
]
self.fallback.fit(target_values, target_values)
# If we are not using precalculated features, KNNSR models need to be trained.
for i, modality in enumerate(modalities):
if self.forecast:
if self.verbosity == 2:
print("\tFitting KNNSR for %s" % modality)
knn = KNNSR(self.data.data, modality=modality, target_value=target)
knn.fit(ptids)
self.knnsrs[modality] = knn
if self.verbosity > 0:
print(
"\tFitting %s for %s (%i/%i)"
% (self.mode, modality, i, len(modalities))
)
tictoc.tic()
x, y, t = self.data.getXY(ptids, modality, target=target)
            # If none of the patients in the training set have the modality, fall back to a DummyRegressor
            # guessing the mean of the target.
if x.shape[0] == 0:
                self.ranked_modalities.append((-np.inf, modality, self.fallback))
if self.verbosity == 2:
print("\tNo samples for %s in training set" % modality)
continue
# Fit model with CV. Store the model and score
grid = GridSearchCV(
self.pipeline,
cv=self.fold_generator,
n_jobs=self.n_jobs,
param_grid=self.pipeline_params,
verbose=0,
scoring=make_scorer(mean_absolute_error, greater_is_better=False),
)
grid.fit(x, y)
if self.verbosity == 2:
print(
"\tTraining for %s took %f"
% (modality, tictoc.tocvalue(restart=True))
)
self.ranked_modalities.append(
(grid.best_score_, modality, grid.best_estimator_, grid.best_params_)
)
# Rank modalities according to score.
self.ranked_modalities = sorted(
self.ranked_modalities, key=lambda l: l[0], reverse=True
)
if self.verbosity == 2:
for i, m in enumerate(self.ranked_modalities):
print("\t%i %s %f" % (i, m[1], m[0]))
def is_fitted(self):
return len(self.ranked_modalities) > 0
def predict(
self,
x,
target,
step_size=1,
forecast_start="2018-01-01",
forecast_end="2022-12-01",
):
assert self.is_fitted()
forecast_definition, nan_mask = super()._create_forecast_definition(
x,
forecast_start=forecast_start,
forecast_end=forecast_end,
step_size=step_size,
)
patients = forecast_definition[TADPOLEData.PTID].unique()
results = []
# Loop over the patients
for i, ptid in enumerate(patients):
target_time_points = sorted(
forecast_definition[forecast_definition[TADPOLEData.PTID] == ptid][
TADPOLEData.C_MONTH
].values.tolist()
)
patient_time_points = self.data.data.get_patient_time_points(ptid)
start_time_points = [patient_time_points[-1]]
if self.forecast:
if self.verbosity > 1:
print("%i/%i" % (i, len(patients)))
modality_count = 0
prediction = None
for i, modality in enumerate(self.ranked_modalities):
if not self.data.data.has_modality(
ptid,
modality[1],
time_points=start_time_points,
target=None,
nan_mask=nan_mask,
):
continue
# Get features from the lower stack.
x = (
np.array([[ptid, start_time_points[0]]]),
start_time_points[0] + np.array(target_time_points),
)
knnsr_predictions = self.knnsrs[modality[1]].predict(x)
xy_measurement = self.data.data.getXY(
[ptid],
modality[1],
target=target,
split=False,
impute_ptids=self.training_ptids,
)
knnsr_predictions = self.data.merge_view(
xy_measurement,
pd.DataFrame(
data=knnsr_predictions[0], columns=knnsr_predictions[1]
),
)
x, y, t = self.data.getXY(
[ptid],
modality[1],
target=target,
start_time_points=start_time_points,
target_time_points=start_time_points[0]
+ np.array(target_time_points),
knnsr_prediction=knnsr_predictions,
)
if x.shape[0] > 0:
y_hat = modality[2].predict(x)
y_hat = self.__cap_prediction(y_hat.ravel(), x)
if self.verbosity > 1:
print("Utilizing %s for patient %s" % (modality[1], ptid))
if modality_count == 0:
prediction = t
prediction[FROM] = prediction[FROM] + start_time_points[0]
prediction[TO] = prediction[TO] + start_time_points[0]
prediction[Y_HAT] = y_hat
prediction[TADPOLEData.PTID] = ptid
if modality_count > 0:
prediction[Y_HAT] = prediction[Y_HAT] + y_hat
modality_count += 1
if modality_count >= self.max_modalities:
break
assert prediction is not None, "Prediction was None for %" % ptid
prediction[Y_HAT] = prediction[Y_HAT] / modality_count
results.append(prediction)
else:
patient_time_points = self.data.data.get_patient_time_points(ptid)
start_time_points = [patient_time_points[0]]
for target_time in target_time_points:
modality_count = 0
prediction = None
for i, modality in enumerate(self.ranked_modalities):
x, y, t = self.data.getXY(
[ptid],
modality[1],
target=target,
start_time_points=start_time_points,
target_time_points=[target_time],
)
if x.shape[0] > 0:
y_hat = modality[2].predict(x)
y_hat = self.__cap_prediction(y_hat, x)
if modality_count == 0:
prediction = t
prediction[Y_HAT] = y_hat
prediction[TADPOLEData.PTID] = ptid
if modality_count > 0:
prediction[Y_HAT] = prediction[Y_HAT] + y_hat[0]
modality_count += 1
if modality_count >= self.max_modalities:
break
if modality_count > 0:
prediction[Y_HAT] = prediction[Y_HAT] / modality_count
                    assert prediction is not None, (
                        "No modalities for patient %s" % ptid
                    )
results.append(prediction)
results: pd.DataFrame = pd.concat(results)
patient_data = self.data.data.get_features(
results[TADPOLEData.PTID].unique(), []
)[[TADPOLEData.PTID, TADPOLEData.RID]]
results = results.merge(
patient_data,
right_on=TADPOLEData.PTID,
how="right",
left_on=TADPOLEData.PTID,
)
return results
    def __cap_prediction(self, y_hat, x):
        # Apply the lower cap first, then the (optional) upper cap; always return y_hat.
        if self.forecast_min == "baseline":
            y_hat = np.maximum(y_hat, x[BASELINE].values)
        elif self.forecast_min == "future":
            y_hat = np.maximum(y_hat, x[FUTURE_MIN].values)
        if self.forecast_max == "future":
            y_hat = np.minimum(y_hat, x[FUTURE_MAX].values)
        return y_hat
class KNNSRBypassRegression(RegressorMixin):
def __init__(self, column="forecast"):
self.column = column
def fit(self, X=None, y=None):
assert isinstance(X, pd.DataFrame)
self.bypassed_features = [
c for i, c in enumerate(X.columns) if c.endswith(self.column)
]
return self
def predict(self, X=None):
# If e.g. predictions from several KNN regressors are passed, return the mean
if len(self.bypassed_features) > 1:
            return np.mean(X[self.bypassed_features].values, axis=1)
return X[self.bypassed_features].values
class FilterColumns(TransformerMixin):
def __init__(self, columns, exclude=False):
self.__columns = columns
self.__exclude = exclude
def fit(self, X, y=None):
return self
def transform(self, x, y=None):
if self.__exclude:
return x[[c for c in x.columns if c not in self.__columns]]
return x[self.__columns]
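# Editor's note: a small demonstration of FilterColumns (editor's sketch, not
# original code). It acts as a column selector that can be dropped into an
# sklearn Pipeline, as done in the "linear" mode above.
def _example_filter_columns():
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
    keep = FilterColumns(columns=["a", "c"]).fit(df).transform(df)
    drop = FilterColumns(columns=["a"], exclude=True).fit(df).transform(df)
    print(list(keep.columns), list(drop.columns))  # ['a', 'c'] ['b', 'c']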
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,769
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/patients/TADPOLEData.py
|
"""
Created on Fri Oct 13 17:15:12 2017
@author: ciszek
"""
import pandas as pd
import vaex as vx
import numpy as np
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import re
from sklearn.impute import KNNImputer
import itertools
import yaml
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pkg_resources
class TADPOLEData:
PTID = "PTID"
RID = "RID"
VISCODE = "VISCODE"
DX = "DX"
DX_MAPPED = "DX_MAPPED"
DX_BL = "DX_bl"
CN = "CN"
MCI = "MCI"
AD = "AD"
C_MONTH = "M"
AGE = "AGE"
DN = ["D1", "D2"]
EXAM_DATE = "EXAMDATE"
YEARS_BL = "Years_bl"
RELATIVE_VENTRICLE_VOLUME = "RelativeVentricleVolume"
ICV = "ICV"
VENTRICLES = "Ventricles"
IMPUTED = "imputed"
Y_FEATURES = ["ADAS13"]
DEFAULT_IMPUTE_MODALITY = "patient"
CATEGORICAL = [
"PTEDUCAT",
"DXCHANGE",
"APOE4",
DX,
DX_BL,
"PTGENDER",
"PTETHCAT",
"PTRACCAT",
"PTMARRY",
]
META = [PTID, RID, C_MONTH, EXAM_DATE, YEARS_BL, VISCODE, DX_MAPPED, DX_BL]
MODALITY_YAML = "modalities.yaml"
ONE_HOT_ENCODED = {}
def __init__(
self,
data,
modality_path=None,
modality_k=7,
verbosity=2,
challenge_filter=True,
):
self.verbosity = verbosity
if verbosity == 2:
print("Reading data")
if modality_path is None:
modality_path = pkg_resources.resource_filename(
__name__, "/../modalities/"
)
assert isinstance(data, str) or isinstance(
data, pd.DataFrame
), "Input data must be specified as a filepath or as a DataFrame"
if isinstance(data, str):
self._df = pd.read_csv(data, low_memory=False)
else:
self._df = data.copy()
self.df_raw = self._df.copy()
if verbosity == 2:
print("Correcting age")
self._df = self.__correct_age(self._df)
if verbosity == 2:
print("Adding relative ventricle volume")
self._df = self.__add_relative_ventricle_volume(self._df)
self._df = self.__propagate_dx(self._df)
if verbosity == 2:
print("One hot encoding data")
self._df, self.one_hot_column_map, self.encoders = self.__one_hot_encode(
self._df
)
if verbosity == 2:
print("Translating clinical status")
self._df = self.__translate_clinical_status(self._df)
self.__load_modalities(modality_path)
self.relevant_columns = []
for m in self.modality_columns:
self.relevant_columns += self.modality_columns[m]
self.relevant_columns += self.META
self._df = self._df[self.relevant_columns]
if verbosity == 2:
print("Correcting non-numeric entries")
self._df = self.__correct_non_numeric_entries(self._df)
if verbosity == 2:
print("Total patients: %i" % (np.unique(self._df["PTID"]).shape[0]))
self.one_hot_encoded = {}
if verbosity == 2:
print("Creating modality combinations")
self.__create_modality_combinations(modality_k)
if verbosity == 2:
print("Adding patient columns")
self.__add_patient_columns()
self.modality_names = self.get_modalities()
self.set_backend("pandas")
if challenge_filter:
self.use_challenge_modalities()
def set_backend(self, backend="pandas"):
self.backend = pd
if self.verbosity == 2:
print("Backend set to %s" % backend)
def __load_modalities(self, modality_path):
self.modality_columns = {}
with open(modality_path + self.MODALITY_YAML) as file:
modalities = yaml.full_load(file)
for modality_name in modalities.keys():
self.modality_columns[
modality_name
] = self.__translate_one_hot_encoded_columns(modalities[modality_name])
def __translate_clinical_status(self, df):
def map_status(value):
if value == "NL" or value == "MCI to NL":
return self.CN
if value == "MCI" or value == "NL to MCI" or value == "Dementia to MCI":
return self.MCI
if (
value == "Dementia"
or value == "NL to Dementia"
or value == "MCI to Dementia"
):
return self.AD
return pd.NA
df[self.DX_MAPPED] = df[self.DX]
df[self.DX_MAPPED] = df[self.DX_MAPPED].map(map_status)
return df
def __correct_age(self, data):
data[self.AGE] += data[self.YEARS_BL]
return data
def __add_relative_ventricle_volume(self, data):
data[self.RELATIVE_VENTRICLE_VOLUME] = data[self.VENTRICLES] / data[self.ICV]
return data
def __correct_non_numeric_entries(self, data_file):
dtypes = data_file.dtypes
for c in range(1, data_file.shape[1]):
if data_file.columns[c] in self.META:
continue
if dtypes[c] == "float64":
continue
            if np.issubdtype(dtypes[c], np.object_):
data_file.loc[:, data_file.columns[c]] = data_file.loc[
:, data_file.columns[c]
].astype("str")
data_file.loc[:, data_file.columns[c]] = data_file.loc[
:, data_file.columns[c]
].str.replace(" ", "")
data_file.loc[:, data_file.columns[c]] = data_file.loc[
:, data_file.columns[c]
].str.replace("<", "")
data_file.loc[:, data_file.columns[c]] = data_file.loc[
:, data_file.columns[c]
].str.replace(">", "")
data_file.loc[:, data_file.columns[c]].replace(
r"^\s*$", np.nan, regex=True, inplace=True
)
try:
data_file.loc[:, data_file.columns[c]] = data_file.loc[
:, data_file.columns[c]
].astype("float32")
except ValueError as e:
print(data_file.columns[c])
print(e)
return data_file
def __one_hot_encode(self, data_file: pd.DataFrame, encoders=None):
dtype_dict = {}
one_hot_column_map = {}
if encoders is None:
encoders = {}
for value in self.CATEGORICAL:
if value in data_file.columns:
dtype_dict[value] = "object"
data_file = data_file.astype(dtype=dtype_dict)
for c in range(1, data_file.shape[1]):
if data_file.columns[c] in self.META:
continue
if data_file.columns[c] in self.CATEGORICAL:
non_nan_indexes = np.where(
data_file.loc[:, data_file.columns[c]].notnull()
)[0]
if non_nan_indexes.shape[0] == 0:
continue
if data_file[data_file.columns[c]].apply(lambda x: x == " ").any():
continue
if not data_file.columns[c] in encoders.keys():
oh = OneHotEncoder()
le = LabelEncoder()
le.fit(data_file.iloc[non_nan_indexes, c].values)
oh.fit(
le.transform(data_file.iloc[non_nan_indexes, c].values).reshape(
-1, 1
)
)
encoders[data_file.columns[c]] = (oh, le)
c_encoder = encoders[data_file.columns[c]]
encoded = c_encoder[0].transform(
c_encoder[1]
.transform(data_file.iloc[non_nan_indexes, c].values)
.reshape(-1, 1)
)
encoded = encoded.toarray()
nan_data = np.empty((data_file.shape[0], encoded.shape[1]))
nan_data[:, :] = np.nan
encoded_column_names = [
data_file.columns[c] + "_" + str(i)
for i in range(0, encoded.shape[1])
]
for i in range(0, non_nan_indexes.shape[0]):
nan_data[non_nan_indexes[i], :] = encoded[i, :]
one_hot_column_map[data_file.columns[c]] = encoded_column_names
for i, encoded_column in enumerate(encoded_column_names):
data_file[encoded_column] = nan_data[:, i]
return data_file, one_hot_column_map, encoders
def __translate_one_hot_encoded_columns(self, columns):
translated_columns = columns.copy()
for c in columns:
if c in list(self.one_hot_column_map.keys()):
translated_columns.remove(c)
translated_columns += self.one_hot_column_map[c]
return translated_columns
def __propagate_dx(self, data_file: pd.DataFrame):
data_file[self.DX] = data_file.groupby(self.PTID)[self.DX].ffill()
data_file.loc[data_file[self.DX].isna(), self.DX] = data_file[
data_file[self.DX].isna()
][self.DX_BL].values
return data_file
def __add_patient_columns(self):
keys = [key for key in self.modality_columns.keys()]
for key in keys:
if key != "patient" and key != "adas13":
self.modality_columns[key] = (
self.modality_columns[key] + self.modality_columns["patient"]
)
def __create_modality_combinations(self, max_size=2):
modality_names = [
key
for key in self.modality_columns.keys()
if (key != "patient") and (key != "adas13")
]
if max_size is None or max_size > len(modality_names):
max_size = len(modality_names)
separator = "-"
all_combinations = []
for i in range(2, max_size + 1):
all_combinations += list(itertools.combinations(modality_names, i))
for c in all_combinations:
combined_modality_columns = []
for m in c:
combined_modality_columns += self.modality_columns[m]
combined_modality_columns = list(set(combined_modality_columns))
new_combination = True
new_modality_name = separator.join(c)
# Ensure that the added combinatorial modality is not already presented with a different name
# This only happens when overlapping modalities are defined in the modalities.yaml
for k, v in self.modality_columns.items():
if set(v) == set(combined_modality_columns):
new_combination = False
if self.verbosity:
print("%s already exists as %s" % (new_modality_name, k))
continue
if new_combination:
self.modality_columns[new_modality_name] = combined_modality_columns
def use_challenge_modalities(self):
filtered = {}
if self.verbosity == 2:
print("Replicating features utilized in the original TADPOLE challenge")
for i, (modality, columns) in enumerate(self.modality_columns.items()):
            # Filter all modalities that contain "apo4", except "apo4" itself and "apo4-patient"
            if "apo4" in modality and modality not in ("apo4", "apo4-patient"):
continue
# Filter all modalities that contain cognitive1 but do not contain cognitive_simple
if ("cognitive1" in modality and ("cognitive_simple") not in modality) or (
"cognitive_simple" in modality and "cognitive1" not in modality
):
continue
# ADAS13 and MMSE were merged with cognitive1.
if modality == "cognitive_simple":
continue
filtered[modality] = columns
self.modality_columns = filtered
self.modality_names = self.get_modalities()
def save_modality(self, save_folder):
if self.verbosity > 1:
print("Saving modalities")
for modality in self.modality_columns.keys():
column_names = self.modality_columns[modality]
column_names.extend(
[self.PTID, self.C_MONTH, self.DN[0], self.DN[1], self.EXAM_DATE]
)
if isinstance(self._df, vx.dataframe.DataFrame):
data = self._df[
list(set(self._df.get_column_names()) & set(column_names))
]
else:
data = self._df[self._df.columns.intersection(column_names)]
if self.backend == "vaex":
data = data.to_pandas_df()
data.to_csv(save_folder + modality + ".csv")
def get_modalities(self):
return [key for key in self.modality_columns.keys()]
def getXY(
self,
ptids,
modality,
target=None,
time_points=None,
retrieve_meta=False,
split=True,
impute_ptids=[],
include_target=False,
):
value_columns = self.modality_columns[modality].copy()
if self.AGE not in self.modality_columns[modality]:
value_columns += [self.AGE]
if include_target and target and not target in value_columns:
value_columns += [target]
selected_columns = value_columns.copy()
selected_columns += [self.C_MONTH]
if target and not target in selected_columns:
selected_columns += [target]
selected_columns += [self.PTID]
measurements = self._df[self._df[self.PTID].isin(ptids)][
selected_columns
].copy()
if time_points is not None:
measurements = measurements[measurements[self.C_MONTH].isin(time_points)]
if target is not None:
# If the only missing column is the target column, impute the missing target
measurements[self.IMPUTED] = 0
if len(impute_ptids) > 0:
target_missing = measurements[
(measurements[target].isna())
& (measurements[value_columns].isna().sum(axis=1) <= 1)
]
with pd.option_context("mode.chained_assignment", None):
measurements[measurements.index.isin(target_missing.index)][
self.IMPUTED
] = 1
if target_missing.shape[0] > 0:
imputer = KNNImputer(weights="distance", n_neighbors=5)
# The missing target will be imputed using the "patient" modality
impute_modality_columns = self.modality_columns[
self.DEFAULT_IMPUTE_MODALITY
].copy()
impute_modality_columns += [target]
training_measurements = self._df[
self._df[self.PTID].isin(impute_ptids)
][impute_modality_columns]
imputer.fit(training_measurements.values)
imputed = imputer.transform(
measurements[impute_modality_columns].values
)[:, len(impute_modality_columns) - 1]
measurements[target] = imputed.tolist()
else:
if isinstance(measurements, vx.dataframe.DataFrame):
measurements = measurements.dropmissing(column_names=[target])
else:
measurements.replace(r"^\s*$", pd.NA, regex=True, inplace=True)
measurements = measurements[measurements[target].notnull()]
measurements = measurements.dropna()
measurements[self.IMPUTED] = measurements[self.IMPUTED].astype(int)
value_columns += [self.IMPUTED]
if not split:
return measurements
x_measurement = measurements[value_columns]
y_measurements = []
if target:
y_measurements = measurements[target]
if retrieve_meta:
return (
x_measurement,
y_measurements,
measurements[[self.C_MONTH, self.PTID, self.IMPUTED]],
)
else:
return x_measurement, y_measurements
def get_features(self, ptids, features):
measurements = self._df[self._df[self.PTID].isin(ptids)]
measurements = measurements[
features + [self.PTID, self.C_MONTH, self.EXAM_DATE, self.RID]
]
measurements = measurements.dropna()
return measurements
def has_modality(
self,
ptid,
modality,
time_points=None,
target=None,
nan_mask=[],
predictable=False,
):
expected_columns = self.modality_columns[modality].copy()
        # The missing target feature may be imputed if it is missing from the modality.
if target:
if target in expected_columns:
expected_columns.remove(target)
expected_columns.append(target)
data = self._df
if len(nan_mask) > 0:
data = self._df.copy()
data[nan_mask] = np.nan
if time_points:
data = data[
(data[self.PTID] == ptid) & (data[self.C_MONTH].isin(time_points))
]
else:
data = data[data[self.PTID] == ptid]
data = data.sort_values(self.C_MONTH)
if predictable and target:
return data[expected_columns].iloc[1:, :].dropna().shape[0] > 0
else:
return data[expected_columns].dropna().shape[0] > 0
def get_patient_modalities(self, ptid, time_points=None, target=None):
return [
modality
for modality in self.get_modalities()
if self.has_modality(ptid, modality, time_points=time_points, target=target)
]
def get_ptids(self, min_time_points=1, target=None):
columns = [self.PTID]
if target:
columns.append(target)
if min_time_points == 1:
ptids = self._df[columns]
else:
counts = self._df[self.PTID].value_counts()
ptids = self._df[
self._df[self.PTID].isin(counts.index[counts >= min_time_points])
]
if target is not None:
ptids = ptids.sort_values(self.C_MONTH)
predictable = (
ptids.loc[
ptids.groupby(self.PTID)[target]
.apply(lambda x: x.iloc[1:])
.index.get_level_values(1),
:,
]
.groupby(self.PTID)[target]
.apply(lambda x: x.isnull().all())
)
predictable = predictable[~predictable]
ptids = ptids[ptids[self.PTID].isin(predictable.index)]
return ptids[self.PTID].unique().tolist()
def column_difference(self, df1: pd.DataFrame, df2: pd.DataFrame):
df2_columns = []
def extract_column(c):
one_hot = re.search("([A-Z]*)(?=[_][0-9])", c)
if one_hot is not None:
return one_hot.group(0)
else:
return c
for c in df2.columns:
df2_columns.append(extract_column(c))
df2_columns = list(set(df2_columns))
difference = []
for c in df1.columns:
extracted = extract_column(c)
if extracted not in df2_columns:
difference.append(c)
return difference
def get_patient_time_points(self, ptid):
return sorted(
self._df[self._df[self.PTID].isin([ptid])][self.C_MONTH].values.tolist()
)
def distance_to_date(self, ptid: str, month: int, target_date: str):
date_for_month = self._df[
(self._df[TADPOLEData.PTID].isin([ptid]))
& (self._df[TADPOLEData.C_MONTH].isin([month]))
][TADPOLEData.EXAM_DATE]
if date_for_month.shape[0] > 0:
date_for_month = date_for_month.values[0]
distance = relativedelta(
datetime.strptime(target_date, "%Y-%m-%d"),
datetime.strptime(date_for_month, "%Y-%m-%d"),
)
return distance.years * 12 + distance.months
else:
return np.nan
def rids_to_ptids(self, rids):
rid_df: pd.DataFrame = pd.DataFrame(data=rids, columns=[self.RID])
ptid_df: pd.DataFrame = self._df[[self.PTID, self.RID]]
ptid_df = (
ptid_df.merge(rid_df, left_on=self.RID, right_on=self.RID, how="inner")
.groupby(self.PTID)
.head(1)
)
return ptid_df[self.PTID].values.tolist()
def save_dummy(self, file_name="dummy_data.csv", n_samples=300):
dummy = self.df_raw.copy()
for c in dummy.columns:
dummy[c] = np.random.permutation(dummy[c].values)
dummy = dummy[
dummy[TADPOLEData.PTID].isin(dummy[TADPOLEData.PTID].unique()[0:n_samples])
]
dummy.to_csv(file_name)
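# Editor's note: a hedged usage sketch (not original code). The CSV path is a
# placeholder, and "cognitive2" is assumed to be one of the modalities defined
# in modalities.yaml (it is the default modality used by KNNSR).
def _example_tadpole_data():
    data = TADPOLEData(data="TADPOLE_D1_D2.csv")
    ptids = data.get_ptids(min_time_points=2, target="ADAS13")
    x, y = data.getXY(ptids[:10], "cognitive2", target="ADAS13")
    print(x.shape, y.shape)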
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,770
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/patients/timeseries_creation.py
|
import pandas as pd
from smnsr.models import KNNSR
import numpy as np
import logging
from smnsr.patients import TADPOLEData
import psutil
import ray
import sys
def create_features(
tadpole_data: TADPOLEData,
train_ptids,
test_ptids,
modalities=None,
return_knn=False,
num_cpus=None,
):
if not num_cpus:
num_cpus = psutil.cpu_count(logical=False)
tadpole_data.set_backend("pandas")
df_id = ray.put(tadpole_data)
if modalities is None:
modalities = tadpole_data.modality_names
avg = len(train_ptids) / float(num_cpus)
split_train_ids = []
split_test_ids = []
last = 0.0
timeseries_features = {}
knn_models = {}
while last < len(train_ptids):
split_train_ids.append(train_ptids[int(last) : int(last + avg)])
last += avg
last = 0.0
avg = len(test_ptids) / float(num_cpus)
while last < len(test_ptids):
split_test_ids.append(test_ptids[int(last) : int(last + avg)])
last += avg
def process_ptids(ptids, m, t, df_id, knn_id):
        result_ids = [
            create_pt_series.remote(df_id, knn_id, m, t, ptids[i])
            for i in range(len(ptids))
        ]
results = ray.get(result_ids)
return pd.concat(results, ignore_index=False)
for m in modalities:
sys.stdout.flush()
timeseries_features[m] = {}
if m == "adas13":
continue
if return_knn:
knn_models[m] = {}
for t in tadpole_data.Y_FEATURES:
knn = KNNSR(tadpole_data, modality=m, target_value=t)
knn.fit(train_ptids)
knn_id = ray.put(knn)
train_ts = process_ptids(split_train_ids, m, t, df_id, knn_id)
if len(test_ptids) > 0:
test_ts = process_ptids(split_test_ids, m, t, df_id, knn_id)
results = pd.concat([train_ts, test_ts], ignore_index=False)
else:
results = train_ts
timeseries_features[m][t] = results
if return_knn:
knn_models[m][t] = knn
return timeseries_features, knn_models
@ray.remote
def create_pt_series(df, knn, feature_modality, target, train_ptids):
column_names = []
predictions = []
for ptid in train_ptids:
logging.getLogger(__name__).debug("Patient %s" % (ptid))
features, y, modality_metadata = df.getXY(
[ptid],
feature_modality,
target,
retrieve_meta=True,
impute_ptids=train_ptids,
)
feature_time_points = modality_metadata[df.C_MONTH].values
        # Targets are unimputed data
target_features = df.get_features([ptid], [target])
target_time_points = target_features[df.C_MONTH].values
target_features = target_features[target].values
sorted_indexes = np.argsort(feature_time_points)
feature_time_points = feature_time_points[sorted_indexes]
sorted_indexes = np.argsort(target_time_points)
target_time_points = target_time_points[sorted_indexes]
target_features = target_features[sorted_indexes]
if target_features.shape[0] < 1 or feature_time_points.shape[0] < 1:
            logging.getLogger(__name__).debug(
                "Patient %s is missing measurements or target values. "
                "Number of measurements: %i, number of target values: %i. Skipping"
                % (ptid, features.shape[0], target_features.shape[0])
            )
continue
for m in range(0, len(feature_time_points)):
from_time_point = feature_time_points[m]
forecast_timepoints = target_time_points[
from_time_point < target_time_points
]
y_true = target_features[from_time_point < target_time_points]
if forecast_timepoints.shape[0] == 0:
break
x = (np.array([[ptid, from_time_point]]), forecast_timepoints)
prediction = knn.predict(x, y_true)
if len(column_names) == 0:
column_names = prediction[1]
predictions += prediction[0]
return pd.DataFrame(data=predictions, columns=column_names)
def augment_ts(x, y, times, std):
x_temp = np.tile(x, (times, 1))
y_temp = np.tile(y, (times, 1))
for c in range(0, x_temp.shape[1]):
if np.unique(x_temp[:, c]).shape[0] < 3:
continue
x_temp[:, c] += np.random.normal(0, std, (x_temp.shape[0],))
x = np.vstack((x, x_temp))
y = np.vstack((y, y_temp))
return x, y
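# --- Hedged usage sketch (illustration only, not part of the original
# module): augment_ts replicates the feature matrix `times` times and adds
# Gaussian jitter (std) to every column with three or more unique values,
# leaving near-constant columns such as one-hot flags untouched.
if __name__ == "__main__":
    x_demo = np.random.rand(10, 4)
    y_demo = np.random.rand(10, 1)
    x_aug, y_aug = augment_ts(x_demo, y_demo, times=3, std=0.05)
    # 10 original rows plus 3 jittered copies -> 40 rows
    assert x_aug.shape == (40, 4) and y_aug.shape == (40, 1)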
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,771
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/patients/AugmentedTADPOLEData.py
|
import pickle
import bz2
from smnsr.models import FROM, TO
from smnsr.patients import TADPOLEData
from pytictoc import TicToc
from pathlib import Path
import gdown
from tempfile import TemporaryDirectory
import os
class AugmentedTADPOLEData:
FORECAST_DIST = "forecast_dist"
Y = "y"
TMP_FILE = "ts_tmp.p"
def __init__(self, data, ts_data_file, train_ptids, verbosity=2, tmp_path=None):
self.verbosity = verbosity
self.train_ptids = train_ptids
assert isinstance(data, TADPOLEData)
self.data = data
assert (
isinstance(ts_data_file, str)
or isinstance(ts_data_file, dict)
or isinstance(ts_data_file, tuple)
or isinstance(ts_data_file, list)
)
if isinstance(ts_data_file, str):
tmp_dir = None
if ts_data_file.startswith("https://"):
tmp_dir: TemporaryDirectory = TemporaryDirectory()
output = os.path.join(tmp_dir.name, self.TMP_FILE)
gdown.download(ts_data_file, output, quiet=False)
ts_data_file = output
if verbosity == 2:
print("Reading pre-calculated KNNSR data")
with bz2.BZ2File(ts_data_file, "rb") as file:
self.__process_knnsr_data(pickle.load(file))
if verbosity == 2:
print("Time series reading completed")
if tmp_dir:
tmp_dir.cleanup()
else:
self.__process_knnsr_data(ts_data_file)
def __process_knnsr_data(self, knnsr_data):
if isinstance(knnsr_data, dict):
if self.verbosity == 2:
print("Unmerged KNNSR data provided")
self.__merge_knnr_data(knnsr_data)
if isinstance(knnsr_data, tuple) or isinstance(knnsr_data, list):
if self.verbosity == 2:
print("Merged KNNSR data provided")
self.views = knnsr_data[1]
def __merge_knnr_data(self, knnsr_data):
if self.verbosity == 2:
print("Combining KNNSR data")
self.views = {}
ptids = self.data.get_ptids()
tictoc = TicToc()
tictoc.tic()
self.valid_ptids = []
for m in self.data.get_modalities():
self.views[m] = {}
for t in knnsr_data[m].keys():
xy_measurement = self.data.getXY(
ptids, m, target=t, split=False, impute_ptids=self.train_ptids
)
modality_ts = knnsr_data[m][t]
if not isinstance(modality_ts, self.data.backend.DataFrame):
modality_ts = self.data.backend.DataFrame(modality_ts)
view = self.merge_view(xy_measurement, modality_ts)
self.views[m][t] = view
self.valid_ptids += view[TADPOLEData.PTID].values.tolist()
if self.verbosity == 2:
print("Mergin KNNSR data took %f" % tictoc.tocvalue())
self.valid_ptids = list(set(self.valid_ptids))
if self.verbosity == 2:
print("%i patients in set" % len(self.valid_ptids))
def merge_view(self, xy_measurement, modality_ts, offset=None):
view = xy_measurement.merge(
modality_ts,
how="right",
left_on=[self.data.PTID, self.data.C_MONTH],
right_on=[self.data.PTID, FROM],
)
view[self.FORECAST_DIST] = view[TO] - view[FROM]
return view
def getXY(
self,
ptids,
modality,
target,
start_time_points=None,
target_time_points=None,
knnsr_prediction=None,
):
if knnsr_prediction is not None:
view = knnsr_prediction
else:
view = self.views[modality][target]
view = view[view[self.data.PTID].isin(ptids)]
if start_time_points is not None:
view = view[view[FROM].isin(start_time_points)]
if target_time_points is not None:
view = view[view[TO].isin(target_time_points)]
x_columns = [
c for c in view.columns if c not in [self.data.PTID, FROM, TO, self.Y]
]
view = view.dropna().copy()
if start_time_points is not None:
view.loc[:, self.data.C_MONTH] = (
view.loc[:, self.data.C_MONTH] - start_time_points
)
view.loc[:, FROM] = view.loc[:, FROM] - start_time_points
view.loc[:, TO] = view.loc[:, TO] - start_time_points
x = view[x_columns]
y = view[self.Y]
t = view[[FROM, TO]]
return x, y, t
def getY(self, ptids, target, drop_baseline=True):
y = self.data.get_features(ptids, [target])
counts = y[self.data.PTID].value_counts()
y = y[y[self.data.PTID].isin(counts.index[counts >= 2])]
if drop_baseline:
y = y[y[TADPOLEData.C_MONTH] != 0]
y.rename(columns={target: self.Y}, inplace=True)
return y
def get_ptids(self, training=False):
if training:
return self.valid_ptids
return self.data.get_ptids()
def get_modalities(self):
return self.data.get_modalities()
def save(self, output_path, overwrite=False):
output_path = Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
if not output_path.exists() or overwrite:
with bz2.BZ2File(output_path, "w") as file:
pickle.dump(["merged", self.views], file)
if self.verbosity == 2:
print("Saved merged %s" % output_path)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,772
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/models/base_model.py
|
from smnsr.patients import AugmentedTADPOLEData, TADPOLEData
import pandas as pd
import numpy as np
class BaseModel:
def __init__(self, data: AugmentedTADPOLEData):
self.data: AugmentedTADPOLEData = data
def _process_x(self, x):
if isinstance(x, list):
return x, []
if isinstance(x, pd.DataFrame):
assert TADPOLEData.RID in x.columns
if TADPOLEData.PTID not in x.columns:
x[TADPOLEData.PTID] = self.data.data.rids_to_ptids(x[TADPOLEData.RID])
ptids = x[self.data.data.PTID].unique()
valid_ptids = []
nan_mask = self.data.data.column_difference(self.data.data._df, x)
for ptid in ptids:
if len(self.data.data.get_patient_modalities(ptid)) == 0:
print(
"Warning: Patient %s has zero valid modalities. Patient will be omitted."
% ptid
)
else:
valid_ptids.append(ptid)
return valid_ptids, nan_mask
def _adjust_target_months(self, ptid, start_month, forecast_start_date, target):
distance = self.data.data.distance_to_date(
ptid, start_month, forecast_start_date
)
return [month + distance for month in target]
def _create_forecast_definition(
self, ptids, forecast_start, forecast_end, step_size=1
):
if isinstance(ptids, pd.DataFrame):
if ptids.shape[1] == 2 and set(
[TADPOLEData.PTID, TADPOLEData.C_MONTH]
) == set(ptids.columns):
return ptids, []
x, nan_mask = self._process_x(ptids)
prediction_definition = pd.DataFrame()
ptid_list = []
month_list = []
for ptid in x:
time_points = self.data.data.get_patient_time_points(ptid)
d_start = self.data.data.distance_to_date(
ptid, time_points[-1], forecast_start
)
d_end = self.data.data.distance_to_date(ptid, time_points[-1], forecast_end)
n_steps = d_end - d_start
month_list += [
d_start + i for i in range(0, n_steps + step_size, step_size)
]
ptid_list += [ptid for _ in range(0, n_steps + step_size, step_size)]
prediction_definition[TADPOLEData.PTID] = ptid_list
prediction_definition[TADPOLEData.C_MONTH] = month_list
return prediction_definition, nan_mask
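# --- Hedged illustration (not part of the original module): the forecast
# definition assembled above is simply a two-column frame pairing each PTID
# with every month at which a forecast is requested.
if __name__ == "__main__":
    example = pd.DataFrame(
        {
            TADPOLEData.PTID: ["011_S_0002"] * 3,
            TADPOLEData.C_MONTH: [120, 121, 122],
        }
    )
    print(example)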
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,773
|
rciszek/SMNSR
|
refs/heads/master
|
/tests/test_timeseries_creation.py
|
import bz2
import pickle
import unittest
from smnsr.patients import TADPOLEData, AugmentedTADPOLEData
from smnsr.patients import create_features
from sklearn.model_selection import KFold
import psutil
import ray
import pandas as pd
class TestTimeseriesCreation(unittest.TestCase):
MODALITIES = ["cognitive2"]
OUTPUT_PATH = "../output/"
MODALITY_PATH = "../smnsr/modalities/"
DATA_FILE = MODALITY_PATH + "TADPOLE_D1_D2.csv"
TARGET = "ADAS13"
N_FOLDS = 10
PATIENT_MODALITY = "patient"
def test_create_features(self):
data = TADPOLEData(
data=self.DATA_FILE, modality_k=2, modality_path=self.MODALITY_PATH
)
ray.init(num_cpus=psutil.cpu_count(logical=False), ignore_reinit_error=True)
ptids = data.get_ptids()
timeseries_features, knn_models = create_features(
data, ptids, [], modalities=self.MODALITIES
)
self.assertIsNotNone(timeseries_features)
self.assertEqual(set(timeseries_features.keys()), set(self.MODALITIES))
def test_precomputed_modality_for_ptid(self):
tadpole_data = TADPOLEData(
modality_k=8,
data=self.DATA_FILE,
challenge_filter=True,
modality_path=self.MODALITY_PATH,
)
splitter = KFold(self.N_FOLDS, random_state=0, shuffle=True)
# Patients with no predictable target value, i.e. patients with only baseline target value,
# are not included in the augmented data.
ptids = tadpole_data.get_ptids(min_time_points=2, target=self.TARGET)
modalities = tadpole_data.get_modalities()
for fold, (train_index, test_index) in enumerate(splitter.split(ptids)):
with bz2.BZ2File(self.OUTPUT_PATH + "merged_%i.p" % fold, "rb") as file:
merged = pickle.load(file)[1]
with bz2.BZ2File(self.OUTPUT_PATH + "fold_%i.p" % fold, "rb") as file:
split = pickle.load(file)
train_ptids = [ptids[i] for i in train_index]
test_ptids = [ptids[i] for i in test_index]
aug_data = AugmentedTADPOLEData(
tadpole_data, self.OUTPUT_PATH + "merged_%i.p" % fold, train_ptids
)
for ptid in test_ptids:
self.assertTrue(
tadpole_data.has_modality(
ptid,
self.PATIENT_MODALITY,
target=self.TARGET,
predictable=True,
),
"%s must always have %s modality" % (ptid, self.PATIENT_MODALITY),
)
has_valid_modality = False
df_merged = merged["patient"][self.TARGET]
df_fold = split["patient"][self.TARGET]
self.assertTrue(
df_fold[df_fold[TADPOLEData.PTID].isin([ptid])].shape[0] > 0
)
self.assertTrue(
df_merged[df_merged[TADPOLEData.PTID].isin([ptid])].shape[0] > 0
)
for modality in modalities:
x, y, t = aug_data.getXY([ptid], modality, target=self.TARGET)
if x.shape[0] > 0:
has_valid_modality = True
break
self.assertTrue(
has_valid_modality,
"%s must have at least single valid modality in fold %i"
% (ptid, fold),
)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,774
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/models/__init__.py
|
Y_HAT = "y_hat"
TO = "predict_to"
FROM = "predict_from"
PTID = "PTID"
FORECAST_Y = "y"
from .knn import KNNSR
from .base_model import BaseModel
from .base_stacked_model import BaseStackedModel
from .regression import SMNSR
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,775
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/patients/__init__.py
|
from .TADPOLEData import TADPOLEData
from .AugmentedTADPOLEData import AugmentedTADPOLEData
from .timeseries_creation import create_features
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,776
|
rciszek/SMNSR
|
refs/heads/master
|
/tests/test_AugmentedTADPOLEData.py
|
from unittest import TestCase
from smnsr.patients import TADPOLEData, AugmentedTADPOLEData
import pathlib
import pickle
import bz2
import os
MODALITY_PATH = "../smnsr/modalities/"
OUTPUT_PATH = "../output/"
TMP_PATH = "./tmp/"
TARGET = "ADAS13"
PTIDS = ["011_S_0002", "011_S_0003", "011_S_0003", "011_S_0003", "011_S_0003"]
TEST_PTID = "test_ptid"
DATA_FILE = "TADPOLE_D1_D2.csv"
class TestAugmentedTADPOLEData(TestCase):
assert os.path.exists(
MODALITY_PATH + DATA_FILE
), "TADPOLE_D1_D2.csv must be stored in the modality folder"
TS_FILE = "fold_0.p"
MERGED_TS_FILE = OUTPUT_PATH + "merged_all.p"
MODALITY = "patient"
SAVE_PATH = OUTPUT_PATH + "merged_saved"
GOOGLE_DRIVE_URL = (
"https://drive.google.com/uc?id=1diTUWzctbl5MfpgoKBuGa-hvIXVgJcx7"
)
assert os.path.exists(
MERGED_TS_FILE
), "A merged ts file must be provided for testing"
_tadpole_data: TADPOLEData = TADPOLEData(
modality_k=2,
challenge_filter=True,
data=MODALITY_PATH + DATA_FILE,
modality_path=MODALITY_PATH,
)
_data: AugmentedTADPOLEData = AugmentedTADPOLEData(
_tadpole_data, MERGED_TS_FILE, _tadpole_data.get_ptids()
)
def test_get_ptids(self):
self.assertTrue(len(self._tadpole_data.get_ptids()) > 0)
def test_get_modalities(self):
self.assertTrue(len(self._tadpole_data.get_modalities()) > 0)
def test_get_xy(self):
x, y, t = self._data.getXY(
["135_S_5275"], modality=self.MODALITY, target=TARGET
)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
self.assertEqual(x.shape[0], y.shape[0])
self.assertTrue(x.shape[0] > 0)
def test_y(self):
y = self._data.getY(["135_S_5275"], TARGET)
self.assertIsNotNone(y)
self.assertTrue(y.shape[0] > 0)
def test_save(self):
self._data.save(self.SAVE_PATH, overwrite=False)
self.assertTrue(pathlib.Path(self.SAVE_PATH).exists())
with bz2.BZ2File(self.SAVE_PATH, "rb") as file:
merged = pickle.load(file)
self.assertTrue(isinstance(merged, list) or isinstance(merged, tuple))
self.assertTrue(merged[0] == "merged")
pathlib.Path(self.SAVE_PATH).unlink()
self.assertFalse(pathlib.Path(self.SAVE_PATH).exists())
def test_load_merged_from_disk(self):
self._data.save(self.SAVE_PATH, overwrite=False)
self.assertTrue(pathlib.Path(self.SAVE_PATH).exists())
data: AugmentedTADPOLEData = AugmentedTADPOLEData(
self._tadpole_data, self.SAVE_PATH, self._tadpole_data.get_ptids()
)
x, y, t = data.getXY(self._tadpole_data.get_ptids(), self.MODALITY, TARGET)
self.assertTrue(x.shape[0] > 0)
pathlib.Path(self.SAVE_PATH).unlink()
self.assertFalse(pathlib.Path(self.SAVE_PATH).exists())
def test_load_merged_from_google_drive(self):
data: AugmentedTADPOLEData = AugmentedTADPOLEData(
self._tadpole_data, self.GOOGLE_DRIVE_URL, self._tadpole_data.get_ptids()
)
x, y, t = data.getXY(self._tadpole_data.get_ptids(), self.MODALITY, TARGET)
self.assertTrue(x.shape[0] > 0)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,777
|
rciszek/SMNSR
|
refs/heads/master
|
/tests/test_cross_validate.py
|
from unittest import TestCase
from smnsr.patients import TADPOLEData, AugmentedTADPOLEData
from smnsr.cross_validate import perform_cv, parse_args, evaluate_predictions
from sklearn.model_selection import KFold
import pickle
class Test(TestCase):
FOLDS = 2
TARGET = "ADAS13"
MODALITY_PATH = "../smnsr/modalities/"
OUTPUT_PATH = "../output/"
MODALITY_K = 2
CV_RESULT_FILE = "cv_result.p"
N_FOLDS = 10
DATA_FILE = MODALITY_PATH + "TADPOLE_D1_D2.csv"
CLI_ARGS = [
"--folds",
str(FOLDS),
"--target",
TARGET,
"--modality_path",
MODALITY_PATH,
"--precomputed_path",
OUTPUT_PATH,
"--bl_forecast",
"--precomputed",
"--mode",
"bypass_knnsr",
"--modality_k",
str(MODALITY_K),
"--output_path",
OUTPUT_PATH,
]
def test_parse_args(self):
args = parse_args(self.CLI_ARGS)
self.assertEqual(args.folds, self.FOLDS)
self.assertEqual(args.target, self.TARGET)
self.assertEqual(args.precompute_path, self.OUTPUT_PATH)
self.assertEqual(args.modality_path, self.MODALITY_PATH)
self.assertTrue(args.bl_forecast)
self.assertTrue(args.precomputed)
def test_perform_cv(self):
results = perform_cv(parse_args(self.CLI_ARGS))
self.assertIsNotNone(results)
def test_evaluate_predictions(self):
tadpole_data = TADPOLEData(
data=self.DATA_FILE, modality_k=2, modality_path=self.MODALITY_PATH
)
cv_results = pickle.load(open(self.OUTPUT_PATH + self.CV_RESULT_FILE, "rb"))
evaluate_predictions(cv_results, tadpole_data)
self.assertTrue(True)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,778
|
rciszek/SMNSR
|
refs/heads/master
|
/tests/test_regression.py
|
from unittest import TestCase
from smnsr.models import SMNSR
from smnsr.patients import AugmentedTADPOLEData, TADPOLEData
import pandas as pd
import numpy as np
import os
class TestSMNSR(TestCase):
MODALITY = "cognitive1"
OUTPUT_PATH = "../output/"
MODALITY_PATH = "../smnsr/modalities/"
TARGET = "ADAS13"
DATA_FILE = MODALITY_PATH + "TADPOLE_D1_D2.csv"
D3_DATA_FILE = MODALITY_PATH + "D3.csv"
TS_FILE = OUTPUT_PATH + "merged_all.p"
PRE_CALCULATED_KNNSR = (
"https://drive.google.com/uc?id=1diTUWzctbl5MfpgoKBuGa-hvIXVgJcx7"
)
assert os.path.exists(
DATA_FILE
), "TADPOLE_D1_D2.csv must be stored in the modality folder"
assert os.path.exists(D3_DATA_FILE), "D3.csv must be stored in the modality folder"
assert os.path.exists(DATA_FILE), "Pre-merged ts data must be provided"
tadpole_data = TADPOLEData(
data=DATA_FILE, modality_k=8, challenge_filter=True, modality_path=MODALITY_PATH
)
data = AugmentedTADPOLEData(tadpole_data, TS_FILE, tadpole_data.get_ptids())
FORECAST_STEP_SIZE = 6
FORECAST_DISTANCE = 120
D1_D2_FILE = "TADPOLE_D1_D2.csv"
def __forecast_test(self, x, forecast_start="2018-01-01", data=None):
if data is None:
data = self.data
model = SMNSR(data, training_cv_folds=2, mode="bypass_knnsr", forecast=True)
model.fit(data.get_ptids())
y_hat = model.predict(
x.tail(10),
self.TARGET,
forecast_start=forecast_start,
forecast_end="2022-12-01",
)
self.assertIsNotNone(y_hat)
self.assertTrue(y_hat.shape[0] > 0)
def test_forecast(self):
x = self.data.get_ptids()[0:100]
n_patients = len(x)
model = SMNSR(
self.data, training_cv_folds=2, mode="bypass_knnsr", forecast=True
)
model.fit(self.data.get_ptids())
y_hat = model.predict(x, self.TARGET)
self.assertIsNotNone(y_hat)
self.assertTrue(y_hat.shape[0] > 0)
def test_forecast_on_df(self):
x: pd.DataFrame = pd.read_csv(
self.MODALITY_PATH + self.D1_D2_FILE, low_memory=False
)
x = x.groupby(TADPOLEData.PTID).tail(1)
self.__forecast_test(x)
def test_forecast_on_df_to_date(self):
x = self.tadpole_data.df_raw
self.__forecast_test(x, forecast_start="2020-12-01")
def test_forecast_on_df_3(self):
x: pd.DataFrame = pd.read_csv(self.D3_DATA_FILE, low_memory=False)
x[TADPOLEData.PTID] = self.data.data.rids_to_ptids(x[TADPOLEData.RID])
x = x.iloc[0:100, :]
tadpole_data = TADPOLEData(
data=self.DATA_FILE,
modality_k=2,
challenge_filter=False,
modality_path=self.MODALITY_PATH,
)
data = AugmentedTADPOLEData(
tadpole_data, self.TS_FILE, tadpole_data.get_ptids()
)
self.__forecast_test(x, data=data)
def test_download_and_forecast_all(self):
data = AugmentedTADPOLEData(
self.tadpole_data, self.TS_FILE, self.tadpole_data.get_ptids()
)
model = SMNSR(
data,
training_cv_folds=2,
verbosity=2,
mode="bypass_knnsr",
max_modalities=8,
forecast=True,
)
model.fit(data.get_ptids())
prediction = model.predict(self.tadpole_data.df_raw, target=self.TARGET)
self.assertTrue(prediction.shape[0] > 0)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,779
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/models/base_stacked_model.py
|
from smnsr.patients import AugmentedTADPOLEData, TADPOLEData, create_features
from smnsr.models import BaseModel
import ray
import psutil
class BaseStackedModel(BaseModel):
def __init__(self, data: AugmentedTADPOLEData):
super().__init__(data)
def __df_to_pdb(self, df, n_cpus=0):
# Default to one worker per physical core when no explicit count is given
num_cpus = n_cpus if n_cpus >= 1 else psutil.cpu_count(logical=False)
ray.init(num_cpus=num_cpus, ignore_reinit_error=True)
# Create a new patient database
data = TADPOLEData(data=df)
# Create augmented features for training
ts_features, knn_models = create_features(
data, data.get_ptids(), [], return_knn=True
)
return AugmentedTADPOLEData(data, ts_features, data.get_ptids()), knn_models
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,780
|
rciszek/SMNSR
|
refs/heads/master
|
/smnsr/__init__.py
|
import logging.config
import yaml
import os
path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + "logging.yaml"
env_key = "LOG_CFG"
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, "rt") as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
else:
print("logging.yaml does not exist")
logging.basicConfig(level=logging.DEBUG)
from .cross_validate import perform_cv, parse_args
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,781
|
rciszek/SMNSR
|
refs/heads/master
|
/tests/test_TADPOLEData.py
|
import unittest
from smnsr.patients import TADPOLEData
import pandas as pd
import os
import shutil
MODALITY_PATH = "../smnsr/modalities/"
OUTPUT_PATH = "../output/"
TMP_PATH = "./tmp/"
TARGET = "ADAS13"
D3_FILE = "D3.csv"
TEST_PTID = "test_ptid"
DATA_FILE = MODALITY_PATH + "TADPOLE_D1_D2.csv"
class TestTADPOLEWrapper(unittest.TestCase):
assert os.path.exists(
DATA_FILE
), "TADPOLE_D1_D2.csv must be stored in the modality folder"
_data = TADPOLEData(data=DATA_FILE, modality_path=MODALITY_PATH)
def test_init_from_df(self):
df = pd.read_csv(DATA_FILE)
data = TADPOLEData(data=df, modality_path=MODALITY_PATH)
self.assertTrue(len(data.get_ptids()) > 0)
def test_save_modality(self):
if not os.path.exists(TMP_PATH):
os.mkdir(TMP_PATH)
self._data.save_modality(TMP_PATH)
self.assertTrue(os.path.exists(TMP_PATH))
if os.path.exists(TMP_PATH):
shutil.rmtree(TMP_PATH)
def test_get_modalities(self):
modalities = self._data.get_modalities()
self.assertIsNotNone(modalities)
self.assertTrue(len(modalities) > 0)
def test_get_xy(self):
ptids = self._data.get_ptids()
for modality in self._data.get_modalities():
x, y = self._data.getXY(ptids, modality, TARGET)
self.assertIsNotNone(x)
self.assertIsNotNone(y)
self.assertTrue(x.shape[0] > 0)
self.assertTrue(y.shape[0] == x.shape[0])
def test_get_ptids(self):
ptids = self._data.get_ptids()
self.assertIsNotNone(ptids)
self.assertTrue(len(ptids) > 0)
def __create_test_data(self):
d3_data = pd.read_csv(MODALITY_PATH + D3_FILE)
d3_data = d3_data.tail(1).copy()
d3_data[TADPOLEData.PTID] = self._data.get_ptids()
d3_data[TADPOLEData.C_MONTH] = 0
return d3_data
def test_distance_to_date(self):
months = self._data.distance_to_date("011_S_0002", 0, "2015-09-08")
self.assertEqual(120, months)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,782
|
rciszek/SMNSR
|
refs/heads/master
|
/setup.py
|
import setuptools
setuptools.setup(
name="smnsr",
version="0.4.5",
author="rciszek",
author_email="rkciszek@gmail.com",
description="SMNSR",
long_description="SMNSR",
long_description_content_type="text/markdown",
url="https://github.com/rciszek/SMNSR/",
packages=setuptools.find_packages(),
python_requires=">=3.7",
scripts=[("bin/smnsr_ts_folds"), ("bin/smnsr_cv"), ("bin/smnsr_merge")],
install_requires=[
"PySocks >= 1.7.1",
"pandas >= 1.0.5",
"matplotlib >= 3.2.2",
"scipy >= 1.4.1",
"xgboost >= 1.1.1",
"psutil >= 5.7.0",
"ray >= 0.8.4",
"vaex_core >= 2.0.3",
"pytictoc >= 1.5.0",
"gdown >= 3.11.1",
"numpy >= 1.18.4",
"seaborn >= 0.10.1",
"python_dateutil >= 2.8.1",
"PyYAML >= 5.3.1",
"scikit_learn >= 0.23.1",
"vaex >= 3.0.0",
],
package_data={
"smnsr": ["logging.yaml", "modalities/modalities.yaml"],
},
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,783
|
rciszek/SMNSR
|
refs/heads/master
|
/tests/test_KNNSR.py
|
from unittest import TestCase
from smnsr.patients import TADPOLEData
from smnsr.models import KNNSR
import numpy as np
MODALITY_PATH = "../smnsr/modalities/"
DATA_FILE = MODALITY_PATH + "TADPOLE_D1_D2.csv"
class TestKNNSRModel(TestCase):
data = TADPOLEData(
data=DATA_FILE, challenge_filter=True, modality_k=2, modality_path=MODALITY_PATH
)
PTID = "011_S_0002"
C_TIME = 0.0
FORECAST_WINDOW = 6.0
def test_fit(self):
ptids = self.data.get_ptids()
knn = KNNSR(self.data)
knn.fit(ptids)
self.assertTrue(knn.fitted)
def test_predict(self):
x = (
np.array([[self.PTID, self.C_TIME]]),
np.array([self.C_TIME + self.FORECAST_WINDOW]),
)
ptids = self.data.get_ptids()
knn = KNNSR(self.data, modality="patient")
knn.fit(ptids)
prediction = knn.predict(x)
self.assertIsNotNone(prediction)
|
{"/smnsr/models/knn.py": ["/smnsr/models/__init__.py"], "/smnsr/cross_validate.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py", "/smnsr/models/knn.py"], "/smnsr/patients/timeseries_creation.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/patients/AugmentedTADPOLEData.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_model.py": ["/smnsr/patients/__init__.py"], "/tests/test_timeseries_creation.py": ["/smnsr/patients/__init__.py"], "/smnsr/models/__init__.py": ["/smnsr/models/knn.py", "/smnsr/models/base_model.py", "/smnsr/models/base_stacked_model.py", "/smnsr/models/regression.py"], "/smnsr/patients/__init__.py": ["/smnsr/patients/TADPOLEData.py", "/smnsr/patients/AugmentedTADPOLEData.py", "/smnsr/patients/timeseries_creation.py"], "/tests/test_AugmentedTADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_cross_validate.py": ["/smnsr/patients/__init__.py", "/smnsr/cross_validate.py"], "/tests/test_regression.py": ["/smnsr/models/__init__.py", "/smnsr/patients/__init__.py"], "/smnsr/models/base_stacked_model.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"], "/smnsr/__init__.py": ["/smnsr/cross_validate.py"], "/tests/test_TADPOLEData.py": ["/smnsr/patients/__init__.py"], "/tests/test_KNNSR.py": ["/smnsr/patients/__init__.py", "/smnsr/models/__init__.py"]}
|
39,792
|
AdrianDubel/Selenium-Project
|
refs/heads/master
|
/Tests/test_Add_Book.py
|
from selenium import webdriver
import unittest
from Pages.Addresses_Page import AddressPage
from Pages.Home_Page import HomePage
from faker import Faker
class AddAddressTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
cls.driver.implicitly_wait(10)
cls.driver.maximize_window()
def test_add_address(self):
driver = self.driver
url = "http://a.testaddressbook.com/sign_in"
email = "test@wp.pl"
password = "12345678"
faker = Faker()
firstname = faker.first_name()
lastname = faker.last_name()
address1 = "Street Avenue"
address2 = "23"
city = "Los Angeles"
state = "ke"
age = "27"
zip_code = faker.zipcode()
phone = "222333444"
driver.get(url)
home_page = HomePage(driver)
home_page.enter_email(email)
home_page.enter_password(password)
home_page.click_submit()
address_page = AddressPage(driver)
address_page.click_address()
address_page.add_new_address()
address_page.enter_first_name(firstname)
address_page.enter_last_name(lastname)
address_page.first_address(address1)
address_page.second_address(address2)
address_page.enter_city(city)
address_page.state(state)
address_page.enter_zipcode(zip_code)
address_page.enter_age(age)
address_page.enter_phone(phone)
address_page.submit()
address_page.verify_alert()
@classmethod
def tearDownClass(cls):
cls.driver.close()
cls.driver.quit()
if __name__ == '__main__':
unittest.main()
|
{"/Tests/test_Add_Book.py": ["/Pages/Addresses_Page.py", "/Pages/Home_Page.py"], "/Tests/test_Login.py": ["/Pages/Home_Page.py"], "/Tests/test_Register.py": ["/Pages/Home_Page.py"]}
|
39,793
|
AdrianDubel/Selenium-Project
|
refs/heads/master
|
/Pages/Addresses_Page.py
|
from selenium.webdriver.common.keys import Keys
class AddressPage:
def __init__(self, driver):
self.driver = driver
self.addresses_btn_css = "[data-test='addresses']"
self.new_address_xpath = "//a[.='New Address']"
self.firstname_input_css = "#address_first_name"
self.lastname_input_css = "#address_last_name"
self.firstaddress_input_css = "#address_street_address"
self.secondaddress_input_css = "#address_secondary_address"
self.city_input_css = "#address_city"
self.state_drop_css = "#address_state"
self.zip_code_css = "#address_zip_code"
self.age_input_css = "#address_age"
self.phone_input_css = "#address_phone"
self.climbing_check_css = "#address_interest_climb"
self.submit_address_xpath = "//input[@name='commit']"
self.alert_css = ".alert"
def click_address(self):
self.driver.find_element_by_css_selector(self.addresses_btn_css).click()
def add_new_address(self):
self.driver.find_element_by_xpath(self.new_address_xpath).click()
def enter_first_name(self, name):
self.driver.find_element_by_css_selector(self.firstname_input_css).send_keys(name)
def enter_last_name(self, lastname):
self.driver.find_element_by_css_selector(self.lastname_input_css).send_keys(lastname)
def first_address(self, firstaddress):
self.driver.find_element_by_css_selector(self.firstaddress_input_css).send_keys(firstaddress)
def second_address(self, secondaddress):
self.driver.find_element_by_css_selector(self.secondaddress_input_css).send_keys(secondaddress)
def enter_city(self, city):
self.driver.find_element_by_css_selector(self.city_input_css).send_keys(city)
def state(self, state):
drop = self.driver.find_element_by_css_selector(self.state_drop_css)
drop.click()
drop.send_keys(state)
drop.send_keys(Keys.ENTER)
def enter_zipcode(self, zipcode):
self.driver.find_element_by_css_selector(self.zip_code_css).send_keys(zipcode)
def enter_age(self, age):
self.driver.find_element_by_css_selector(self.age_input_css).send_keys(age)
def enter_phone(self, number):
self.driver.find_element_by_css_selector(self.phone_input_css).send_keys(number)
def climbing(self):
self.driver.find_element_by_css_selector(self.climbing_check_css).click()
def submit(self):
self.driver.find_element_by_xpath(self.submit_address_xpath).click()
def verify_alert(self):
alert = self.driver.find_element_by_css_selector(self.alert_css)
alert_visible = alert.is_displayed()
alert_text = alert.text
assert alert_visible
assert alert_text == "Address was successfully created."
|
{"/Tests/test_Add_Book.py": ["/Pages/Addresses_Page.py", "/Pages/Home_Page.py"], "/Tests/test_Login.py": ["/Pages/Home_Page.py"], "/Tests/test_Register.py": ["/Pages/Home_Page.py"]}
|
39,794
|
AdrianDubel/Selenium-Project
|
refs/heads/master
|
/Pages/Home_Page.py
|
class HomePage:
def __init__(self, driver):
self.driver = driver
self.home_btn_xpath = "//a[@href='/']"
self.signin_btn_xpath = "//a[@id='sign-in']"
self.email_input_css = "#session_email"
self.password_input_css = "#session_password"
self.submit_xpath = "//input[@name='commit']"
self.signup_btn_css = "[data-test='sign-up']"
self.reg_email_input_css = "#user_email"
self.reg_password_input_css = "#user_password"
self.title_css = "h1"
self.alert_css = ".alert"
def click_home(self):
self.driver.find_element_by_xpath(self.home_btn_xpath).click()
def click_signin(self):
self.driver.find_element_by_xpath(self.signin_btn_xpath).click()
def enter_email(self, email):
self.driver.find_element_by_css_selector(self.email_input_css).send_keys(email)
def enter_password(self, password):
self.driver.find_element_by_css_selector(self.password_input_css).send_keys(password)
def click_submit(self):
self.driver.find_element_by_xpath(self.submit_xpath).click()
def click_signup(self):
self.driver.find_element_by_css_selector(self.signup_btn_css).click()
def enter_new_email(self, email):
self.driver.find_element_by_css_selector(self.reg_email_input_css).send_keys(email)
def enter_new_password(self, password):
self.driver.find_element_by_css_selector(self.reg_password_input_css).send_keys(password)
def check_header(self):
title = self.driver.find_element_by_css_selector(self.title_css)
title_text = title.text
title_visible = title.is_displayed()
assert title_text == "Welcome to Address Book"
assert title_visible
def check_alert(self):
error = self.driver.find_element_by_css_selector(self.alert_css)
error_text = error.text
error_visible = error.is_displayed()
assert error_text == "Bad email or password."
assert error_visible
|
{"/Tests/test_Add_Book.py": ["/Pages/Addresses_Page.py", "/Pages/Home_Page.py"], "/Tests/test_Login.py": ["/Pages/Home_Page.py"], "/Tests/test_Register.py": ["/Pages/Home_Page.py"]}
|
39,795
|
AdrianDubel/Selenium-Project
|
refs/heads/master
|
/Tests/test_Login.py
|
from selenium import webdriver
import unittest
from Pages.Home_Page import HomePage
class LoginTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
cls.driver.implicitly_wait(10)
cls.driver.maximize_window()
def test_login_valid(self):
driver = self.driver
url = "http://a.testaddressbook.com/sign_in"
email = "test@wp.pl"
password = "12345678"
driver.get(url)
home_page = HomePage(driver)
home_page.enter_email(email)
home_page.enter_password(password)
home_page.click_submit()
home_page.check_header()
def test_login_invalid(self):
driver = self.driver
url = "http://a.testaddressbook.com/sign_in"
email = "test_wrong@wp.pl"
password = "12345678"
driver.get(url)
home_page = HomePage(driver)
home_page.enter_email(email)
home_page.enter_password(password)
home_page.click_submit()
home_page.check_alert()
@classmethod
def tearDownClass(cls):
cls.driver.close()
cls.driver.quit()
if __name__ == '__main__':
unittest.main()
|
{"/Tests/test_Add_Book.py": ["/Pages/Addresses_Page.py", "/Pages/Home_Page.py"], "/Tests/test_Login.py": ["/Pages/Home_Page.py"], "/Tests/test_Register.py": ["/Pages/Home_Page.py"]}
|
39,796
|
AdrianDubel/Selenium-Project
|
refs/heads/master
|
/Tests/test_Register.py
|
from selenium import webdriver
import unittest
from Pages.Home_Page import HomePage
from faker import Faker
class RegisterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
cls.driver.implicitly_wait(10)
cls.driver.maximize_window()
def test_register(self):
driver = self.driver
url = "http://a.testaddressbook.com/sign_in"
faker = Faker()
email = faker.company_email()
password = "12345678"
driver.get(url)
home_page = HomePage(driver)
home_page.click_signup()
home_page.enter_new_email(email)
home_page.enter_new_password(password)
home_page.click_submit()
home_page.check_header()
@classmethod
def tearDownClass(cls):
cls.driver.close()
cls.driver.quit()
if __name__ == '__main__':
unittest.main()
|
{"/Tests/test_Add_Book.py": ["/Pages/Addresses_Page.py", "/Pages/Home_Page.py"], "/Tests/test_Login.py": ["/Pages/Home_Page.py"], "/Tests/test_Register.py": ["/Pages/Home_Page.py"]}
|
39,797
|
ayuspark/book_reviews
|
refs/heads/master
|
/apps/reviews/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-26 20:24
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(max_length=140)),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.PositiveIntegerField(default=5, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
('comment', models.TextField(max_length=140)),
('created_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to=settings.AUTH_USER_MODEL)),
],
),
]
|
{"/apps/reviews/views.py": ["/apps/reviews/models.py", "/apps/reviews/forms.py"], "/apps/login_register/views.py": ["/apps/login_register/forms.py"], "/apps/reviews/forms.py": ["/apps/reviews/models.py"]}
|
39,798
|
ayuspark/book_reviews
|
refs/heads/master
|
/apps/reviews/views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .models import *
from .forms import *
# Create your views here.
def index(request):
all_reviews = Review.objects.all().order_by('-created_date')[:5]
books_with_reviews = Book.objects.exclude(reviews__isnull=True)
context = {
'all_reviews': all_reviews,
'books_with_reviews': books_with_reviews,
}
return render(request, 'reviews/index.html', context)
def book_detail(request, book_id):
book_query = get_object_or_404(Book, pk=book_id)
review_form = ReviewForm(request.POST or None)
context = {
'book_query': book_query,
'book_id': book_id,
'review_form': review_form,
}
return render(request, 'reviews/book_detail.html', context)
@login_required(login_url='/')
def book_add_review(request, book_id):
review_form = ReviewForm(request.POST or None)
if review_form.is_valid():
book_query = Book.objects.get(id=book_id)
new_review = review_form.save(commit=False)
new_review.user = get_object_or_404(User, pk=request.user.id)
new_review.book = book_query
new_review.save()
return redirect('reviews:book_detail', book_id=book_id)
@login_required(login_url='/')
def add_book(request):
book_form = BookForm(request.POST or None)
select_author_form = SelectAuthorForm(request.POST or None)
create_author_form = AuthorForm(request.POST or None)
review_form = ReviewForm(request.POST or None)
context = {
'book_form': book_form,
'select_author_form': select_author_form,
'create_author_form': create_author_form,
'review_form': review_form,
}
if book_form.is_valid() and review_form.is_valid():
# check if book exists
new_book = book_form.save(commit=False)
try:
book_query = Book.objects.get(title__iexact=new_book.title)
except Book.DoesNotExist:
book_form.save()
book_query = Book.objects.get(title__iexact=new_book.title)
# query author to add to the new book
# either select or create an author
if select_author_form.has_changed() and select_author_form.is_valid() and not create_author_form.has_changed():
author_query = select_author_form.cleaned_data.get('choose_author')
elif create_author_form.has_changed() and create_author_form.is_valid() and not select_author_form.has_changed():
first_name = create_author_form.cleaned_data.get('first_name')
last_name = create_author_form.cleaned_data.get('last_name')
# check if entered author exists
if Author.objects.filter(first_name__iexact=first_name).filter(last_name__iexact=last_name):
messages.error(request, 'Author exists, please select from the list.')
return redirect('reviews:add_book')
else:
author_query = Author.objects.create(first_name=first_name, last_name=last_name)
else:
messages.error(request, 'You can either select or create an author.')
return redirect('reviews:add_book')
# add author to book
book_query.authors.add(author_query)
# add review
new_review = review_form.save(commit=False)
new_review.user = get_object_or_404(User, pk=request.user.id)
new_review.book = book_query
new_review.save()
book_id = book_query.id
return redirect('reviews:book_detail', book_id=book_id)
return render(request, 'reviews/add_new.html', context)
@login_required(login_url='/')
def user_detail(request, user_id):
user_query = get_object_or_404(User, id=user_id)
reviews_query = user_query.reviews.all()
reviews_query_count = reviews_query.count()
context = {
'user_query': user_query,
'reviews_query': reviews_query,
'reviews_query_count': reviews_query_count,
}
return render(request, 'reviews/user_detail.html', context)
@login_required(login_url='/')
def review_delete(request, book_id, review_id):
Review.objects.get(pk=review_id).delete()
return redirect('reviews:book_detail', book_id=book_id)
|
{"/apps/reviews/views.py": ["/apps/reviews/models.py", "/apps/reviews/forms.py"], "/apps/login_register/views.py": ["/apps/login_register/forms.py"], "/apps/reviews/forms.py": ["/apps/reviews/models.py"]}
|
39,799
|
ayuspark/book_reviews
|
refs/heads/master
|
/apps/reviews/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^books/(?P<book_id>\d+)$', views.book_detail, name='book_detail'),
url(r'^books/(?P<book_id>\d+)/add_review$', views.book_add_review, name='book_add_review'),
url(r'^books/add$', views.add_book, name='add_book'),
url(r'^books/(?P<book_id>\d+)/review/(?P<review_id>\d+)/delete$',
views.review_delete, name='review_delete'),
url(r'^users/(?P<user_id>\d+)$', views.user_detail, name='user_detail'),
]
|
{"/apps/reviews/views.py": ["/apps/reviews/models.py", "/apps/reviews/forms.py"], "/apps/login_register/views.py": ["/apps/login_register/forms.py"], "/apps/reviews/forms.py": ["/apps/reviews/models.py"]}
|
39,800
|
ayuspark/book_reviews
|
refs/heads/master
|
/apps/login_register/forms.py
|
from django import forms
from django.contrib.auth import authenticate, get_user_model, login, logout
User = get_user_model()
class UserLoginForm(forms.Form):
"""
" use the following to use EMAIL to sign in
' email = forms.EmailField()
"""
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
def clean(self, *arg, **kwargs):
""""
" email = self.cleaned_data.get('email')
"""
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
"""
" if email and password:
" username = User.objects.get(email=email).username
"""
user_auth = authenticate(username=username, password=password)
user_query = User.objects.filter(username=username)
if not user_query.exists():
raise forms.ValidationError('User does not exist.')
if not user_auth:
raise forms.ValidationError('Something is wrong with your username or password.')
# if not user.check_password(password):
# raise forms.ValidationError('Something is wrong with your username or password.')
if not user_auth.is_active:
raise forms.ValidationError('This user is no longer active')
return super(UserLoginForm, self).clean()
class UserRegistrationForm(forms.ModelForm):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput,
min_length=8,
help_text='At least 8 characters long.')
confirm_password = forms.CharField(widget=forms.PasswordInput,
label='Confirm Password',)
class Meta:
model = User
fields = [
'username',
'first_name',
'last_name',
'email',
'password',
'confirm_password',
]
def clean(self):
password = self.cleaned_data.get('password')
confirm_password = self.cleaned_data.get('confirm_password')
email = self.cleaned_data.get('email')
if password != confirm_password:
raise forms.ValidationError('Passwords must match.')
email_query = User.objects.filter(email=email)
if email_query.exists():
raise forms.ValidationError('Email already exists.')
return super(UserRegistrationForm, self).clean()
|
{"/apps/reviews/views.py": ["/apps/reviews/models.py", "/apps/reviews/forms.py"], "/apps/login_register/views.py": ["/apps/login_register/forms.py"], "/apps/reviews/forms.py": ["/apps/reviews/models.py"]}
|
39,801
|
ayuspark/book_reviews
|
refs/heads/master
|
/apps/reviews/models.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils import timezone
# Create your models here.
class Author(models.Model):
first_name = models.CharField(max_length=25)
last_name = models.CharField(max_length=25)
def __str__(self):
name = self.first_name.title() + ' ' + self.last_name.title()
return name
class Book(models.Model):
title = models.CharField(max_length=140)
authors = models.ManyToManyField(Author, related_name='books')
def __str__(self):
return self.title
class Review(models.Model):
rating = models.PositiveIntegerField(default=5,
validators=(MinValueValidator(1),
MaxValueValidator(5))
)
comment = models.TextField(max_length=140)
user = models.ForeignKey(User, related_name='reviews')
book = models.ForeignKey(Book, related_name='reviews')
created_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.comment
def update(self):
self.update_date = timezone.now()
self.save()
|
{"/apps/reviews/views.py": ["/apps/reviews/models.py", "/apps/reviews/forms.py"], "/apps/login_register/views.py": ["/apps/login_register/forms.py"], "/apps/reviews/forms.py": ["/apps/reviews/models.py"]}
|
39,802
|
ayuspark/book_reviews
|
refs/heads/master
|
/apps/login_register/views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout, get_user_model
from .forms import *
# Create your views here.
def index(request):
login_form = UserLoginForm(request.POST or None)
reg_form = UserRegistrationForm(request.POST or None)
context = {
'login_form': login_form,
'reg_form': reg_form,
}
return render(request, 'login_register/index.html', context)
def to_login(request):
login_form = UserLoginForm(request.POST or None)
reg_form = UserRegistrationForm()
context = {
'login_form': login_form,
'reg_form': reg_form,
}
if login_form.is_valid():
"""
" use the following if using EMAIL to sign in
" email = login_form.cleaned_data.get('email')
"""
username = login_form.cleaned_data.get('username')
password = login_form.cleaned_data.get('password')
user_model = get_user_model()
"""
" username = user_model.objects.get(email=email).username
"""
user = authenticate(username=username, password=password)
login(request, user)
return redirect('reviews:index')
return render(request, 'login_register/index.html', context)
def to_logout(request):
logout(request)
return redirect('login_register:index')
def to_register(request):
login_form = UserLoginForm()
reg_form = UserRegistrationForm(request.POST or None)
context = {
'login_form': login_form,
'reg_form': reg_form,
}
if reg_form.is_valid():
new_user = reg_form.save(commit=False)
password = reg_form.cleaned_data.get('password')
new_user.set_password(password)
new_user.save()
log_new_user = authenticate(username=new_user.username, password=password)
login(request, log_new_user)
return redirect('reviews:index')
return render(request, 'login_register/index.html', context)
|
{"/apps/reviews/views.py": ["/apps/reviews/models.py", "/apps/reviews/forms.py"], "/apps/login_register/views.py": ["/apps/login_register/forms.py"], "/apps/reviews/forms.py": ["/apps/reviews/models.py"]}
|
39,803
|
ayuspark/book_reviews
|
refs/heads/master
|
/apps/reviews/forms.py
|
from django import forms
from django.contrib.auth import authenticate, get_user_model, login, logout
from .models import *
User = get_user_model()
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = ('title',)
class AuthorForm(forms.ModelForm):
# fields declared on the form take precedence over Meta.widgets, so the
# placeholder attrs must live on the field declarations themselves
first_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'placeholder': 'first name'}))
last_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'placeholder': 'last name'}))
class Meta:
model = Author
fields = ('first_name', 'last_name')
class SelectAuthorForm(forms.Form):
choose_author = forms.ModelChoiceField(queryset=Author.objects.all().order_by('first_name'),
label='Select an author',
required=False)
class ReviewForm(forms.ModelForm):
rating = forms.ChoiceField(choices=[(x, x) for x in range(1, 6)])
class Meta:
model = Review
fields = ('comment', 'rating')
widgets = {
'comment': forms.Textarea(attrs={'class': 'form-control', 'rows': '3'})
}
|
{"/apps/reviews/views.py": ["/apps/reviews/models.py", "/apps/reviews/forms.py"], "/apps/login_register/views.py": ["/apps/login_register/forms.py"], "/apps/reviews/forms.py": ["/apps/reviews/models.py"]}
|
39,868
|
zadsa/sgp4_sat_tracker
|
refs/heads/master
|
/CLI.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import serial
import GetUserData
import GetSat
import GetLook
from Pelco_D import Tracker
'''
#Serial Output Mode
#if you want to use serial.
#Windows
ser=serial.Serial("COM1",9600,timeout=0.5)
#Linux
ser=serial.Serial("/dev/ttyUSB0",9600,timeout=0.5)
'''
#Normal Mode
GetUserData.update("shell")
line1,line2,Lat,Lon,Alt = GetUserData.get_user_data("shell",'','','','' )
#Test Mode
# line1 = "1 27607U 02058C 18338.87750224 -.00000040 00000-0 15328-4 0 9993"
# line2 = "2 27607 64.5552 288.2486 0079235 293.7328 65.5477 14.75502172858143"
# Lat,Lon,Alt = 30,30,0
GetSat.generate(line1,line2)
GetLook.generate(Lat,Lon,Alt)
# print "You are tracking : "+str.upper(Sat)+"."
print "You are at Lat : "+str(Lat) +\
" Lon : "+str(Lon)+\
" Altitude :"+str(Alt)+"m\n"
tt = time.time()
eciSat = GetSat.get_eciSat(tt)
AZ,EL = GetLook.GetLook(tt,eciSat)
if EL>=5:
raw_input("Passing Now,Press ENTER to continue tracking ...")
if EL<5 :
pass_time , maxEL = GetLook.GetPassData(tt,eciSat)
local_time = time.localtime(pass_time)
print "Next Pass Time: " + time.asctime(local_time)+"\n"
raw_input("Press ENTER to continue tracking ...\n\n")
AZ_now = 0
EL_now = 0
a=0
b=0
#pre-set the speeds so the position update below never hits an undefined name
azspeed=float(0)
elspeed=float(0)
T=0
if T == 1:
Tracker = Tracker("/dev/ttyUSB0",2400)
while True:
tt = time.time()
eciSat = GetSat.get_eciSat(tt)
AZ,EL = GetLook.GetLook(tt,eciSat)
serial_str="AZ"+str(AZ)+" EL"+str(EL)+" Easycomm"
print AZ,EL
print AZ_now,EL_now
if T==1:
if AZ_now < AZ :
Tracker.right()
a=time.time()
azspeed=float(3.2)
print "right"
time.sleep(0.2)
if AZ_now > AZ :
Tracker.left()
a=time.time()
azspeed=float(-3.2)
print "left"
time.sleep(0.2)
if EL_now < EL :
Tracker.up()
b=time.time()
elspeed=float(3.2)
print "up"
time.sleep(0.2)
if EL_now > EL :
Tracker.down()
b=time.time()
elspeed=float(-3.2)
print "down"
time.sleep(0.2)
AZ_now=float(AZ_now)+(time.time()-a)*azspeed
EL_now=float(EL_now)+(time.time()-b)*elspeed
Tracker.stop()
'''
#if you want to use serial
ser.write("Whatever you need")
#For example: ser.write(serial_str)
'''
'''
#If you need to control the frequency of date out:
time.sleep() #Second
'''
|
{"/server.py": ["/Pelco_D.py"], "/about_Tracker/Track.py": ["/Pelco_D.py"]}
|
39,869
|
zadsa/sgp4_sat_tracker
|
refs/heads/master
|
/server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask
from flask import request
from flask import jsonify
from flask import render_template
import time
import serial
import GetUserData
import GetSat
import GetLook
import Pelco_D
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
context = {
'lat': 0,
'lon': 0,
'alt': 0,
'az': 0,
'el': 0
}
return render_template('index.html', **context)
@app.route('/update', methods=['POST', "GET"])
def update():
GetUserData.update("gui")
res = {
'az': "Update Done",
'el': " ",
}
return jsonify(res)
@app.route('/predict', methods=['POST', "GET"])
def predict():
receive = request.json
Sat = str(receive['satname'])
Lat = float(receive['lat'])
Lon = float(receive['lon'])
Alt = float(receive['alt'])
line1, line2, Lat, Lon, Alt = GetUserData.get_user_data("gui", Sat, Lat, Lon, Alt)
GetSat.generate(line1, line2)
GetLook.generate(Lat, Lon, Alt)
tt = time.time()
eciSat = GetSat.get_eciSat(tt)
AZ, EL = GetLook.GetLook(tt, eciSat)
res = {
'az': AZ,
'el': EL
}
return jsonify(res)
@app.route('/newTracker', methods=['POST', "GET"])
def newTracker():
receive = request.json
global Tracker
if receive['cmd'] == "y":
Tracker = Pelco_D.Tracker("/dev/ttyUSB0",2400)
if receive['cmd'] == "n":
Tracker.close()
del Tracker
return "ok"
@app.route('/setstep', methods=['POST', "GET"])
def setstep():
receive = request.json
if receive['cmd'] == "LD":
Tracker.down()
time.sleep(0.5)
Tracker.left()
if receive['cmd'] != "LD":
if receive['cmd'] == "up":
Tracker.up()
if receive['cmd'] == "down":
Tracker.down()
if receive['cmd'] == "left":
Tracker.left()
if receive['cmd'] == "right":
Tracker.right()
time.sleep(0.5)
Tracker.stop()
return "ok"
@app.route('/track', methods=['POST', "GET"])
def track():
receive = request.json
if receive['lat'] == "update":
GetUserData.update("gui")
res = {
'az': "Update Done",
'el': " ",
}
return jsonify(res)
# the tracking branch is unfinished; return an empty payload so the view never returns None
return jsonify({'az': '', 'el': ''})
if __name__ == '__main__':
app.debug = True
app.run('0.0.0.0', 8080)
|
{"/server.py": ["/Pelco_D.py"], "/about_Tracker/Track.py": ["/Pelco_D.py"]}
|
39,870
|
zadsa/sgp4_sat_tracker
|
refs/heads/master
|
/about_Tracker/LSM303DLH.py
|
import wiringpi as wpi
import math
ACC=wpi.wiringPiI2CSetup(0x18)
wpi.wiringPiI2CWriteReg8(ACC,0x20, 0x27)
while True:
ACC_x = wpi.wiringPiI2CReadReg8(ACC, 0x28) << 8 | wpi.wiringPiI2CReadReg8(ACC, 0x29)
#
ACC_y = wpi.wiringPiI2CReadReg8(ACC, 0x2a) << 8 | wpi.wiringPiI2CReadReg8(ACC, 0x2b)
#
ACC_z = wpi.wiringPiI2CReadReg8(ACC, 0x2c) << 8 | wpi.wiringPiI2CReadReg8(ACC, 0x2d)
#
#
#
#
#
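# convert the raw 16-bit readings from unsigned to signed (two's complement)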
if ACC_x > 32768:
ACC_x = -(65535 - ACC_x + 1)
#
#
if ACC_y > 32768:
ACC_y = -(65535 - ACC_y + 1)
#
#
if ACC_z > 32768:
ACC_z = -(65535 - ACC_z + 1)
#
if ACC_x>15936:
ACC_x=15936
if ACC_x<-15936:
ACC_x=-15936
#
# angle = math.acos(float(yAccl)/float(1024))*180/5.0/math.pi
#angle= math.atan2(ACC_y,ACC_x)* (180/3.14159265) + 180
# angle
AZ=90-math.acos(float(ACC_x)/float(15936))*180/math.pi
print AZ
#XAxis = (float(X) - xOffset) * mgPerDigit;
#YAxis = (float(Y) - yOffset) * mgPerDigit;
#ZAxis = float(Z) * mgPerDigit;
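# ---- second, standalone experiment below: magnetometer heading ----
# (the accelerometer loop above never exits, so these sections are meant to run separately)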
import wiringpi as wpi
import math
import time
MAG=wpi.wiringPiI2CSetup(0x1e)
wpi.wiringPiI2CWriteReg8(MAG,0x00, 0x0c)#Minimum data output rate (Hz)
wpi.wiringPiI2CWriteReg8(MAG,0x01, 0x20)
wpi.wiringPiI2CWriteReg8(MAG,0x02, 0x00)
while True:
MAG_x = wpi.wiringPiI2CReadReg8(MAG, 0x03) << 8 | wpi.wiringPiI2CReadReg8(MAG, 0x04)
#
MAG_y = wpi.wiringPiI2CReadReg8(MAG, 0x05) << 8 | wpi.wiringPiI2CReadReg8(MAG, 0x06)
#
MAG_z = wpi.wiringPiI2CReadReg8(MAG, 0x07) << 8 | wpi.wiringPiI2CReadReg8(MAG, 0x08)
#
#
#
#
if MAG_x>32768:
MAG_x = -(65535 - MAG_x + 1)
#
#
if MAG_y>32768:
MAG_y = -(65535 - MAG_y + 1)
#
#
if MAG_z>32768:
MAG_z = -(65535 - MAG_z + 1)
#
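# heading from the horizontal magnetometer components; declinationAngle converts
# the local magnetic declination from degrees to radians before adding it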
heading = math.atan2(float(MAG_y), float(MAG_x))
declinationAngle = (114.0 + (43.0 / 60.0)) / (180 / math.pi)
heading = heading + declinationAngle
print heading*180.0/math.pi
time.sleep(0.5)
#
print MAG_x,MAG_y,MAG_z
angle= math.atan2(float(MAG_y),float(MAG_x))*(180/math.pi)+180
print angle
|
{"/server.py": ["/Pelco_D.py"], "/about_Tracker/Track.py": ["/Pelco_D.py"]}
|
39,871
|
zadsa/sgp4_sat_tracker
|
refs/heads/master
|
/Pelco_D.py
|
import serial
# pyserial's write() expects bytes, so the 7-byte Pelco-D frames are stored as bytearrays
up = bytearray([0xff,0x01,0x00,0x10,0x00,0xff,0x10])
down = bytearray([0xff,0x01,0x00,0x08,0x00,0xff,0x08])
left = bytearray([0xff,0x01,0x00,0x04,0xff,0x00,0x04])
right = bytearray([0xff,0x01,0x00,0x02,0xff,0x00,0x02])
stop = bytearray([0xff,0x01,0x00,0x00,0x00,0x00,0x01])
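# Pelco-D frame layout: sync (0xff), camera address, command1, command2,
# data1 (pan speed), data2 (tilt speed), checksum = sum of bytes 2-6 mod 256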
'''
how to use:
from Pelco_D import Tracker
Tracker = Tracker("/dev/ttyUSB0",2400)
'''
class Tracker:
def __init__(self, add, baud_rate):
self.ser=serial.Serial(add,baud_rate,timeout=0.5)
def up(self):
self.ser.write(up)
def down(self):
self.ser.write(down)
def left(self):
self.ser.write(left)
def right(self):
self.ser.write(right)
def stop(self):
self.ser.write(stop)
def close(self):
self.ser.close()
def write(self, data):
self.ser.write(data)
|
{"/server.py": ["/Pelco_D.py"], "/about_Tracker/Track.py": ["/Pelco_D.py"]}
|
39,872
|
zadsa/sgp4_sat_tracker
|
refs/heads/master
|
/GUI.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from Tkinter import *
import sys
import time
import serial
import threading
import GetUserData
import GetSat
import GetLook
#Stop Tracking
def stop_tracking():
global stop
stop = 1
#Update Data
def update():
GetUserData.update("gui")
#Quit GUI & Shell
def quit():
global timer
if timer is not None:
timer.cancel()
root.destroy()
#Passing time & max EL angle
'''
def passing(EL):
if EL>=5:
print "Passing Now ..."
var_pass_time.set("Passing Now ...")
if EL<5:
pass_time , maxEL = GetLook.GetPassData(tt,eciSat)
local_time = time.localtime(pass_time)
print "Next Passing Time:"+ time.asctime(local_time)
var_pass_time.set(str(local_time.tm_year)+"/"+str(local_time.tm_mon)+"/"+str(local_time.tm_mday)+" "+str(local_time.tm_hour)+":"+str(local_time.tm_min)+":"+str(local_time.tm_sec))
var_maxEL.set(str(maxEL))
var_pass_time = StringVar()
var_maxEL = StringVar()
'''
#
#Start Tracking
def start():
# i=language.get()
global stop
stop = 0
Sat=e1.get()
Lat=float(e2.get())
Lon=float(e3.get())
Alt=float(e4.get())
global ser
if mode.get() == 2 or mode.get() == 3 :
if sys.platform == "win32":
ser=serial.Serial("COM"+e5.get(),2400,timeout=0.5)
if sys.platform == "linux2":
ser=serial.Serial("/dev/ttyUSB"+e5.get(),2400,timeout=0.5)
print "You are tracking : " +str.upper(Sat)+"."
print "You are at Lat : " +str(Lat) +\
" Lon : " +str(Lon)+\
" Altitude :" +str(Alt)+"m"
line1,line2,Lat,Lon,Alt = GetUserData.get_user_data("gui",Sat,Lat,Lon,Alt)
GetSat.generate(line1,line2)
GetLook.generate(Lat,Lon,Alt)
tt = time.time()
eciSat = GetSat.get_eciSat(tt)
AZ,EL = GetLook.GetLook(tt,eciSat)
if EL>=5:
#pass_time , maxEL = GetLook.GetPassData(tt,eciSat)
print "Passing Now ..."
var_pass_time.set("Passing Now ...")
#var_maxEL.set(str(maxEL))
if EL<5:
pass_time , maxEL = GetLook.GetPassData(tt,eciSat)
local_time = time.localtime(pass_time)
print "Next Passing Time:" + time.asctime(local_time)
var_pass_time.set(str(local_time.tm_year)+"/"+str(local_time.tm_mon)+"/"+str(local_time.tm_mday)+" "+str(local_time.tm_hour)+":"+str(local_time.tm_min)+":"+str(local_time.tm_sec))
var_maxEL.set(str(maxEL))
global timer
timer = threading.Timer(0.1, fun_timer)
timer.start()
#Timer callback: recompute look angles every 0.1 s, output them, and re-arm
def fun_timer():
global timer
global ser
tt = time.time()
eciSat = GetSat.get_eciSat(tt)
AZ,EL = GetLook.GetLook(tt,eciSat)
#Screen Output
if mode.get() == 1 or mode.get()==3 :
AZ_flash.set(str(AZ))
EL_flash.set(str(EL))
#Serial Output
if mode.get() == 2 or mode.get() ==3 :
serial_str="AZ"+str(AZ)+" EL"+str(EL)+" Easycomm"
ser.write(serial_str)
if stop == 1:
if mode.get() == 2 or mode.get() == 3 :
ser.close()
#Stop fun_timer(y)
if stop == 0:
timer = threading.Timer(0.1, fun_timer)
timer.start()
#Main GUI
stop = 1
timer = None
root = Tk()
root.title("WRI_Sat_Tracker")
root.wm_minsize(400, 360)
# if sys.platform == "win32":
# root.iconbitmap(sys.path[0]+"/radio.ico")
#Button
ButtonStart = Button( root, text="Start", command=start)
ButtonStop = Button( root, text="Stop", command=stop_tracking)
ButtonUpdate = Button( root, text="Update Data", command=update)
#ButtonUpdate = Button( root, text="Update Data", command=lambda:update)
ButtonStart.grid( row=0, column=0, padx=30, pady=5)
ButtonStop.grid( row=0, column=1, padx=20, pady=5)
ButtonUpdate.grid( row=0, column=2, padx=20, pady=5)
#UserData
Label(root, text="Sat Name:"). grid(row=1)
Label(root, text="Lat:"). grid(row=2)
Label(root, text="Lon:"). grid(row=3)
Label(root, text="Alt:"). grid(row=4)
Label(root, text="Serial COM"). grid(row=4, column=2)
global e1,e2,e3,e4,e5
e1 = Entry(root, width=10)
e2 = Entry(root, width=10)
e3 = Entry(root, width=10)
e4 = Entry(root, width=10)
e5 = Entry(root, width=10)
e1.grid(row=1, column=1, padx=20, pady=5)
e2.grid(row=2, column=1, padx=20, pady=5)
e3.grid(row=3, column=1, padx=20, pady=5)
e4.grid(row=4, column=1, padx=20, pady=5)
e5.grid(row=5, column=2, padx=20, pady=5)
e1.insert(0,"SO-50")
e2.insert(0,"30")
e3.insert(0,"30")
e4.insert(0,"0")
#Passing Situation
Label(root, text="Passing Time :").grid(row=8, column=0,columnspan=2)
Label(root, text="Max EL :").grid(row=10, column=0,columnspan=2)
global var_pass_time
global var_maxEL
var_pass_time = StringVar()
var_maxEL = StringVar()
Label(root, textvariable=var_pass_time).grid(row=9, column=0,columnspan=2)
Label(root, textvariable=var_maxEL).grid(row=11, column=0,columnspan=2)
#Joke
Label(root, text="Author : BG6WRI"). grid(row=7, column=2)
Label(root, text="From WITARC"). grid(row=8, column=2)
#ButtonJoke = Button(root, text="一键日卫星", command=lambda:var.set("和我没关系啊,你找OpenATS去"))
#ButtonJoke.grid(row=8, column=2)
#var = StringVar()
#Label(root, textvariable=var).grid( row=9, column=2)
#Output
#Screen Ready
Label(root,text="AZ"). grid(row=5,column=0)
Label(root,text="EL"). grid(row=5,column=1)
global AZ_flash
global EL_flash
#StringVar rather than IntVar: fun_timer stores str(AZ)/str(EL)
AZ_flash=StringVar()
AZ_flash.set("")
EL_flash=StringVar()
EL_flash.set("")
Entry(root, textvariable=AZ_flash, width=10).grid(row=6, column=0)
Entry(root, textvariable=EL_flash, width=10).grid(row=6, column=1)
#Mode Choose
group = LabelFrame(root, text="Output Mode", padx=5, pady=5)
group.grid(row=1, column=2, padx=10, pady=10, rowspan=3)
LANGS = [("Screen", 1), ("Serial", 2),("Both", 3)]
global mode
mode = IntVar()
mode.set(1)
for lang, num in LANGS:
b = Radiobutton(group, text=lang, variable=mode, value=num)
b.pack(anchor=W)
root.bind('<Escape>', lambda e: root.destroy())
root.protocol("WM_DELETE_WINDOW", quit)
root.mainloop()
|
{"/server.py": ["/Pelco_D.py"], "/about_Tracker/Track.py": ["/Pelco_D.py"]}
|
39,873
|
zadsa/sgp4_sat_tracker
|
refs/heads/master
|
/about_Tracker/Track.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Not Completed
from Pelco_D import Tracker
from HMC5883L import HMC5883L
from MMA8452Q import MMA8452Q
import GetSat
import GetLook
import time
import math
import serial
#Change the System Time to Test.
#os.system('sudo date -s "2018-05-27 19:59:00"')
#---setup----
'''
#if you want to use serial.
#Windows
ser=serial.Serial("COM1",2400,timeout=0.5)
#Linux
ser=serial.Serial("/dev/ttyUSB0",2400,timeout=0.5)
'''
azimuth = HMC5883L()
elevation = MMA8452Q()
Tracker = Tracker("/dev/ttyUSB0", 2400)
#Tracker = Tracker("COM1")
#----------------Big Loop----------------
def goto(az,el):
AZ_now = azimuth.read() #azimuth
EL_now = elevation.read() #elevation
time.sleep(0.5)
def track(satname):
tt = time.time()
eciSat = GetSat.get_eciSat(tt)
AZ,EL = GetLook.GetLook(tt,eciSat)
AZ_now = azimuth.read() #azimuth
EL_now = elevation.read() #elevation
time.sleep(0.5)
def stop():
del azimuth
del elevation
Tracker.stop()
|
{"/server.py": ["/Pelco_D.py"], "/about_Tracker/Track.py": ["/Pelco_D.py"]}
|
39,874
|
Ajithbalakrishnan/FMODetect
|
refs/heads/master
|
/net_model.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import *
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow.keras.backend as K
import numpy as np
import cv2
import skimage.transform
import random
from dataset.helpers import *
import scipy
def conv_layer(x, n_filters, do_batchnorm = False, activation = None):
for kk in range(3):
x = Conv2D(n_filters, (3,3), activation = activation, padding='same')(x)
x = LeakyReLU(alpha=0.1)(x)
if do_batchnorm: x = BatchNormalization()(x)
return x
def get_model(input_bgr, n_gpus=1):
if input_bgr:
input_layer = Input(shape=(None, None, 6))
else:
input_layer = Input(shape=(None, None, 3))
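## U-Net-style encoder-decoder: four 2x downsamplings, with each encoder
## resolution concatenated back into the decoder via skip connections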
## encoder
x = conv_layer(input_layer, 16)
x_skip1 = x
x = MaxPooling2D()(x)
x = conv_layer(x, 64)
x_skip2 = x
x = MaxPooling2D()(x)
x = conv_layer(x, 128)
x_skip3 = x
x = MaxPooling2D()(x)
x = conv_layer(x, 256)
x_skip4 = x
x = MaxPooling2D()(x)
## latent space
x = conv_layer(x, 256)
## decoder
x = Conv2DTranspose(128, kernel_size=(2,2), strides=(2,2))(x)
x = Concatenate()([x, x_skip4])
x = conv_layer(x, 128)
x = Conv2DTranspose(64, kernel_size=(2,2), strides=(2,2))(x)
x = Concatenate()([x, x_skip3])
x = conv_layer(x, 64)
x = Conv2DTranspose(32, kernel_size=(2,2), strides=(2,2))(x)
x = Concatenate()([x, x_skip2])
x = conv_layer(x, 32)
x = Conv2DTranspose(16, kernel_size=(2,2), strides=(2,2))(x)
x = Concatenate()([x, x_skip1])
x = conv_layer(x, 16)
x = Conv2D(4,(3,3), activation = None, padding='same')(x)
x = Conv2D(4,(3,3), activation = None, padding='same')(x)
x = Conv2D(1,(3,3), activation = None, padding='same')(x)
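## single-channel output: a regression of 1 - truncated normalized distance
## transform to the object trajectory (the target built in get_data below)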
output_layer = x
model = Model(inputs = input_layer, outputs = output_layer)
if n_gpus > 1:
model = tf.keras.utils.multi_gpu_model(model, gpus=n_gpus)
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5)
model.compile(optimizer = optimizer, loss = custom_loss(input_layer),
metrics=["mae"], experimental_run_tf_function=False)
return model
def custom_loss(input_layer):
def fmo_loss_function(Yact, Ypred):
Ha = Yact[:,:,:,0]
H = Ypred[:,:,:,0]
### DT loss inverse
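### L1 on trajectory pixels (Ha > 0) plus L1 pushing background pixels (Ha == 0) to zero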
total_loss = K.mean(K.abs(H[Ha > 0] - Ha[Ha > 0])) + K.mean(K.abs(H[Ha==0] - 0))
return total_loss
return fmo_loss_function
def get_generator(h5_file, batch_size, all_subset, max_shape, input_bgr):
while True:
subset = random.sample(all_subset, batch_size)
X, Y = get_data(h5_file, subset, max_shape, input_bgr)
yield X, Y
def process_image(img, do_resize = True, shape = None):
if shape is None:
shape = img.shape[:2]
shape = [16 * (sh // 16) for sh in shape]
if do_resize:
img = skimage.transform.resize(img, shape, order=3)
else:
img = img[:shape[0],:shape[1]]
img = img - np.mean(img)
img = img / np.sqrt(np.var(img))
return img
def get_data(h5_file, subset, max_shape, input_bgr):
if input_bgr:
X = np.zeros([len(subset),max_shape[0],max_shape[1],6])
else:
X = np.zeros([len(subset),max_shape[0],max_shape[1],3])
Y = np.zeros([len(subset),max_shape[0],max_shape[1],1])
ki = 0
for k in subset:
fname = "%08d_" % (k)
if 'im' in h5_file[fname].keys():
I = skimage.transform.resize(h5_file[fname]['im'], max_shape, order=3)
I = process_image(I,False)
H = skimage.transform.resize(h5_file[fname]['psf'], max_shape, order=1)
M = skimage.transform.resize(h5_file[fname]['M'], max_shape, order=1)
rad = np.sqrt(np.sum(M))
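# rough object radius from the mask area; the distance transform to the
# trajectory (psf) is normalized by the diameter and truncated at 1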
DT = scipy.ndimage.morphology.distance_transform_edt(H == 0)
DT = DT / (2*rad)
DT[DT > 1] = 1
X[ki,:,:,:3] = I
if input_bgr:
B = skimage.transform.resize(h5_file[fname]['bgr'], max_shape, order=3)
B = process_image(B,False)
X[ki,:,:,3:] = B
Y[ki,:,:,0] = 1 - DT
else:
X[ki] = h5_file[fname]['X']
Y[ki] = h5_file[fname]['Y']
ki = ki + 1
return X,Y
def get_im(path):
I = cv2.imread(path)
return I
|
{"/run.py": ["/net_model.py"], "/train.py": ["/net_model.py"], "/run_fifa.py": ["/net_model.py"]}
|
39,875
|
Ajithbalakrishnan/FMODetect
|
refs/heads/master
|
/run.py
|
import argparse
import numpy as np
import tensorflow as tf
from net_model import *
import imageio
import os
import cv2
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--video", required=False, default=None)
parser.add_argument("--im", required=False, default=None)
parser.add_argument("--bgr", required=False, default=None)
parser.add_argument("--model", required=False, default="FMODetect.h5")
parser.add_argument("--save", required=False, default="example")
parser.add_argument("--median", required=False, default=3)
return parser.parse_args()
def main():
args = parse_args()
if not os.path.exists(args.save):
os.makedirs(args.save)
model = tf.keras.models.load_model(args.model, custom_objects={ 'fmo_loss_function': custom_loss(None) })
if args.video is not None:
## estimate initial background
Ims = []
cap = cv2.VideoCapture(args.video)
while cap.isOpened():
ret, frame = cap.read()
Ims.append(frame)
if len(Ims) >= args.median:
break
B = np.median(np.asarray(Ims)/255, 0)[:,:,[2,1,0]]
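## a temporal median over the first --median frames suppresses the fast-moving
## object; [2,1,0] reorders OpenCV's BGR channels to RGB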
## run FMODetect
shape = process_image(B).shape
out = cv2.VideoWriter(os.path.join(args.save, 'detections.avi'),cv2.VideoWriter_fourcc(*"MJPG"), 6, (shape[1], shape[0]),True)
frmi = 0
while cap.isOpened():
if frmi < args.median:
frame = Ims[frmi]
else:
ret, frame = cap.read()
if not ret:
break
Ims = Ims[1:]
Ims.append(frame)
## update background (running median)
B = np.median(np.asarray(Ims)/255, 0)[:,:,[2,1,0]]
frmi += 1
I = process_image(frame[:,:,[2,1,0]]/255)
X = np.concatenate((I,process_image(B)),2)[None]
predictions = model.predict(X)
predictions[predictions < 0] = 0
predictions[predictions > 1] = 1
Io = I - I.min()
Io = Io / Io.max()
# out.write( (predictions[0][:,:,[0,0,0]] * 255).astype(np.uint8) )
out.write( (predictions[0][:,:,[0,0,0]]*Io[:,:,[2,1,0]] * 255).astype(np.uint8) )
cap.release()
out.release()
else:
if args.im is None:
ims = []
bgrs = []
for ss in range(2):
ims.append(os.path.join('example','ex{:1d}_im.png'.format(ss)))
bgrs.append(os.path.join('example','ex{:1d}_bgr.png'.format(ss)))
else:
ims = [args.im]
bgrs = [args.bgr]
for ss in range(len(ims)):
I = process_image(get_im(ims[ss]))
B = process_image(get_im(bgrs[ss]))
X = np.concatenate((I,B),2)[None]
predictions = model.predict(X)
predictions[predictions < 0] = 0
predictions[predictions > 1] = 1
imageio.imwrite(os.path.join(args.save,"ex{:1d}_tdf.png".format(ss)), predictions[0])
Io = I - I.min()
Io = Io / Io.max()
imageio.imwrite(os.path.join(args.save,"ex{:1d}_tdfim.png".format(ss)), predictions[0][:,:,[0,0,0]]*Io[:,:,[2,1,0]])
if __name__ == "__main__":
main()
|
{"/run.py": ["/net_model.py"], "/train.py": ["/net_model.py"], "/run_fifa.py": ["/net_model.py"]}
|
39,876
|
Ajithbalakrishnan/FMODetect
|
refs/heads/master
|
/train.py
|
import glob
import argparse
import numpy as np
import random
import tensorflow as tf
from tensorflow import keras
from net_model import *
import imageio
import os
from os.path import isfile, join, isdir
import shutil
import datetime
import h5py
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", required=True)
parser.add_argument("--model_path", required=True)
return parser.parse_args()
def main():
EPOCHS = 300
BATCH_SIZE = 32
PERC_TRAIN = 0.96
input_bgr = True
max_shape = [256, 512]
n_gpus = 1
folder_name = '{date:%Y%m%d_%H%M}'.format( date=datetime.datetime.now())
args = parse_args()
h5_file = h5py.File(args.dataset_path, 'r', swmr=True)
dataset_size = len(h5_file.keys())
print(tf.__version__)
train_size = round(dataset_size*PERC_TRAIN)
model = get_model(input_bgr, n_gpus)
os.makedirs(join(args.model_path,folder_name))
os.makedirs(join(args.model_path,folder_name,"eval"))
os.makedirs(join(args.model_path,folder_name,"models"))
keras.utils.plot_model(model, join(args.model_path,folder_name,'model.png'), show_shapes=True)
model.summary()
for layer in model.layers:
print(layer.name, ':', layer.trainable)
log_dir = join(args.model_path,folder_name,"logs")
shutil.rmtree(log_dir, ignore_errors=True)
tensorboard_cbk = keras.callbacks.TensorBoard(log_dir=log_dir)
model_saver = keras.callbacks.ModelCheckpoint(filepath=join(args.model_path,folder_name,"models","best_model.h5"),save_best_only=True,monitor='val_loss', mode='min', verbose=1)
earlystopper = keras.callbacks.EarlyStopping(monitor='val_loss',mode='min',patience=50, verbose=1)
history = model.fit_generator(get_generator(h5_file, BATCH_SIZE, range(train_size), max_shape, input_bgr),
validation_data=get_generator(h5_file, BATCH_SIZE, range(train_size,dataset_size), max_shape, input_bgr), steps_per_epoch=train_size/BATCH_SIZE, validation_steps=1, epochs=EPOCHS, callbacks=[tensorboard_cbk,model_saver,earlystopper])
model.save(join(args.model_path,folder_name,'models','final_model.h5'))
print('\nhistory dict:', history.history)
subset = [0,3,5,900,902,990,995]
valX, valY = get_data(h5_file, subset, max_shape, input_bgr)
predictions = model.predict(valX)
Xim = valX  # preview inputs; note these are mean/variance-normalized by process_image
for ti in range(predictions.shape[0]):
fname = "%08d_" % (ti)
im = Xim[ti,:,:,:3]
H = predictions[ti,:,:,0]
npmax = np.max(H)
if npmax == 0:
npmax = 1
imageio.imwrite(join(args.model_path,folder_name,"eval",fname+"im.png"), (255*im).astype(np.uint8))
imageio.imwrite(join(args.model_path,folder_name,"eval",fname+"psf.png"), (255*(H/npmax)).astype(np.uint8))
# imageio.imwrite(join(args.model_path,"eval",fname+"bgr.png"), (255*B).astype(np.uint8))
# imageio.imwrite(join(args.model_path,"eval",fname+"F.png"), (255*Fsave).astype(np.uint8))
# imageio.imwrite(join(args.model_path,"eval",fname+"M.png"), (255*M).astype(np.uint8))
if __name__ == "__main__":
main()
|
{"/run.py": ["/net_model.py"], "/train.py": ["/net_model.py"], "/run_fifa.py": ["/net_model.py"]}
|
39,877
|
Ajithbalakrishnan/FMODetect
|
refs/heads/master
|
/run_fifa.py
|
import argparse
import numpy as np
import tensorflow as tf
from net_model import *
import imageio
import os
import cv2
import skimage.transform
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--video", required=False, default=None)
parser.add_argument("--model", required=False, default="FMODetect.h5")
parser.add_argument("--save", required=False, default="example")
parser.add_argument("--median", required=False, default=3)
parser.add_argument("--average", required=False, default=True)
return parser.parse_args()
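# Split an (apparently interlaced) frame into two field-interpolated frames:
# im0 rebuilds the odd rows from their even neighbours, im1 the interior even
# rows from their odd neighbours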
def interpolate_fifa(im):
im0 = im.copy()
im0[1:-1:2] = (im0[:-2:2] + im0[2::2])/2
im1 = im.copy()
im1[2:-2:2] = (im1[1:-3:2] + im1[3:-1:2])/2
return im0, im1
def get_frame(frame, inc_res = 2):
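# keep roughly the central sixth of the frame (presumably where the action is)
# and upsample by inc_res so the small fast-moving object covers more pixels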
sh = frame.shape[:2]
sh0 = int(sh[0]/6)
sh1 = int(sh[1]/6)
# frame_crop = frame
frame_crop = frame[2*sh0:-3*sh0,2*sh1:-3*sh1]
frame_crop = skimage.transform.resize(frame_crop, (frame_crop.shape[0]*inc_res, frame_crop.shape[1]*inc_res), order=3)
return frame_crop
def main():
args = parse_args()
if not os.path.exists(args.save):
os.makedirs(args.save)
model = tf.keras.models.load_model(args.model, custom_objects={ 'fmo_loss_function': custom_loss(None) })
## estimate initial background
Ims = []
cap = cv2.VideoCapture(args.video)
while cap.isOpened():
ret, frame = cap.read()
if not frame is None:
frame = frame / 255
frame0, frame1 = interpolate_fifa(frame)
if args.average:
Ims.append(get_frame((frame0 + frame1)/2))
else:
Ims.append(get_frame(frame0))
if len(Ims) < args.median:
Ims.append(get_frame(frame1))
if len(Ims) >= args.median:
break
B = np.median(np.asarray(Ims), 0)[:,:,[2,1,0]]  # frames were already scaled to [0,1] above
## run FMODetect
shape = process_image(B).shape
out = cv2.VideoWriter(os.path.join(args.save, 'detections.avi'),cv2.VideoWriter_fourcc(*"MJPG"), 6, (shape[1], shape[0]),True)
frmi = 0
frame1 = None
while cap.isOpened():
if frmi < args.median:
frame = Ims[frmi]
else:
if frame1 is None:
ret, frame = cap.read()
if not ret:
break
frame = frame / 255
frame0, frame1 = interpolate_fifa(frame)
if args.average:
frame = get_frame( (frame0 + frame1)/2 )
frame1 = None
else:
frame = get_frame(frame0)
else:
frame = get_frame(frame1)
frame1 = None
Ims = Ims[1:]
Ims.append(frame)
## update background (running median)
B = np.median(np.asarray(Ims), 0)[:,:,[2,1,0]]
frmi += 1
if args.average:
mult = 1
else:
mult = 2
if frmi < mult*88:
continue
I = process_image(frame[:,:,[2,1,0]])
X = np.concatenate((I,process_image(B)),2)[None]
predictions = model.predict(X)
predictions[predictions < 0] = 0
predictions[predictions > 1] = 1
Io = I - I.min()
Io = Io / Io.max()
imageio.imwrite('tmpi.png',frame[:,:,[2,1,0]])
imageio.imwrite('tmpb.png',B)
imageio.imwrite('tmpo.png',predictions[0][:,:,[0,0,0]])
# out.write( (predictions[0][:,:,[0,0,0]] * 255).astype(np.uint8) )
out.write( (predictions[0][:,:,[0,0,0]]*Io[:,:,[2,1,0]] * 255).astype(np.uint8) )
cap.release()
out.release()
if __name__ == "__main__":
main()
|
{"/run.py": ["/net_model.py"], "/train.py": ["/net_model.py"], "/run_fifa.py": ["/net_model.py"]}
|
39,878
|
MagicFame/ArtScore
|
refs/heads/master
|
/Genre.py
|
from random import randrange
class Genre:
# constructor
def __init__(self, nom):
self.repartitionDoMajeur = 0
self.repartitionDoMineur = 0
self.repartitionLaMineur = 0
self.repartitionSolMajeur = 0
self.repartitionMiMineur = 0
self.repartitionReMajeur = 0
self.repartitionSiMineur = 0
self.repartitionLaMajeur = 0
self.repartitionFaDMineur = 0
self.repartitionMiMajeur = 0
self.repartitionDoDMineur = 0
self.repartitionSiMajeur = 0
self.repartitionSolDMineur = 0
self.repartitionFaDMajeur = 0
self.repartitionReDMineur = 0
self.repartitionDoDMajeur = 0
self.repartitionLaDMineur = 0
self.repartitionFaMajeur = 0
self.repartitionReMineur = 0
self.repartitionLaDMajeur = 0
self.repartitionSolMineur = 0
self.repartitionReDMajeur = 0
self.repartitionSolDMajeur = 0
self.repartitionFaMineur = 0
self.dissonnanceTotale = 0
self.repartitionTotale = 0
self.nom = nom
self.notes = []
self.gam = ""
# Count the occurrences of each scale for a given genre
def addGamme(self, nom):
if nom == "Do majeur":
self.repartitionDoMajeur = self.repartitionDoMajeur + 1
if nom == "La mineur":
self.repartitionLaMineur = self.repartitionLaMineur + 1
if nom == "Sol majeur":
self.repartitionSolMajeur = self.repartitionSolMajeur + 1
if nom == "Mi mineur":
self.repartitionMiMineur = self.repartitionMiMineur + 1
if nom == "Re majeur":
self.repartitionReMajeur = self.repartitionReMajeur + 1
if nom == "Si mineur":
self.repartitionSiMineur = self.repartitionSiMineur + 1
if nom == "La majeur":
self.repartitionLaMajeur = self.repartitionLaMajeur + 1
if nom == "Fa# mineur":
self.repartitionFaDMineur = self.repartitionFaDMineur + 1
if nom == "Mi majeur":
self.repartitionMiMajeur = self.repartitionMiMajeur + 1
if nom == "Do# mineur":
self.repartitionDoDMineur = self.repartitionDoDMineur + 1
if nom == "Si majeur":
self.repartitionSiMajeur = self.repartitionSiMajeur + 1
if nom == "Sol# mineur":
self.repartitionSolDMineur = self.repartitionSolDMineur + 1
if nom == "Fa# majeur":
self.repartitionFaDMajeur = self.repartitionFaDMajeur + 1
if nom == "Re# mineur":
self.repartitionReDMineur = self.repartitionReDMineur + 1
if nom == "Do# majeur":
self.repartitionDoDMajeur = self.repartitionDoDMajeur + 1
if nom == "La# mineur":
self.repartitionLaDMineur = self.repartitionLaDMineur + 1
if nom == "Fa majeur":
self.repartitionFaMajeur = self.repartitionFaMajeur + 1
if nom == "Re mineur":
self.repartitionReMineur = self.repartitionReMineur + 1
if nom == "La# majeur":
self.repartitionLaDMajeur = self.repartitionLaDMajeur + 1
if nom == "Sol mineur":
self.repartitionSolMineur = self.repartitionSolMineur + 1
if nom == "Re# majeur":
self.repartitionReDMajeur = self.repartitionReDMajeur + 1
if nom == "Do mineur":
self.repartitionDoMineur = self.repartitionDoMineur + 1
if nom == "Sol# majeur":
self.repartitionSolDMajeur = self.repartitionSolDMajeur + 1
if nom == "Fa mineur":
self.repartitionFaMineur = self.repartitionFaMineur + 1
# Accumulate the total dissonance for a given genre
def addDissonance(self, value):
self.dissonnanceTotale += value
# Accumulate the spread for a genre (highest note - lowest note)
def addRepartition(self, value):
self.repartitionTotale += value
# Print the characteristics of a genre
def afficherInfoGenre(self):
print("\n \n \n***************************** \nStyle selectionné : Classique")
print("Gamme répartition Do majeur : ", self.repartitionDoMajeur, "Do mineur : ", self.repartitionDoMineur,
"La mineur: ", self.repartitionLaMineur, "Sol majeur: ", self.repartitionSolMajeur, "Mi mineur:",
self.repartitionMiMineur, "Re majeur : ", self.repartitionReMajeur, "Si mineur", self.repartitionSiMineur,
"La majeur : ", self.repartitionLaMajeur, "Fa# mineur: ", self.repartitionFaDMineur,
"Mi majeur: ", self.repartitionMiMajeur, "Do# mineur:", self.repartitionDoDMineur, "Si majeur : ",
self.repartitionSiMajeur, "Sol# mineur", self.repartitionSolDMineur, "Fa# majeur : ",
self.repartitionFaDMajeur, "\nRe# mineur : ", self.repartitionReDMineur, "Do# majeur: ",
self.repartitionDoDMajeur,
"La# mineur: ", self.repartitionLaDMineur, "Fa majeur:", self.repartitionFaMajeur, "Re mineur : ",
self.repartitionReMineur, "La# majeur", self.repartitionLaDMajeur,
"Sol mineur : ", self.repartitionSolMineur, "Re# majeur", self.repartitionReDMajeur, "Sol# majeur",
self.repartitionSolDMajeur, "Fa mineur", self.repartitionFaMineur)
print("\nDissonnance moyenne :", self.dissonnanceTotale / 360)
print("\nRepartition moyenne", self.repartitionTotale / 360, "\n\n***************************** \n\n")
# Pick a scale at random among the genre's most frequent ones
def chooseGenre(self):
gamme = []
repartition = [self.repartitionDoMajeur, self.repartitionDoMineur, self.repartitionLaMineur,
self.repartitionSolMajeur, self.repartitionMiMineur, self.repartitionReMajeur,
self.repartitionSiMineur, self.repartitionLaMajeur, self.repartitionFaDMineur,
self.repartitionMiMajeur, self.repartitionDoDMineur, self.repartitionSiMajeur,
self.repartitionSolDMineur, self.repartitionFaDMajeur, self.repartitionReDMineur,
self.repartitionDoDMajeur, self.repartitionLaDMineur, self.repartitionFaMajeur,
self.repartitionReMineur, self.repartitionLaDMajeur, self.repartitionSolMineur,
self.repartitionReDMajeur, self.repartitionSolDMajeur, self.repartitionFaMineur]
nomgamme = ["Do majeur", "Do mineur", "La mineur", "Sol majeur", "Mi mineur", "Re majeur", "Si mineur",
"La majeur", "Fa# mineur", "Mi majeur", "Do# mineur", "Si majeur", "Sol# mineur", "Fa# majeur",
"Re# mineur", "Do# majeur", "La# mineur", "Fa majeur", "Re mineur", "La# majeur", "Sol mineur",
"Re# majeur", "Sol# majeur", "Fa mineur"]
for i in range(0, 6):
value = repartition.index(max(repartition))
gamme.append(nomgamme[value])
del repartition[value]
del nomgamme[value]
print(gamme)
# uniform choice among the six most frequent scales
return gamme[randrange(0, 6)]
# Average spread (360 = number of pieces in the corpus)
def repartitionMoyenne(self):
return self.repartitionTotale / 360
# Average dissonance
def dissonanceMoyenne(self):
return self.dissonnanceTotale / 360
|
{"/main.py": ["/MarkovGen.py", "/AleatoireGen.py", "/Genre.py"], "/MarkovGen.py": ["/Gamme.py", "/Genre.py"], "/AleatoireGen.py": ["/Gamme.py"]}
|
39,879
|
MagicFame/ArtScore
|
refs/heads/master
|
/Gamme.py
|
class Gamme:
# Constructor
def __init__(self, nom, note):
self.nom = nom
self.notes = list(note)
# Return the name of the scale
def getNom(self):
return self.nom
# Return the notes of the scale
def getNotes(self):
return self.notes
|
{"/main.py": ["/MarkovGen.py", "/AleatoireGen.py", "/Genre.py"], "/MarkovGen.py": ["/Gamme.py", "/Genre.py"], "/AleatoireGen.py": ["/Gamme.py"]}
|
39,880
|
MagicFame/ArtScore
|
refs/heads/master
|
/main.py
|
from tkinter import *
import pygame
from mido import MidiFile
from AleatoireGen import AleatoireGen
from Genre import Genre
from MarkovGen import MarkovGen
# SEE THE DOCS: https://mido.readthedocs.io/en/latest/index.html#
# Sorting helper so the notes of the scale are ordered from Do to Si
def trier(liste):
gamme = []
if "Do" in liste:
gamme.append("Do")
if "Do#" in liste:
gamme.append("Do#")
if "Re" in liste:
gamme.append("Re")
if "Re#" in liste:
gamme.append("Re#")
if "Mi" in liste:
gamme.append("Mi")
if "Fa" in liste:
gamme.append("Fa")
if "Fa#" in liste:
gamme.append("Fa#")
if "Sol" in liste:
gamme.append("Sol")
if "Sol#" in liste:
gamme.append("Sol#")
if "La" in liste:
gamme.append("La")
if "La#" in liste:
gamme.append("La#")
if "Si" in liste:
gamme.append("Si")
return gamme
# Detect the difference between the highest and the lowest note
def detectRepartition():
basse = int(notes[0])
haute = int(notes[0])
for n in notes:
if int(n) > haute:
haute = int(n)
elif int(n) < basse:
basse = int(n)
print("Note haute :", haute, " et note basse : ", basse)
classique.addRepartition(haute - basse)
return haute - basse
# Scale-detection algorithm based on pitch-class frequencies (exact explanation in the Excel sheet)
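# Outline: count each pitch class (note mod 12), convert counts to percentages,
# keep classes at >= 10%, fill ambiguous gaps by comparing neighbouring
# frequencies, then match the resulting set against the twelve
# major/relative-minor scale patterns below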
def detectGamme():
gamme = []
comptdo = 0
comptdod = 0
comptre = 0
comptred = 0
comptmi = 0
comptfa = 0
comptfad = 0
comptsol = 0
comptsold = 0
comptla = 0
comptlad = 0
comptsi = 0
for n in notes:
if int(n) % 12 == 0:
comptdo += 1
elif int(n) % 12 == 1:
comptdod += 1
elif int(n) % 12 == 2:
comptre += 1
elif int(n) % 12 == 3:
comptred += 1
elif int(n) % 12 == 4:
comptmi += 1
elif int(n) % 12 == 5:
comptfa += 1
elif int(n) % 12 == 6:
comptfad += 1
elif int(n) % 12 == 7:
comptsol += 1
elif int(n) % 12 == 8:
comptsold += 1
elif int(n) % 12 == 9:
comptla += 1
elif int(n) % 12 == 10:
comptlad += 1
elif int(n) % 12 == 11:
comptsi += 1
initial = (int(notes[0]) % 12)
print("DO : ", comptdo, " DO# :", comptdod, " RE :", comptre, " RE# :", comptred, " MI :", comptmi, " FA :",
comptfa, " FA# :", comptfad, " SOL :", comptsol, " SOL# :", comptsold, " LA :", comptla, " LA# :", comptlad,
" SI :", comptsi)
notesTotales = comptdo + comptdod + comptre + comptred + comptmi + comptfa + comptfad + comptsol + comptsold + \
comptla + comptlad + comptsi
freqDo = round((comptdo / notesTotales) * 100)
freqDod = round((comptdod / notesTotales) * 100)
freqRe = round((comptre / notesTotales) * 100)
freqRed = round((comptred / notesTotales) * 100)
freqMi = round((comptmi / notesTotales) * 100)
freqFa = round((comptfa / notesTotales) * 100)
freqFad = round((comptfad / notesTotales) * 100)
freqSol = round((comptsol / notesTotales) * 100)
freqSold = round((comptsold / notesTotales) * 100)
freqLa = round((comptla / notesTotales) * 100)
freqLad = round((comptlad / notesTotales) * 100)
freqSi = round((comptsi / notesTotales) * 100)
print("DO : ", freqDo, "% DO# :", freqDod, "% RE :", freqRe, "% RE# :", freqRed, "% MI :", freqMi, "% FA :",
freqFa, "% FA# :", freqFad, "% SOL :", freqSol, "% SOL# :", freqSold, "% LA :", freqLa, "% LA# :", freqLad,
"% SI :", freqSi)
if freqDo >= 10:
gamme.append("Do")
if freqDod >= 10:
gamme.append("Do#")
if freqRe >= 10:
gamme.append("Re")
if freqRed >= 10:
gamme.append("Re#")
if freqMi >= 10:
gamme.append("Mi")
if freqFa >= 10:
gamme.append("Fa")
if freqFad >= 10:
gamme.append("Fa#")
if freqSol >= 10:
gamme.append("Sol")
if freqSold >= 10:
gamme.append("Sol#")
if freqLa >= 10:
gamme.append("La")
if freqLad >= 10:
gamme.append("La#")
if freqSi >= 10:
gamme.append("Si")
print(gamme)
if "Do" not in gamme and "Do#" not in gamme:
if freqDo >= freqDod:
gamme.insert(0, "Do")
else:
gamme.insert(0, "Do#")
if "Do#" not in gamme and "Re" not in gamme:
if freqDod >= freqRe:
gamme.insert(0, "Do#")
else:
gamme.insert(1, "Re")
if "Re" not in gamme and "Re#" not in gamme:
if freqRe >= freqRed:
gamme.insert(1, "Re")
else:
gamme.insert(1, "Re#")
if "Re#" not in gamme and "Mi" not in gamme:
if freqRed >= freqMi:
gamme.insert(2, "Re#")
else:
gamme.insert(2, "Mi")
if "Mi" not in gamme and "Fa" not in gamme:
if freqMi >= freqFa:
gamme.insert(2, "Mi")
else:
gamme.insert(3, "Fa")
if "Fa" not in gamme and "Fa#" not in gamme:
if freqFa >= freqFad:
gamme.insert(3, "Fa")
else:
gamme.insert(3, "Fa#")
if "Fa#" not in gamme and "Sol" not in gamme:
if freqFad >= freqSol:
gamme.insert(4, "Fa#")
else:
gamme.insert(4, "Sol")
if "Sol" not in gamme and "Sol#" not in gamme:
if freqSol >= freqSold:
gamme.insert(4, "Sol")
else:
gamme.insert(5, "Sol#")
if "Sol#" not in gamme and "La" not in gamme:
if freqSold >= freqLa:
gamme.insert(5, "Sol#")
else:
gamme.insert(5, "La")
if "La" not in gamme and "La#" not in gamme:
if freqLa >= freqLad:
gamme.insert(6, "La")
else:
gamme.insert(6, "La#")
if "La#" not in gamme and "Si" not in gamme:
if freqLad >= freqSi:
gamme.insert(6, "La#")
else:
gamme.insert(7, "Si")
liste = trier(gamme)
print(liste)
# Compute the share of notes that fall outside the detected scale:
# every note is compared against the notes of the list (gamme)
freqDissonnance = 0
if "Do" not in liste:
freqDissonnance += freqDo
if "Do#" not in liste:
freqDissonnance += freqDod
if "Re" not in liste:
freqDissonnance += freqRe
if "Re#" not in liste:
freqDissonnance += freqRed
if "Mi" not in liste:
freqDissonnance += freqMi
if "Fa" not in liste:
freqDissonnance += freqFa
if "Fa#" not in liste:
freqDissonnance += freqFad
if "Sol" not in liste:
freqDissonnance += freqSol
if "Sol#" not in liste:
freqDissonnance += freqSold
if "La" not in liste:
freqDissonnance += freqLa
if "La#" not in liste:
freqDissonnance += freqLad
if "Si" not in liste:
freqDissonnance += freqSi
print("Fréquence dissonnance : ", freqDissonnance, "%")
classique.addDissonance(freqDissonnance)
# Then determine the exact name of the scale
gamme = "Rien"
if "Do" in liste and "Re" in liste and "Mi" in liste and "Fa" in liste and "Sol" in liste and "La" in liste and "Si" in liste:
if freqDo > freqLa:
gamme = "Do majeur"
elif freqLa > freqDo:
gamme = "La mineur"
elif initial == 0:
gamme = "Do majeur"
else:
gamme = "La mineur"
elif "Do" in liste and "Re" in liste and "Mi" in liste and "Fa#" in liste and "Sol" in liste and "La" in liste and "Si" in liste:
if freqSol > freqMi:
gamme = "Sol majeur"
elif freqMi > freqSol:
gamme = "Mi mineur"
elif initial == 4:
gamme = "Mi mineur"
else:
gamme = "Sol majeur"
elif "Do#" in liste and "Re" in liste and "Mi" in liste and "Fa#" in liste and "Sol" in liste and "La" in liste and "Si" in liste:
if freqRe > freqSi:
gamme = "Re majeur"
elif freqSi > freqRe:
gamme = "Si mineur"
elif initial == 2:
gamme = "Re majeur"
else:
gamme = "Si mineur"
elif "Do#" in liste and "Re" in liste and "Mi" in liste and "Fa#" in liste and "Sol#" in liste and "La" in liste and "Si" in liste:
if freqLa > freqFad:
gamme = "La majeur"
elif freqFad > freqLa:
gamme = "Fa# mineur"
elif initial == 9:
gamme = "La majeur"
else:
gamme = "Fa# mineur"
elif "Do#" in liste and "Re#" in liste and "Mi" in liste and "Fa#" in liste and "Sol#" in liste and "La" in liste and "Si" in liste:
if freqMi > freqDod:
gamme = "Mi majeur"
elif freqDod > freqMi:
gamme = "Do# mineur"
elif initial == 4:
gamme = "Mi majeur"
else:
gamme = "Do# mineur"
elif "Do#" in liste and "Re#" in liste and "Mi" in liste and "Fa#" in liste and "Sol#" in liste and "La#" in liste and "Si" in liste:
if freqSi > freqSold:
gamme = "Si majeur"
elif freqSold > freqSi:
gamme = "Sol# mineur"
elif initial == 11:
gamme = "Si majeur"
else:
gamme = "Sol# mineur"
elif "Do#" in liste and "Re#" in liste and "Fa" in liste and "Fa#" in liste and "Sol#" in liste and "La#" in liste and "Si" in liste:
if freqFad > freqRed:
gamme = "Fa# majeur"
elif freqRed > freqFad:
gamme = "Re# mineur"
elif initial == 6:
gamme = "Fa# majeur"
else:
gamme = "Re# mineur"
elif "Do" in liste and "Do#" in liste and "Re#" in liste and "Fa" in liste and "Fa#" in liste and "Sol#" in liste and "La#" in liste:
if freqDod > freqLad:
gamme = "Do# majeur"
elif freqLad > freqDod:
gamme = "La# mineur"
elif initial == 1:
gamme = "Do# majeur"
else:
gamme = "La# mineur"
elif "Do" in liste and "Re" in liste and "Mi" in liste and "Fa" in liste and "Sol" in liste and "La" in liste and "La#" in liste:
if freqFa > freqRe:
gamme = "Fa majeur"
elif freqRe > freqFa:
gamme = "Re mineur"
elif initial == 5:
gamme = "Fa majeur"
else:
gamme = "Re mineur"
elif "Do" in liste and "Re" in liste and "Re#" in liste and "Fa" in liste and "Sol" in liste and "La" in liste and "La#" in liste:
if freqLad > freqSol:
gamme = "La# majeur"
elif freqSol > freqLad:
gamme = "Sol mineur"
elif initial == 10:
gamme = "La# majeur"
else:
gamme = "Sol mineur"
elif "Do" in liste and "Re" in liste and "Re#" in liste and "Fa" in liste and "Sol" in liste and "Sol#" in liste and "La#" in liste:
if freqRed > freqDo:
gamme = "Re# majeur"
elif freqDo > freqRed:
gamme = "Do mineur"
elif initial == 0:
gamme = "Do mineur"
else:
gamme = "Re# majeur"
elif "Do" in liste and "Do#" in liste and "Re#" in liste and "Fa" in liste and "Sol" in liste and "Sol#" in liste and "La#" in liste:
if freqSold > freqFa:
gamme = "Sol# majeur"
elif freqFa > freqSold:
gamme = "Fa mineur"
elif initial == 5:
gamme = "Fa mineur"
else:
gamme = "Sol# majeur"
print("La gamme est :", gamme)
classique.addGamme(gamme)
return gamme
# Build the Markov tables (transition-count tables)
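# Only notes in the MIDI range 48-83 (the 36 pitches the generator can emit)
# are counted, and immediate repetitions of the same note are skipped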
def detectMarkov():
cpt = 0
for cpt in range(len(notes) - 1):
if 48 <= int(notes[cpt]) <= 83 and 48 <= int(notes[cpt + 1]) <= 83 and notes[cpt] != notes[cpt + 1]:
# notes faisant partie de la création
markov[int(notes[cpt]) - 48][int(notes[cpt + 1]) - 48] += 1
# Calls the generation algorithm for the chosen method and starts playback
# right after creation
def generer(methode):
bottom.pack(padx=30, pady=30)
valider.config(state=NORMAL)
global methodeGeneree
methodeGeneree = methode
# ---> FROM HERE ON, dispatch to the generation algorithm for the chosen method
if methodeGeneree == "aleatoire":
Generation = AleatoireGen(classique)
pygame.mixer.music.load("random.mid")
pygame.mixer.music.play()
elif methodeGeneree == "markov":
Markov = MarkovGen(classique, markov, markov2)
pygame.mixer.music.load("markov.mid")
pygame.mixer.music.play()
# Rating function, still to be completed!!
def noter():
valider.config(state=DISABLED)
# only one rating can be given per generated piece
print(methodeGeneree)
gammerate = int(gamme_s.get())
hauteur = int(hauteur_s.get())
rytme = int(rytme_s.get())
print("Note sur la gamme " + str(gammerate))
print("Note sur la différence de hauteur entre deux notes " + str(hauteur))
print("Note sur le rythme" + str(rytme))
# main window definition
fenetre = Tk()
fenetre.title("ArtScore")
titre = Label(fenetre, text="ArtScore")
titre.pack()
# variable definitions
methodeGeneree = "Aucune"
markov = []  # Markov table for notes
compt = 0
markov2 = []  # Markov table for durations
# initialize the 2-D Markov matrix
for i in range(36):
markov.append([0] * 36)
# create the Classique genre
classique = Genre("Classique")
# initialize pygame (MIDI playback in the app)
pygame.init()
# loop over the 360 classical pieces, printing each piece's information
for compt in range(1, 361):
morceau = "n (" + str(compt) + ").mid"
path = "Classique/" + morceau
# You can also try the Jazz pieces: change the path to "Jazz/" and stop the loop at 52
print("Morceau actuel :", path)
mid = MidiFile(path)
notes = []
for i, track in enumerate(mid.tracks):
print('Track {}: {}'.format(i, track.name))
for msg in track:
if msg.type == 'note_on':
notes.append(str(msg.note))  # mido exposes the pitch directly, no need to parse str(msg)
detectMarkov()
gamme = detectGamme()
print("La différence entre la note la plus haute et la plus basse est :", detectRepartition())
# Console output of all the information specific to the genre
classique.afficherInfoGenre()
# Console output of the Markov table
print("CHAINE DE MARKOV (comptage)")
for i in range(36):
print(i, "|", markov[i])
# top frame (generation-method selection)
cadre = Frame(fenetre, width=768, height=576, borderwidth=2, relief=GROOVE)
cadre.pack(padx=30, pady=30)
label = Label(cadre, text="Méthodes de génération de musiques :")
label.pack(padx=10, pady=10)
Button(cadre, text='Aléatoire', command=lambda: generer('aleatoire'), padx=10, pady=5).pack(side=LEFT, padx=50, pady=20)
Button(cadre, text='Chaînes de Markov', command=lambda: generer('markov'), padx=10, pady=5).pack(side=RIGHT, padx=50,
pady=20)
# bottom frame (rating)
bottom = Frame(fenetre, borderwidth=2, relief=GROOVE)
labelbottom = Label(bottom, text="Écoutez le morceau puis donnez votre avis :")
labelbottom.pack(padx=10, pady=10)
gamme_s = Spinbox(bottom, from_=0, to=5)
Label(bottom, text="Gamme :").pack(pady=5)
gamme_s.pack()
hauteur_s = Spinbox(bottom, from_=0, to=5)
Label(bottom, text="Différence de hauteur :").pack(pady=5)
hauteur_s.pack()
rytme_s = Spinbox(bottom, from_=0, to=5)
Label(bottom, text="Rythmique :").pack(pady=5)
rytme_s.pack()
valider = Button(bottom, text='Valider', command=noter, padx=10, pady=5)
valider.pack(padx=50, pady=20)
fenetre.mainloop()
|
{"/main.py": ["/MarkovGen.py", "/AleatoireGen.py", "/Genre.py"], "/MarkovGen.py": ["/Gamme.py", "/Genre.py"], "/AleatoireGen.py": ["/Gamme.py"]}
|
39,881
|
MagicFame/ArtScore
|
refs/heads/master
|
/MarkovGen.py
|
import os
from random import randrange
import numpy as np
from EasyMIDI import EasyMIDI, Track, Note
from Gamme import Gamme
from Genre import Genre
class MarkovGen:
# Constructor
def __init__(self, genre, markov1, markov2):
### Note definitions
self.c2 = Note('C', octave=3, duration=1 / 4, volume=100) # DO2
self.c2d = Note('C#', octave=3, duration=1 / 4, volume=100)
self.d2 = Note('D', octave=3, duration=1 / 4, volume=100)
self.d2d = Note('D#', octave=3, duration=1 / 4, volume=100)
self.e2 = Note('E', octave=3, duration=1 / 4, volume=100)
self.f2 = Note('F', octave=3, duration=1 / 4, volume=100)
self.f2d = Note('F#', octave=3, duration=1 / 4, volume=100)
self.g2 = Note('G', octave=3, duration=1 / 4, volume=100)
self.g2d = Note('G#', octave=3, duration=1 / 4, volume=100)
self.a2 = Note('A', octave=3, duration=1 / 4, volume=100)
self.a2d = Note('A#', octave=3, duration=1 / 4, volume=100)
self.b2 = Note('B', octave=3, duration=1 / 4, volume=100)
self.c3 = Note('C', octave=4, duration=1 / 4, volume=100) # DO3
self.c3d = Note('C#', octave=4, duration=1 / 4, volume=100)
self.d3 = Note('D', octave=4, duration=1 / 4, volume=100)
self.d3d = Note('D#', octave=4, duration=1 / 4, volume=100)
self.e3 = Note('E', octave=4, duration=1 / 4, volume=100)
self.f3 = Note('F', octave=4, duration=1 / 4, volume=100)
self.f3d = Note('F#', octave=4, duration=1 / 4, volume=100)
self.g3 = Note('G', octave=4, duration=1 / 4, volume=100)
self.g3d = Note('G#', octave=4, duration=1 / 4, volume=100)
self.a3 = Note('A', octave=4, duration=1 / 4, volume=100)
self.a3d = Note('A#', octave=4, duration=1 / 4, volume=100)
self.b3 = Note('B', octave=4, duration=1 / 4, volume=100)
self.c4 = Note('C', octave=5, duration=1 / 4, volume=100) # DO4
self.c4d = Note('C#', octave=5, duration=1 / 4, volume=100)
self.d4 = Note('D', octave=5, duration=1 / 4, volume=100)
self.d4d = Note('D#', octave=5, duration=1 / 4, volume=100)
self.e4 = Note('E', octave=5, duration=1 / 4, volume=100)
self.f4 = Note('F', octave=5, duration=1 / 4, volume=100)
self.f4d = Note('F#', octave=5, duration=1 / 4, volume=100)
self.g4 = Note('G', octave=5, duration=1 / 4, volume=100)
self.g4d = Note('G#', octave=5, duration=1 / 4, volume=100)
self.a4 = Note('A', octave=5, duration=1 / 4, volume=100)
self.a4d = Note('A#', octave=5, duration=1 / 4, volume=100)
self.b4 = Note('B', octave=5, duration=1 / 4, volume=100)
### Scale definitions
self.doma = Gamme('Do majeur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si'])
self.lami = Gamme('La mineur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si'])
self.solma = Gamme('Sol majeur', ['Do', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.mimi = Gamme('Mi mineur', ['Do', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.rema = Gamme('Re majeur', ['Dod', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.simi = Gamme('Si mineur', ['Dod', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.lama = Gamme('La majeur', ['Dod', 'Re', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.fadmi = Gamme('Fad mineur', ['Dod', 'Re', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.mima = Gamme('Mi majeur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.dodmi = Gamme('Dod mineur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.sima = Gamme('Si majeur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'Lad', 'Si'])
self.soldmi = Gamme('Sold mineur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'Lad', 'Si'])
self.fadma = Gamme('Fad majeur', ['Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad', 'Si'])
self.redmi = Gamme('Red mineur', ['Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad', 'Si'])
self.dodma = Gamme('Dod majeur', ['Do', 'Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad'])
self.ladmi = Gamme('Lad mineur', ['Do', 'Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad'])
self.fama = Gamme('Fa majeur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Lad'])
self.remi = Gamme('Re mineur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Lad'])
self.ladma = Gamme('Lad majeur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'La', 'Lad'])
self.solmi = Gamme('Sol mineur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'La', 'Lad'])
self.redma = Gamme('Red majeur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
self.domi = Gamme('Do mineur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
self.soldma = Gamme('Sold majeur', ['Do', 'Dod', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
self.fami = Gamme('Fa mineur', ['Do', 'Dod', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
# deep-copy the transition counts so normalizing the rows in generate()
# does not mutate the caller's matrix
self.markov = [row[:] for row in markov1]  # Markov transition table (notes)
self.markovtps = []  # duration Markov table (generate() builds a local one)
gamme = genre.chooseGenre()
self.generate(gamme)
# Generation using the Markov chain
def generate(self, gamme):
noteactuelle = ""
notes = []
if gamme == "Do majeur" or gamme == "La mineur":
notes = self.doma.getNotes()
if gamme == "Sol majeur" or gamme == "Mi mineur":
notes = list(self.solma.getNotes())
if gamme == "Re majeur" or gamme == "Si mineur":
notes = list(self.redmi.getNotes())
if gamme == "La majeur" or gamme == "Fa# mineur":
notes = list(self.lama.getNotes())
if gamme == "Mi majeur" or gamme == "Do# mineur":
notes = list(self.mima.getNotes())
if gamme == "Si majeur" or gamme == "Sol# mineur":
notes = list(self.sima.getNotes())
if gamme == "Fa# majeur" or gamme == "Re# mineur":
notes = list(self.fadma.getNotes())
if gamme == "Do# majeur" or gamme == "La# mineur":
notes = list(self.dodma.getNotes())
if gamme == "Fa majeur" or gamme == "Re mineur":
notes = list(self.fama.getNotes())
if gamme == "La# majeur" or gamme == "Sol mineur":
notes = list(self.ladma.getNotes())
if gamme == "Re# majeur" or gamme == "Do mineur":
notes = list(self.redma.getNotes())
if gamme == "Sol# majeur" or gamme == "Fa mineur":
notes = list(self.soldma.getNotes())
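# normalize each row of the count matrix into a probability distribution
# (row-stochastic), since np.random.choice(..., p=row) below requires rows
# that sum to 1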
total = 0
for i in range(36):
for j in range(36):
total += self.markov[i][j]
for x in range(36):
# guard against all-zero rows (notes never seen in the corpus)
self.markov[i][x] = self.markov[i][x] / total if total > 0 else 1 / 36
total = 0
for i in range(36):
print(i, "|", self.markov[i])
markovtps = ([[0.1, 0.3, 0.6, 0, 0],
[0.2, 0.2, 0.5, 0.1, 0],
[0.1, 0.1, 0.5, 0.2, 0.1],
[0, 0.05, 0.3, 0.6, 0.05],
[0, 0.05, 0.3, 0.35, 0.3]])
print("Gamme du morceau généré : ", gamme)
Genre.gamme = gamme
easyMIDI = EasyMIDI()
track1 = Track("acoustic grand piano")
temps = [1, 1 / 2, 1 / 4, 1 / 8, 1 / 16]
tempsrandom = randrange(0, 5)
# First note: chosen at random within the scale
notesrandom = randrange(0, 7)
tempsactuelle = ""
if notes[notesrandom] == "Do":
hauteur = randrange(1, 4)
if hauteur == 1:
self.c2.setDuration(temps[tempsrandom])
track1.addNote(self.c2)
noteactuelle = 0
tempsactuelle = tempsrandom
if hauteur == 2:
self.c3.setDuration(temps[tempsrandom])
track1.addNote(self.c3)
noteactuelle = 12
tempsactuelle = tempsrandom
if hauteur == 3:
self.c4.setDuration(temps[tempsrandom])
track1.addNote(self.c4)
noteactuelle = 24
tempsactuelle = tempsrandom
if notes[notesrandom] == "Dod":
hauteur = randrange(1, 4)
if hauteur == 1:
self.c2d.setDuration(temps[tempsrandom])
track1.addNote(self.c2d)
noteactuelle = 1
tempsactuelle = tempsrandom
if hauteur == 2:
self.c3d.setDuration(temps[tempsrandom])
track1.addNote(self.c3d)
noteactuelle = 13
tempsactuelle = tempsrandom
if hauteur == 3:
self.c4d.setDuration(temps[tempsrandom])
track1.addNote(self.c4d)
noteactuelle = 25
tempsactuelle = tempsrandom
if notes[notesrandom] == "Re":
hauteur = randrange(1, 4)
if hauteur == 1:
self.d2.setDuration(temps[tempsrandom])
track1.addNote(self.d2)
noteactuelle = 2
tempsactuelle = tempsrandom
if hauteur == 2:
self.d3.setDuration(temps[tempsrandom])
track1.addNote(self.d3)
noteactuelle = 14
tempsactuelle = tempsrandom
if hauteur == 3:
self.d4.setDuration(temps[tempsrandom])
track1.addNote(self.d4)
noteactuelle = 26
tempsactuelle = tempsrandom
if notes[notesrandom] == "Red":
hauteur = randrange(1, 4)
if hauteur == 1:
self.d2d.setDuration(temps[tempsrandom])
track1.addNote(self.d2d)
noteactuelle = 3
tempsactuelle = tempsrandom
if hauteur == 2:
self.d3d.setDuration(temps[tempsrandom])
track1.addNote(self.d3d)
noteactuelle = 15
tempsactuelle = tempsrandom
if hauteur == 3:
self.d4d.setDuration(temps[tempsrandom])
track1.addNote(self.d4d)
noteactuelle = 27
tempsactuelle = tempsrandom
if notes[notesrandom] == "Mi":
hauteur = randrange(1, 4)
if hauteur == 1:
self.e2.setDuration(temps[tempsrandom])
track1.addNote(self.e2)
noteactuelle = 4
tempsactuelle = tempsrandom
if hauteur == 2:
self.e3.setDuration(temps[tempsrandom])
track1.addNote(self.e3)
noteactuelle = 16
tempsactuelle = tempsrandom
if hauteur == 3:
self.e4.setDuration(temps[tempsrandom])
track1.addNote(self.e4)
noteactuelle = 28
tempsactuelle = tempsrandom
if notes[notesrandom] == "Fa":
hauteur = randrange(1, 4)
if hauteur == 1:
self.f2.setDuration(temps[tempsrandom])
track1.addNote(self.f2)
noteactuelle = 5
tempsactuelle = tempsrandom
if hauteur == 2:
self.f3.setDuration(temps[tempsrandom])
track1.addNote(self.f3)
noteactuelle = 17
tempsactuelle = tempsrandom
if hauteur == 3:
self.f4.setDuration(temps[tempsrandom])
track1.addNote(self.f4)
noteactuelle = 29
tempsactuelle = tempsrandom
if notes[notesrandom] == "Fad":
hauteur = randrange(1, 4)
if hauteur == 1:
self.f2d.setDuration(temps[tempsrandom])
track1.addNote(self.f2d)
noteactuelle = 6
tempsactuelle = tempsrandom
if hauteur == 2:
self.f3d.setDuration(temps[tempsrandom])
track1.addNote(self.f3d)
noteactuelle = 18
tempsactuelle = tempsrandom
if hauteur == 3:
self.f4d.setDuration(temps[tempsrandom])
track1.addNote(self.f4d)
noteactuelle = 30
tempsactuelle = tempsrandom
if notes[notesrandom] == "Sol":
hauteur = randrange(1, 4)
if hauteur == 1:
self.g2.setDuration(temps[tempsrandom])
track1.addNote(self.g2)
noteactuelle = 7
tempsactuelle = tempsrandom
if hauteur == 2:
self.g3.setDuration(temps[tempsrandom])
track1.addNote(self.g3)
noteactuelle = 19
tempsactuelle = tempsrandom
if hauteur == 3:
self.g4.setDuration(temps[tempsrandom])
track1.addNote(self.g4)
noteactuelle = 31
tempsactuelle = tempsrandom
if notes[notesrandom] == "Sold":
hauteur = randrange(1, 4)
if hauteur == 1:
self.g2d.setDuration(temps[tempsrandom])
track1.addNote(self.g2d)
noteactuelle = 8
tempsactuelle = tempsrandom
if hauteur == 2:
self.g3d.setDuration(temps[tempsrandom])
track1.addNote(self.g3d)
noteactuelle = 20
tempsactuelle = tempsrandom
if hauteur == 3:
self.g4d.setDuration(temps[tempsrandom])
track1.addNote(self.g4d)
noteactuelle = 32
tempsactuelle = tempsrandom
if notes[notesrandom] == "La":
hauteur = randrange(1, 4)
if hauteur == 1:
self.a2.setDuration(temps[tempsrandom])
track1.addNote(self.a2)
noteactuelle = 9
tempsactuelle = tempsrandom
if hauteur == 2:
self.a3.setDuration(temps[tempsrandom])
track1.addNote(self.a3)
noteactuelle = 21
tempsactuelle = tempsrandom
if hauteur == 3:
self.a4.setDuration(temps[tempsrandom])
track1.addNote(self.a4)
noteactuelle = 33
tempsactuelle = tempsrandom
if notes[notesrandom] == "Lad":
hauteur = randrange(1, 4)
if hauteur == 1:
self.a2d.setDuration(temps[tempsrandom])
track1.addNote(self.a2d)
noteactuelle = 10
tempsactuelle = tempsrandom
if hauteur == 2:
self.a3d.setDuration(temps[tempsrandom])
track1.addNote(self.a3d)
noteactuelle = 22
tempsactuelle = tempsrandom
if hauteur == 3:
self.a4d.setDuration(temps[tempsrandom])
track1.addNote(self.a4d)
noteactuelle = 34
tempsactuelle = tempsrandom
if notes[notesrandom] == "Si":
hauteur = randrange(1, 4)
if hauteur == 1:
self.b2.setDuration(temps[tempsrandom])
track1.addNote(self.b2)
noteactuelle = 11
tempsactuelle = tempsrandom
if hauteur == 2:
self.b3.setDuration(temps[tempsrandom])
track1.addNote(self.b3)
noteactuelle = 23
tempsactuelle = tempsrandom
if hauteur == 3:
self.b4.setDuration(temps[tempsrandom])
track1.addNote(self.b4)
noteactuelle = 35
tempsactuelle = tempsrandom
taillemorceau = randrange(100, 200)
counter = 0
# starting from the first note, generate the following ones:
while taillemorceau > counter:
# select the row matching the current note
tab = np.array(self.markov[noteactuelle])
tabtps = np.array(markovtps[tempsactuelle])
# sample the next value following the row's probability distribution
notesuivantetab = np.random.choice(36, 1, p=tab)
tempssuivanttab = np.random.choice(5, 1, p=tabtps)
# extract the sampled value
notesuivante = int(notesuivantetab[0])
tempssuivant = int(tempssuivanttab[0])
if notesuivante == 0 and "Do" in notes:
self.c2.setDuration(temps[tempssuivant])
track1.addNote(self.c2)
counter += 1
if notesuivante == 1 and "Dod" in notes:
self.c2d.setDuration(temps[tempssuivant])
track1.addNote(self.c2d)
counter += 1
if notesuivante == 2 and "Re" in notes:
self.d2.setDuration(temps[tempssuivant])
track1.addNote(self.d2)
counter += 1
if notesuivante == 3 and "Red" in notes:
self.d2d.setDuration(temps[tempssuivant])
track1.addNote(self.d2d)
counter += 1
if notesuivante == 4 and "Mi" in notes:
self.e2.setDuration(temps[tempssuivant])
track1.addNote(self.e2)
counter += 1
if notesuivante == 5 and "Fa" in notes:
self.f2.setDuration(temps[tempssuivant])
track1.addNote(self.f2)
counter += 1
if notesuivante == 6 and "Fad" in notes:
self.f2d.setDuration(temps[tempssuivant])
track1.addNote(self.f2d)
counter += 1
if notesuivante == 7 and "Sol" in notes:
self.g2.setDuration(temps[tempssuivant])
track1.addNote(self.g2)
counter += 1
if notesuivante == 8 and "Sold" in notes:
self.g2d.setDuration(temps[tempssuivant])
track1.addNote(self.g2d)
counter += 1
if notesuivante == 9 and "La" in notes:
self.a2.setDuration(temps[tempssuivant])
track1.addNote(self.a2)
counter += 1
if notesuivante == 10 and "Lad" in notes:
self.a2d.setDuration(temps[tempssuivant])
track1.addNote(self.a2d)
counter += 1
if notesuivante == 11 and "Si" in notes:
self.b2.setDuration(temps[tempssuivant])
track1.addNote(self.b2)
counter += 1
if notesuivante == 12 and "Do" in notes:
self.c3.setDuration(temps[tempssuivant])
track1.addNote(self.c3)
counter += 1
if notesuivante == 13 and "Dod" in notes:
self.c3d.setDuration(temps[tempssuivant])
track1.addNote(self.c3d)
counter += 1
if notesuivante == 14 and "Re" in notes:
self.d3.setDuration(temps[tempssuivant])
track1.addNote(self.d3)
counter += 1
if notesuivante == 15 and "Red" in notes:
self.d3d.setDuration(temps[tempssuivant])
track1.addNote(self.d3d)
counter += 1
if notesuivante == 16 and "Mi" in notes:
self.e3.setDuration(temps[tempssuivant])
track1.addNote(self.e3)
counter += 1
if notesuivante == 17 and "Fa" in notes:
self.f3.setDuration(temps[tempssuivant])
track1.addNote(self.f3)
counter += 1
if notesuivante == 18 and "Fad" in notes:
self.f3d.setDuration(temps[tempssuivant])
track1.addNote(self.f3d)
counter += 1
if notesuivante == 19 and "Sol" in notes:
self.g3.setDuration(temps[tempssuivant])
track1.addNote(self.g3)
counter += 1
if notesuivante == 20 and "Sold" in notes:
self.g3d.setDuration(temps[tempssuivant])
track1.addNote(self.g3d)
counter += 1
if notesuivante == 21 and "La" in notes:
self.a3.setDuration(temps[tempssuivant])
track1.addNote(self.a3)
counter += 1
if notesuivante == 22 and "Lad" in notes:
self.a3d.setDuration(temps[tempssuivant])
track1.addNote(self.a3d)
counter += 1
if notesuivante == 23 and "Si" in notes:
self.b3.setDuration(temps[tempssuivant])
track1.addNote(self.b3)
counter += 1
if notesuivante == 24 and "Do" in notes:
self.c4.setDuration(temps[tempssuivant])
track1.addNote(self.c4)
counter += 1
if notesuivante == 25 and "Dod" in notes:
self.c4d.setDuration(temps[tempssuivant])
track1.addNote(self.c4d)
counter += 1
if notesuivante == 26 and "Re" in notes:
self.d4.setDuration(temps[tempssuivant])
track1.addNote(self.d4)
counter += 1
if notesuivante == 27 and "Red" in notes:
self.d4d.setDuration(temps[tempssuivant])
track1.addNote(self.d4d)
counter += 1
if notesuivante == 28 and "Mi" in notes:
self.e4.setDuration(temps[tempssuivant])
track1.addNote(self.e4)
counter += 1
if notesuivante == 29 and "Fa" in notes:
self.f4.setDuration(temps[tempssuivant])
track1.addNote(self.f4)
counter += 1
if notesuivante == 30 and "Fad" in notes:
self.f4d.setDuration(temps[tempssuivant])
track1.addNote(self.f4d)
counter += 1
if notesuivante == 31 and "Sol" in notes:
self.g4.setDuration(temps[tempssuivant])
track1.addNote(self.g4)
counter += 1
if notesuivante == 32 and "Sold" in notes:
self.g4d.setDuration(temps[tempssuivant])
track1.addNote(self.g4d)
counter += 1
if notesuivante == 33 and "La" in notes:
self.a4.setDuration(temps[tempssuivant])
track1.addNote(self.a4)
counter += 1
if notesuivante == 34 and "Lad" in notes:
self.a4d.setDuration(temps[tempssuivant])
track1.addNote(self.a4d)
counter += 1
if notesuivante == 35 and "Si" in notes:
self.b4.setDuration(temps[tempssuivant])
track1.addNote(self.b4)
counter += 1
noteactuelle = notesuivante
tempsactuelle = tempssuivant
try:
os.remove("markov.mid")
except FileNotFoundError:
print("Does not exist yet")
easyMIDI.addTrack(track1)
easyMIDI.writeMIDI("markov.mid")
print("Song créé avec succès")
|
{"/main.py": ["/MarkovGen.py", "/AleatoireGen.py", "/Genre.py"], "/MarkovGen.py": ["/Gamme.py", "/Genre.py"], "/AleatoireGen.py": ["/Gamme.py"]}
|
39,882
|
MagicFame/ArtScore
|
refs/heads/master
|
/AleatoireGen.py
|
import os
from random import randrange
from EasyMIDI import EasyMIDI, Track, Note
from Gamme import Gamme
class AleatoireGen:
# Constructor
def __init__(self, genre):
### Note definitions
self.c2 = Note('C', octave=3, duration=1 / 4, volume=100) # DO2
self.c2d = Note('C#', octave=3, duration=1 / 4, volume=100)
self.d2 = Note('D', octave=3, duration=1 / 4, volume=100)
self.d2d = Note('D#', octave=3, duration=1 / 4, volume=100)
self.e2 = Note('E', octave=3, duration=1 / 4, volume=100)
self.f2 = Note('F', octave=3, duration=1 / 4, volume=100)
self.f2d = Note('F#', octave=3, duration=1 / 4, volume=100)
self.g2 = Note('G', octave=3, duration=1 / 4, volume=100)
self.g2d = Note('G#', octave=3, duration=1 / 4, volume=100)
self.a2 = Note('A', octave=3, duration=1 / 4, volume=100)
self.a2d = Note('A#', octave=3, duration=1 / 4, volume=100)
self.b2 = Note('B', octave=3, duration=1 / 4, volume=100)
self.c3 = Note('C', octave=4, duration=1 / 4, volume=100) # DO3
self.c3d = Note('C#', octave=4, duration=1 / 4, volume=100)
self.d3 = Note('D', octave=4, duration=1 / 4, volume=100)
self.d3d = Note('D#', octave=4, duration=1 / 4, volume=100)
self.e3 = Note('E', octave=4, duration=1 / 4, volume=100)
self.f3 = Note('F', octave=4, duration=1 / 4, volume=100)
self.f3d = Note('F#', octave=4, duration=1 / 4, volume=100)
self.g3 = Note('G', octave=4, duration=1 / 4, volume=100)
self.g3d = Note('G#', octave=4, duration=1 / 4, volume=100)
self.a3 = Note('A', octave=4, duration=1 / 4, volume=100)
self.a3d = Note('A#', octave=4, duration=1 / 4, volume=100)
self.b3 = Note('B', octave=4, duration=1 / 4, volume=100)
self.c4 = Note('C', octave=5, duration=1 / 4, volume=100) # DO4
self.c4d = Note('C#', octave=5, duration=1 / 4, volume=100)
self.d4 = Note('D', octave=5, duration=1 / 4, volume=100)
self.d4d = Note('D#', octave=5, duration=1 / 4, volume=100)
self.e4 = Note('E', octave=5, duration=1 / 4, volume=100)
self.f4 = Note('F', octave=5, duration=1 / 4, volume=100)
self.f4d = Note('F#', octave=5, duration=1 / 4, volume=100)
self.g4 = Note('G', octave=5, duration=1 / 4, volume=100)
self.g4d = Note('G#', octave=5, duration=1 / 4, volume=100)
self.a4 = Note('A', octave=5, duration=1 / 4, volume=100)
self.a4d = Note('A#', octave=5, duration=1 / 4, volume=100)
self.b4 = Note('B', octave=5, duration=1 / 4, volume=100)
### Scale definitions
self.doma = Gamme('Do majeur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si'])
self.lami = Gamme('La mineur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si'])
self.solma = Gamme('Sol majeur', ['Do', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.mimi = Gamme('Mi mineur', ['Do', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.rema = Gamme('Re majeur', ['Dod', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.simi = Gamme('Si mineur', ['Dod', 'Re', 'Mi', 'Fad', 'Sol', 'La', 'Si'])
self.lama = Gamme('La majeur', ['Dod', 'Re', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.fadmi = Gamme('Fad mineur', ['Dod', 'Re', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.mima = Gamme('Mi majeur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.dodmi = Gamme('Dod mineur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'La', 'Si'])
self.sima = Gamme('Si majeur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'Lad', 'Si'])
self.soldmi = Gamme('Sold mineur', ['Dod', 'Red', 'Mi', 'Fad', 'Sold', 'Lad', 'Si'])
self.fadma = Gamme('Fad majeur', ['Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad', 'Si'])
self.redmi = Gamme('Red mineur', ['Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad', 'Si'])
self.dodma = Gamme('Dod majeur', ['Do', 'Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad'])
self.ladmi = Gamme('Lad mineur', ['Do', 'Dod', 'Red', 'Fa', 'Fad', 'Sold', 'Lad'])
self.fama = Gamme('Fa majeur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Lad'])
self.remi = Gamme('Re mineur', ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Lad'])
self.ladma = Gamme('Lad majeur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'La', 'Lad'])
self.solmi = Gamme('Sol mineur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'La', 'Lad'])
self.redma = Gamme('Red majeur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
self.domi = Gamme('Do mineur', ['Do', 'Re', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
self.soldma = Gamme('Sold majeur', ['Do', 'Dod', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
self.fami = Gamme('Fa mineur', ['Do', 'Dod', 'Red', 'Fa', 'Sol', 'Sold', 'Lad'])
self.dissonnance = 0
gamme = genre.chooseGenre()
print(gamme)
# average dissonance of the genre (currently unused inside generate())
dissonnance = genre.dissonanceMoyenne() - 10
self.generate(gamme, dissonnance)
# Music generation
def generate(self, gamme, dissonnance):
notes = []
if gamme == "Do majeur" or gamme == "La mineur":
notes = self.doma.getNotes()
if gamme == "Sol majeur" or gamme == "Mi mineur":
notes = list(self.solma.getNotes())
if gamme == "Re majeur" or gamme == "Si mineur":
notes = list(self.redmi.getNotes())
if gamme == "La majeur" or gamme == "Fa# mineur":
notes = list(self.lama.getNotes())
if gamme == "Mi majeur" or gamme == "Do# mineur":
notes = list(self.mima.getNotes())
if gamme == "Si majeur" or gamme == "Sol# mineur":
notes = list(self.sima.getNotes())
if gamme == "Fa# majeur" or gamme == "Re# mineur":
notes = list(self.fadma.getNotes())
if gamme == "Do# majeur" or gamme == "La# mineur":
notes = list(self.dodma.getNotes())
if gamme == "Fa majeur" or gamme == "Re mineur":
notes = list(self.fama.getNotes())
if gamme == "La# majeur" or gamme == "Sol mineur":
notes = list(self.ladma.getNotes())
if gamme == "Re# majeur" or gamme == "Do mineur":
notes = list(self.redma.getNotes())
if gamme == "Sol# majeur" or gamme == "Fa mineur":
notes = list(self.soldma.getNotes())
counter = 0
# piece length: 100 to 200 notes
taillemorceau = randrange(100, 200)
dissonnanceactuelle = 100
easyMIDI = EasyMIDI()
track1 = Track("acoustic grand piano") # oops
while counter < taillemorceau:
# add a note from the scale
notesrandom = randrange(0, 7)
tempsrandom = randrange(0, 5)
temps = [1, 1 / 2, 1 / 4, 1 / 8, 1 / 16]
if notes[notesrandom] == "Do":
hauteur = randrange(1, 4)
if hauteur == 1:
self.c2.setDuration(temps[tempsrandom])
track1.addNote(self.c2)
if hauteur == 2:
self.c3.setDuration(temps[tempsrandom])
track1.addNote(self.c3)
if hauteur == 3:
self.c4.setDuration(temps[tempsrandom])
track1.addNote(self.c4)
if notes[notesrandom] == "Dod":
hauteur = randrange(1, 4)
if hauteur == 1:
self.c2d.setDuration(temps[tempsrandom])
track1.addNote(self.c2d)
if hauteur == 2:
self.c3d.setDuration(temps[tempsrandom])
track1.addNote(self.c3d)
if hauteur == 3:
self.c4d.setDuration(temps[tempsrandom])
track1.addNote(self.c4d)
if notes[notesrandom] == "Re":
hauteur = randrange(1, 4)
if hauteur == 1:
self.d2.setDuration(temps[tempsrandom])
track1.addNote(self.d2)
if hauteur == 2:
self.d3.setDuration(temps[tempsrandom])
track1.addNote(self.d3)
if hauteur == 3:
self.d4.setDuration(temps[tempsrandom])
track1.addNote(self.d4)
if notes[notesrandom] == "Red":
hauteur = randrange(1, 4)
if hauteur == 1:
self.d2d.setDuration(temps[tempsrandom])
track1.addNote(self.d2d)
if hauteur == 2:
self.d3d.setDuration(temps[tempsrandom])
track1.addNote(self.d3d)
if hauteur == 3:
self.d4d.setDuration(temps[tempsrandom])
track1.addNote(self.d4d)
if notes[notesrandom] == "Mi":
hauteur = randrange(1, 4)
if hauteur == 1:
self.e2.setDuration(temps[tempsrandom])
track1.addNote(self.e2)
if hauteur == 2:
self.e3.setDuration(temps[tempsrandom])
track1.addNote(self.e3)
if hauteur == 3:
self.e4.setDuration(temps[tempsrandom])
track1.addNote(self.e4)
if notes[notesrandom] == "Fa":
hauteur = randrange(1, 4)
if hauteur == 1:
self.f2.setDuration(temps[tempsrandom])
track1.addNote(self.f2)
if hauteur == 2:
self.f3.setDuration(temps[tempsrandom])
track1.addNote(self.f3)
if hauteur == 3:
self.f4.setDuration(temps[tempsrandom])
track1.addNote(self.f4)
if notes[notesrandom] == "Fad":
hauteur = randrange(1, 4)
if hauteur == 1:
self.f2d.setDuration(temps[tempsrandom])
track1.addNote(self.f2d)
if hauteur == 2:
self.f3d.setDuration(temps[tempsrandom])
track1.addNote(self.f3d)
if hauteur == 3:
self.f4d.setDuration(temps[tempsrandom])
track1.addNote(self.f4d)
if notes[notesrandom] == "Sol":
hauteur = randrange(1, 4)
if hauteur == 1:
self.g2.setDuration(temps[tempsrandom])
track1.addNote(self.g2)
if hauteur == 2:
self.g3.setDuration(temps[tempsrandom])
track1.addNote(self.g3)
if hauteur == 3:
self.g4.setDuration(temps[tempsrandom])
track1.addNote(self.g4)
if notes[notesrandom] == "Sold":
hauteur = randrange(1, 4)
if hauteur == 1:
self.g2d.setDuration(temps[tempsrandom])
track1.addNote(self.g2d)
if hauteur == 2:
self.g3d.setDuration(temps[tempsrandom])
track1.addNote(self.g3d)
if hauteur == 3:
self.g4d.setDuration(temps[tempsrandom])
track1.addNote(self.g4d)
if notes[notesrandom] == "La":
hauteur = randrange(1, 4)
if hauteur == 1:
self.a2.setDuration(temps[tempsrandom])
track1.addNote(self.a2)
if hauteur == 2:
self.a3.setDuration(temps[tempsrandom])
track1.addNote(self.a3)
if hauteur == 3:
self.a4.setDuration(temps[tempsrandom])
track1.addNote(self.a4)
if notes[notesrandom] == "Lad":
hauteur = randrange(1, 4)
if hauteur == 1:
self.a2d.setDuration(temps[tempsrandom])
track1.addNote(self.a2d)
if hauteur == 2:
self.a3d.setDuration(temps[tempsrandom])
track1.addNote(self.a3d)
if hauteur == 3:
self.a4d.setDuration(temps[tempsrandom])
track1.addNote(self.a4d)
if notes[notesrandom] == "Si":
hauteur = randrange(1, 4)
if hauteur == 1:
self.b2.setDuration(temps[tempsrandom])
track1.addNote(self.b2)
if hauteur == 2:
self.b3.setDuration(temps[tempsrandom])
track1.addNote(self.b3)
if hauteur == 3:
self.b4.setDuration(temps[tempsrandom])
track1.addNote(self.b4)
counter = counter + 1
print("Dissonnance du morceau créé : ", dissonnanceactuelle / 8)
try:
os.remove("random.mid")
except FileNotFoundError:
print("Does not exist yet")
easyMIDI.addTrack(track1)
easyMIDI.writeMIDI("random.mid")
print("Song créé avec succès")
|
{"/main.py": ["/MarkovGen.py", "/AleatoireGen.py", "/Genre.py"], "/MarkovGen.py": ["/Gamme.py", "/Genre.py"], "/AleatoireGen.py": ["/Gamme.py"]}
|
39,883
|
MagicFame/ArtScore
|
refs/heads/master
|
/venv/Lib/site-packages/mido/midifiles/__init__.py
|
from .meta import MetaMessage, UnknownMetaMessage
from .units import tick2second, second2tick, bpm2tempo, tempo2bpm
from .tracks import MidiTrack, merge_tracks
from .midifiles import MidiFile
|
{"/main.py": ["/MarkovGen.py", "/AleatoireGen.py", "/Genre.py"], "/MarkovGen.py": ["/Gamme.py", "/Genre.py"], "/AleatoireGen.py": ["/Gamme.py"]}
|
39,888
|
jbricht/mips_assembler
|
refs/heads/master
|
/parse.py
|
import collections
import re
import enum
import mips_instructions
decimal_number = re.compile('-?[0-9]+')
hex_number = re.compile('-?0x[0-9a-f]+', re.IGNORECASE)
comma = re.compile(',')
AsmInstruction = collections.namedtuple('AsmInstruction', ['mnemonic', 'operands'])
Token = collections.namedtuple("Token", ['type', 'value'])
OperandType = enum.Enum('OperandType', 'label literal register displaced')
AsmStatement = collections.namedtuple('AsmStatement', ['label', 'instruction'])
class ParseError(Exception):
def __init__(self, pos, expected, gotten):
self.pos = pos
self.expected = expected
self.gotten = gotten
class Lexer:
punctuation_types = {
':': 'colon',
',': 'comma',
'$': 'dollar',
'\n': 'newline',
'(': 'left_paren',
')': 'right_paren'
}
punctuation = ''.join(punctuation_types.keys())
mnemonics = set(mips_instructions.encoders.keys())
registers = set(mips_instructions.register_names)
whitespace = re.compile(r'[ \t]+')
identifier = re.compile(r'[a-z_][a-z0-9_]*', re.I)
decimal = re.compile('-?[0-9]+')
hex = re.compile('-?0x[0-9a-fA-F]+')
def __init__(self, input_string):
self.pos = 0
self.input = input_string
def match_re(self, compiled_re):
return compiled_re.match(self.input, self.pos)
def next_token(self):
# match single-character punctuation
cur = self.input[self.pos]
if cur in self.punctuation:
self.pos += 1
return Token(self.punctuation_types[cur], cur)
"""
The re package does not have any way to do maximal-munch, so the
following is necessary to resolve ambiguities between identifiers,
mnemonics, and registers, as well as ambiguities among mnemonics
such as add, addi, addiu, etc.
"""
m = self.match_re(self.identifier)
if m:
self.pos = m.end()
if m.group() in self.mnemonics:
tok_type = 'mnemonic'
elif m.group() in self.registers:
tok_type = 'register'
else:
tok_type = 'identifier'
return Token(tok_type, m.group())
m = self.match_re(self.whitespace)
if m:
self.pos = m.end()
return Token('whitespace', m.group())
m = self.match_re(self.decimal)
if m:
self.pos = m.end()
return Token('decimal', m.group())
m = self.match_re(self.hex)
if m:
self.pos = m.end()
return Token('hex', m.group())
return None # No matching token found
def lex(self):
while self.pos < len(self.input):
t = self.next_token()
if t is None:
raise Exception("Could not find valid token.", self.pos)
elif t.type == 'whitespace':
continue
else:
yield t
class Parser:
def __init__(self, tokens):
self.pos = 0
self.toks = list(tokens)
def cur_tok(self):
return self.toks[self.pos]
def advance_tok(self):
self.pos += 1
def match(self, tok_type):
ct = self.cur_tok()
if ct.type == tok_type:
self.advance_tok()
return ct
else:
raise ParseError(self.pos, tok_type, ct.type)
def register(self):
self.match("dollar")
return self.match("register")
def label(self):
return self.match("identifier")
def literal(self):
# this is ugly, should have one token type for all literals
ct = self.cur_tok()
if ct.type in {'hex', 'decimal'}:
self.advance_tok()
return ct
else:
# eeeew
raise ParseError(self.pos, "hex or decimal", ct.type)
def operand(self):
"""
returns a tuple of the operand type, and the operand itself.
for disp($reg) operands, returns (type, displacement, register)
otherwise just (type, operand)
the displacement must be a literal
"""
if self.cur_tok().type in {'hex', 'decimal'}:
# either just a literal or disp($reg)
lit = self.literal()
if self.cur_tok().type == 'left_paren':
self.match('left_paren')
base_reg = self.register()
self.match('right_paren')
return OperandType.displaced, lit, base_reg
else:
return OperandType.literal, lit
elif self.cur_tok().type == 'identifier':
return OperandType.label, self.label()
elif self.cur_tok().type == 'dollar':
return OperandType.register, self.register()
else:
# avoid silently returning None on an unexpected token
raise ParseError(self.pos, "operand", self.cur_tok().type)
def operand_list(self):
"""
operand {comma operand}
"""
ops = [self.operand()]
while self.cur_tok().type == 'comma':
self.match('comma')
ops.append(self.operand())
return ops
def line(self):
"""
[identifier colon] [mnemonic [operand_list]] newline
"""
label = None
if self.cur_tok().type == "identifier":
label = self.match("identifier")
self.match("colon")
if self.cur_tok().type == 'newline':
return label, None, None
mnemo = self.match('mnemonic')
if self.cur_tok().type == "newline":
operands = []
else:
operands = self.operand_list()
self.match("newline")
return label, mnemo, operands
def unpack_operand_list(ops):
"""
This massages lists of operands into the order expected by the encoders,
and unpacks them from Tokens into bare strings.
"""
if len(ops) > 3: # no instruction has more than 3 operands
raise Exception
if OperandType.displaced in [o[0] for o in ops]:
# must be of the form op rt, disp(rs)
if ops[0][0] != OperandType.register or len(ops) != 2:
raise Exception
rs = ops[1][2]
displacement = ops[1][1]
rt = ops[0][1]
return rt.value, rs.value, displacement.value
else:
return [o[1].value for o in ops]
def none_safe_value(foo):
# null checks are cool i guess
if foo is None:
return None
else:
return foo.value
def parse_file(fp):
"""
Lexes and parses a file line by line.
:param fp: The assembler source file to parse.
:return: An iterator of AsmStatements
"""
for line in fp.readlines():
if not line or line == '\n':
continue
if not line.endswith('\n'):
line += '\n'
label, mnemonic, op_list = Parser(Lexer(line).lex()).line()
if mnemonic is None:
yield AsmStatement(label.value, None)
else:
yield AsmStatement(none_safe_value(label),
AsmInstruction(none_safe_value(mnemonic), unpack_operand_list(op_list)))
if __name__ == '__main__':
from pprint import pprint
test_file = open('strcpy.asm', 'r')
parsed_lines = []
for l in test_file.readlines():
p = Parser(Lexer(l).lex())
parsed_lines.append(p.line())
pprint(parsed_lines)
|
{"/parse.py": ["/mips_instructions.py"], "/assembler.py": ["/mips_instructions.py", "/parse.py"]}
|
39,889
|
jbricht/mips_assembler
|
refs/heads/master
|
/mips_instructions.py
|
def encode_r_format(funct, rd, rs, rt, opcode=0, shamt=0):
return (opcode << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (shamt <<
6) | funct
def encode_i_format(opcode, rs, rt, immediate):
return (opcode << 26) | (rs << 21) | (rt << 16) | (immediate & 0xffff)
def encode_j_format(opcode, address):
return (opcode << 26) | ((address >> 2) % (2 ** 26))
def r_format_encoder(funct):
"""
returns an encoder for the most common r-format instructions, which operate
on three registers rd rs rt, take no shamt, and have opcode 0
"""
def encoder(rd, rs, rt):
return encode_r_format(funct, rd, rs, rt)
return encoder
def i_format_encoder(opcode):
"""
returns an encoder for the most common i-format instructions, which use
rt, rs, and an immediate
"""
def encoder(rt, rs, immediate):
return encode_i_format(opcode, rs, rt, immediate)
return encoder
def j_format_encoder(opcode):
def encoder(address):
return encode_j_format(opcode, address)
return encoder
def offset_field(offset):
"""
Branch instructions take an 18-bit byte offset, but since branch targets are word-aligned the low two bits are dropped and it fits in 16 bits.
"""
return (offset >> 2) & 0xffff
# map mnemonics to functions which encode that instruction
encoders = {
'add': r_format_encoder(0x20),
'addi': i_format_encoder(0x8),
'addiu': i_format_encoder(0x9),
'addu': r_format_encoder(0x21),
'and': r_format_encoder(0x24),
'andi': i_format_encoder(0xc),
'b': lambda offset: 0b000100 << 26 | offset_field(offset),
'bal': lambda offset: 0b000001 << 26 | 0b10001 << 16 | offset_field(offset),
'beq': lambda rs, rt, offset: 0b000100 << 26 | rs << 21 | rt << 16 | offset_field(offset),
'bgez': lambda rs, offset: 0b000001 << 26 | rs << 21 | 0b00001 << 16 | offset_field(offset),
'bgezal': lambda rs, offset: 0b000001 << 26 | rs << 21 | 0b10001 << 16 | offset_field(offset),
'bgtz': lambda rs, offset: 0b000111 << 26 | rs << 21 | offset_field(offset),
'blez': lambda rs, offset: 0b000110 << 26 | rs << 21 | offset_field(offset),
'bltz': lambda rs, offset: 0b000001 << 26 | rs << 21 | offset_field(offset),
'bltzal': lambda rs, offset: 0b000001 << 26 | rs << 21 | 0b10000 << 16 | offset_field(offset),
'bne': lambda rs, rt, offset: 0b000101 << 26 | rs << 21 | rt << 16 | offset_field(offset),
'break': lambda code: (code % 2 ** 20) << 6 | 0b001101,
'div': lambda rs, rt: encode_r_format(0x1a, 0, rs, rt),
'divu': lambda rs, rt: encode_r_format(0x1b, 0, rs, rt),
'j': j_format_encoder(0x2),
'jal': j_format_encoder(0x3),
'jalr': lambda rd, rs: rs << 21 | rd << 11 | 0b001001,
'jr': lambda rs: rs << 21 | 0b001000,
'lb': i_format_encoder(0b100000),
'lbu': i_format_encoder(0x24),
'lh': i_format_encoder(0b100001),
'lhu': i_format_encoder(0x25),
'lui': lambda rt, immediate: 0b001111 << 26 | rt << 16 | immediate,
'lw': i_format_encoder(0x23),
'lwl': i_format_encoder(0b100010),
'lwr': i_format_encoder(0b100110),
'mfhi': lambda rd: encode_r_format(0x10, rd, 0, 0),
'mflo': lambda rd: encode_r_format(0x12, rd, 0, 0),
'mthi': lambda rs: rs << 21 | 0b010001,
'mtlo': lambda rs: rs << 21 | 0b010011,
'mult': lambda rs, rt: encode_r_format(0x18, 0, rs, rt),
'multu': lambda rs, rt: encode_r_format(0x19, 0, rs, rt),
'nop': lambda: 0,
'nor': r_format_encoder(0x27),
'or': r_format_encoder(0x25),
'ori': i_format_encoder(0xd),
'sb': i_format_encoder(0x28),
'sh': i_format_encoder(0x29),
'sll': lambda rd, rt, shamt: encode_r_format(0x00, rd, 0, rt, shamt=shamt),
'sllv': lambda rd, rt, rs: encode_r_format(0b000100, rd, rs, rt),
'slt': r_format_encoder(0x2a),
'slti': i_format_encoder(0xa),
'sltiu': i_format_encoder(0xb),
'sltu': r_format_encoder(0x2b),
'sra': lambda rd, rt, shamt: encode_r_format(0x3, rd, 0, rt, shamt=shamt),
'srav': lambda rd, rt, rs: encode_r_format(0b000111, rd, rs, rt),
'srl': lambda rd, rt, shamt: encode_r_format(0x02, rd, 0, rt, shamt=shamt),
'srlv': lambda rd, rt, rs: encode_r_format(0b000110, rd, rs, rt),
'sub': r_format_encoder(0x22),
'subu': r_format_encoder(0x23),
'sw': i_format_encoder(0x2b),
'swl': i_format_encoder(0b101010),
'swr': i_format_encoder(0b101110),
'syscall': lambda code: (code % 2 ** 20) << 6 | 0b001100,
'xor': r_format_encoder(0b100110),
'xori': i_format_encoder(0b001110)
}
register_names = ['zero', 'at', 'v0', 'v1']
register_names.extend('a{}'.format(i) for i in range(4))
register_names.extend('t{}'.format(i) for i in range(8))
register_names.extend('s{}'.format(i) for i in range(8))
register_names.extend(['t8', 't9', 'k0', 'k1', 'gp', 'sp', 'fp', 'ra'])
registers = dict((name, num) for num, name in enumerate(register_names))
|
{"/parse.py": ["/mips_instructions.py"], "/assembler.py": ["/mips_instructions.py", "/parse.py"]}
|
39,890
|
jbricht/mips_assembler
|
refs/heads/master
|
/assembler.py
|
import struct
import mips_instructions
import parse
class BadLabel(Exception):
def __init__(self, label):
self.label = label
class Assembler:
"""
Tracks symbol values and the current address, keeps a buffer of
instructions to be assembled.
"""
def __init__(self):
self.ip = 0
self.symbols = dict()
self.instructions = []
def def_label(self, label):
if label in mips_instructions.register_names or label in mips_instructions.encoders.keys():
raise BadLabel(label)
else:
self.symbols[label] = self.ip
def push_instruction(self, insn):
self.instructions.append(insn)
self.ip += 4
def encode_operand(self, operand):
"""
Converts operands from their string representation to their
numerical representation, resolving label names along the way.
"""
if parse.decimal_number.match(operand):
return int(operand)
elif parse.hex_number.match(operand):
return int(operand, 16)
elif operand in mips_instructions.register_names:
return mips_instructions.registers[operand]
elif operand in self.symbols:
return self.symbols[operand]
else:
raise ValueError("cannot resolve operand: " + operand)
def encode_instruction(self, insn):
encoder = mips_instructions.encoders[insn.mnemonic]
operands = [self.encode_operand(o) for o in insn.operands]
instruction_word = encoder(*operands)
return struct.pack('>I', instruction_word)
def encode_instructions(self):
return b''.join(self.encode_instruction(i) for i in self.instructions)
def assemble_file(fp):
"""
Read in a file and assemble it.
:param fp: a file-like object containing MIPS assembly code
:return: a bytes of the assembled machine code.
"""
assembler = Assembler()
assembly_statements = parse.parse_file(fp)
for stmt in assembly_statements:
if stmt.label:
assembler.def_label(stmt.label)
if stmt.instruction:
assembler.push_instruction(stmt.instruction)
return assembler.encode_instructions()
if __name__ == '__main__':
assembled = assemble_file(open('strcpy.asm'))
print(assembled.hex())
|
{"/parse.py": ["/mips_instructions.py"], "/assembler.py": ["/mips_instructions.py", "/parse.py"]}
|
39,891
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/tx.py
|
from .bucket import Bucket
from .share import meta_struct
class Tx:
def __init__(self, db, writable):
self.db = db
self.meta = db.meta()
self.root = Bucket(self, self.meta.root_pgid)
self.writable = writable
self.pages = {}
if self.writable:
self.txid = self.meta.txid + 1
else:
# read-only transactions keep the committed txid so id() stays valid
self.txid = self.meta.txid
self.closed = False
def id(self):
return self.txid
def size(self):
return self.meta.pgid * self.db.ps
def bucket(self, name=None):
if not name:
return self.root
else:
return self.root.bucket(name)
def create_bucket(self, name):
return self.root.create_bucket(name)
def delete_bucket(self, name):
return self.root.delete_bucket(name)
def cursor(self):
return self.root.cursor()
def commit(self):
if not self.writable:
self.close()
return
self.root.rebalance()
self.root.spill()
self.commit_freelist()
self.write()
self.write_meta()
self.close()
def page(self, pgid):
if pgid in self.pages:
return self.pages[pgid]
return self.db.page(pgid)
def allocate(self, n):
p = self.db.allocate(n)
self.pages[p.id] = p
return p
def commit_freelist(self):
p = self.page(self.meta.freelist)
self.db.freelist.write(p)
def write(self):
# for p in self.pages.values(): p.write_inodes()
pass
def write_meta(self):
pgid = self.txid % 2
p = self.db.page(pgid)
new_meta = meta_struct.pack(
self.meta.magic,
self.meta.version,
self.meta.pageSize,
self.meta.flags,
self.root.root_pgid,
0,
self.meta.freelist,
self.db.max_pgid,
self.txid,
0,
)
p.data[:len(new_meta)] = new_meta
self.db.mmap.obj.flush()
def for_each_page(self, pgid):
p = self.page(pgid)
yield p
if p.is_branch():
for elem in p.branch_elems():
yield from self.for_each_page(elem.pgid)
def check_bucket(self, bucket, reachable):
if bucket.root_pgid == 0:
return
for p in bucket.tx.for_each_page(bucket.root_pgid):
if p.id > self.meta.max_pgid:
raise Exception("page out of bounds")
for i in range(p.overflow+1):
id = p.id + i
if id in reachable:
raise Exception("multiple references")
reachable[id] = True
for k, v in bucket:
if v is not None:
continue
child = bucket.bucket(k)
self.check_bucket(child, reachable)
def close(self):
if self.closed:
return
self.closed = True
if self.writable:
self.db.lock.release()
else:
self.db.mmap_lock.r_release()
def __del__(self):
self.close()
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,892
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/rwlock.py
|
from threading import Lock
class RWLock(object):
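"""
Readers-writer lock using the "lightswitch" pattern: the first reader
acquires the write lock on behalf of all readers and the last reader
releases it, so readers share access while a writer holds it exclusively.
"""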
def __init__(self):
self.w_lock = Lock()
self.num_r_lock = Lock()
self.num_r = 0
def r_acquire(self):
self.num_r_lock.acquire()
self.num_r += 1
if self.num_r == 1:
self.w_lock.acquire()
self.num_r_lock.release()
def r_release(self):
self.num_r_lock.acquire()
self.num_r -= 1
if self.num_r == 0:
self.w_lock.release()
self.num_r_lock.release()
def w_acquire(self):
self.w_lock.acquire()
def w_release(self):
self.w_lock.release()
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,893
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/page.py
|
from .node import Inode
from .share import leafPageFlag, branchPageFlag, freelistPageFlag, \
page_tuple, page_struct, \
leaf_elem_tuple, leaf_elem_struct, \
branch_elem_tuple, branch_elem_struct
class Page:
def __init__(self):
self.id = 0
self.flags = 0
self.count = 0
self.overflow = 0
self.header = None
self.data = None
self.inodes = None
def is_leaf(self):
return bool(self.flags & leafPageFlag)
def is_branch(self):
return bool(self.flags & branchPageFlag)
def leaf_elems(self):
if self.inodes is not None:
return self.inodes
elem_size = leaf_elem_struct.size
self.inodes = []
for i in range(self.count):
record = self.data[i*elem_size:(i+1)*elem_size]
e = leaf_elem_tuple._make(leaf_elem_struct.unpack(record))
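# the stored pos is relative to this element's own header, so add the
# header's position (i * elem_size) to get an offset into the page data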
pos = e.pos + i * elem_size
key = bytes(self.data[pos:pos+e.ksize])
value = bytes(self.data[pos+e.ksize:pos+e.ksize+e.vsize])
n = Inode(key, value, 0, e.flags)
self.inodes.append(n)
return self.inodes
def branch_elems(self):
if self.inodes is not None:
return self.inodes
elem_size = branch_elem_struct.size
self.inodes = []
for i in range(self.count):
record = self.data[i*elem_size:(i+1)*elem_size]
e = branch_elem_tuple._make(branch_elem_struct.unpack(record))
pos = e.pos + i * elem_size
key = bytes(self.data[pos:pos+e.ksize])
n = Inode(key, b"", e.pgid, 0)
self.inodes.append(n)
return self.inodes
def write_inodes(self, inodes):
self.count = len(inodes)
self.inodes = inodes
if self.is_leaf():
elem_size = leaf_elem_struct.size
else:
elem_size = branch_elem_struct.size
off = elem_size * self.count
for i in range(self.count):
n = self.inodes[i]
sz = len(n.key) + len(n.value)
if self.is_leaf():
pos = i * elem_size
# leaf_elem_tuple = namedtuple("leaf_elem", "flags pos ksize vsize")
b = leaf_elem_struct.pack(n.flags, off-pos, len(n.key), len(n.value))
self.data[pos:pos+len(b)] = b
self.data[off:off+len(n.key)] = n.key
try:
self.data[off+len(n.key):off+len(n.key)+len(n.value)] = n.value
except: # noqa
print(type(n.value), len(n.value))
raise
else:
pos = i * elem_size
# branch_elem_tuple = namedtuple("branch_elem", "pos ksize pgid")
b = branch_elem_struct.pack(off-pos, len(n.key), n.pgid)
self.data[pos:pos+len(b)] = b
self.data[off:off+len(n.key)] = n.key
# print("write branch", n.key)
off += sz
# print("write page", self.id, self.flags, self.count)
self.write_header()
def write_header(self):
b = page_struct.pack(self.id, self.flags, self.count, self.overflow)
self.header[:] = b
def free_ids(self):
ids = []
for i in range(self.count):
ids.append(int.from_bytes(self.data[i*8:(i+1)*8], "little"))
return ids
def write_ids(self, ids):
self.flags = freelistPageFlag
self.count = len(ids)
# store the ids sequentially, mirroring how free_ids() reads them back
for i, pgid in enumerate(ids):
self.data[i*8:(i+1)*8] = pgid.to_bytes(8, "little")
self.write_header()
def page_from_data(data):
page = Page()
p = page_tuple._make(page_struct.unpack(data[:page_struct.size]))
page.id = p.id
page.flags = p.flags
page.count = p.count
page.overflow = p.overflow
page.header = data[:page_struct.size]
page.data = data[page_struct.size:]
page.from_data = True
return page
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,894
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/db.py
|
import os
import fcntl
import mmap
import threading
import contextlib
from .freelist import FreeList
from .page import page_from_data
from .tx import Tx
from .rwlock import RWLock
from .share import leafPageFlag, metaPageFlag, freelistPageFlag, \
page_struct, meta_tuple, meta_struct
PAGESIZE = 4096
NO_FREELIST = 0xffffffffffffffff
class BoltDB:
def __init__(self, filename, readonly=False):
self.filename = filename
self.readonly = readonly
self.fd = os.open(filename, os.O_RDWR | os.O_CREAT, 0o666)
if readonly:
fcntl.lockf(self.fd, fcntl.LOCK_SH)
else:
fcntl.lockf(self.fd, fcntl.LOCK_EX)
self.datasz = os.fstat(self.fd).st_size
if self.datasz == 0:
self._init_db_file()
self.mmap = memoryview(mmap.mmap(self.fd, self.datasz, access=mmap.ACCESS_WRITE))
self.meta0 = meta_tuple._make(meta_struct.unpack(
self.mmap[page_struct.size:page_struct.size+meta_struct.size]))
self.pagesize = self.meta0.pageSize
self.max_pgid = self.datasz // self.pagesize
self.lock = threading.Lock()
self.meta_lock = threading.Lock()
self.mmap_lock = RWLock()
if readonly:
self.freelist = None
else:
self.freelist = FreeList()
meta = self.meta()
if meta.freelist == NO_FREELIST:
self.freelist.read_ids(self.freepages())
else:
self.freelist.read(self.page(meta.freelist))
def _init_db_file(self):
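# fresh-file layout: pages 0 and 1 are the two alternating meta pages,
# page 2 is the freelist, page 3 is the root bucket's (empty) leaf page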
buf = memoryview(bytearray(PAGESIZE*4))
for i in range(2):
p = page_from_data(buf[i*PAGESIZE:])
p.id = i
p.flags = metaPageFlag
p.write_header()
meta_struct.pack_into(
buf, i*PAGESIZE+page_struct.size,
0xED0CDAED, 2, PAGESIZE, 0, 3, 0, 2, 4, i, 0
)
i = 2
p = page_from_data(buf[i*PAGESIZE:])
p.id = i
p.flags = freelistPageFlag
p.write_header()
i = 3
p = page_from_data(buf[i*PAGESIZE:])
p.id = i
p.flags = leafPageFlag
p.write_header()
self.datasz = os.write(self.fd, buf)
os.fsync(self.fd)
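# Two meta pages alternate by txid parity (see Tx.write_meta); on read we
# trust whichever has the higher txid, so a torn meta write never clobbers
# the last fully committed state.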
def meta(self):
self.meta0 = meta_tuple._make(meta_struct.unpack(
self.mmap[page_struct.size:page_struct.size+meta_struct.size]))
self.meta1 = meta_tuple._make(meta_struct.unpack(
self.mmap[self.pagesize+page_struct.size:self.pagesize+page_struct.size+meta_struct.size]))
return self.meta1 if self.meta1.txid > self.meta0.txid else self.meta0
def begin(self, writable=False):
if writable:
if self.readonly:
raise Exception("database is in read-only mode")
self.lock.acquire()
self.meta_lock.acquire()
tx = Tx(self, writable)
self.meta_lock.release()
if not writable:
self.mmap_lock.r_acquire()
return tx
@contextlib.contextmanager
def update(self):
tx = self.begin(True)
try:
yield tx
tx.commit()
except: # noqa
self.freelist.rollback()
raise
finally:
tx.close()
@contextlib.contextmanager
def view(self):
tx = self.begin()
try:
yield tx
finally:
tx.close()
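# A minimal usage sketch (hypothetical file name; byte-string keys assumed):
#   db = BoltDB("example.db")
#   with db.update() as tx:
#       b = tx.create_bucket(b"people")
#       b.put(b"alice", b"1")
#   with db.view() as tx:
#       print(tx.bucket(b"people").get(b"alice"))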
def page(self, pgid):
return page_from_data(self.mmap[self.pagesize*pgid:])
def allocate(self, n):
pgid = self.freelist.allocate(n)
if pgid == 0:
if (self.max_pgid + n) * self.pagesize > self.datasz:
self.datasz = (self.max_pgid + n + 4) * self.pagesize
os.ftruncate(self.fd, self.datasz)
self.mmap_lock.w_acquire()
self.mmap.release()
self.mmap = memoryview(mmap.mmap(self.fd, self.datasz, access=mmap.ACCESS_WRITE))
self.mmap_lock.w_release()
pgid = self.max_pgid
self.max_pgid += n
for i in range(n):
self.freelist.allocate_new(pgid+i)
p = self.page(pgid)
p.id = pgid
p.overflow = n - 1
return p
def freepages(self):
ids = []
with self.view() as tx:
reachable = {}
tx.check_bucket(tx.root, reachable)
for i in range(2, tx.meta.max_pgid):
if i not in reachable:
ids.append(i)
return ids
def close(self):
# guard against double close (e.g. explicit close() followed by __del__)
if getattr(self, "closed", False):
return
self.closed = True
self.lock.acquire()
self.meta_lock.acquire()
self.mmap_lock.w_acquire()
self.mmap_lock.w_release()
self.meta_lock.release()
self.lock.release()
fcntl.lockf(self.fd, fcntl.LOCK_UN)
self.mmap.release()
os.close(self.fd)
def __del__(self):
self.close()
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,895
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/bucket.py
|
from .cursor import Cursor
from .page import page_from_data
from .node import Node
from .share import bucket_tuple, bucket_struct, bucketLeafFlag
class Bucket:
def __init__(self, tx, root_pgid):
self.tx = tx
self.root_pgid = root_pgid
self.name = ""
self.root_node = None
self.page = None
self.nodes = {}
self.pages = {}
self.sub_buckets = {}
def cursor(self):
return Cursor(self)
def __iter__(self):
return self.cursor()
def get(self, key):
k, v = self.cursor().seek(key)
if k != key:
return None
return v
def put(self, key, value):
if not self.tx.writable:
raise Exception("cannot write in readonly tx")
c = self.cursor()
k, v = c.seek(key)
if k == key and v is None:
raise Exception("cannot write sub bucket")
c.node().put(key, key, value, 0, 0)
def delete(self, key):
if not self.tx.writable:
raise Exception("cannot write in readonly tx")
c = self.cursor()
k, _, flag = c._seek(key)
if k != key:
return
if flag & bucketLeafFlag:
raise Exception("cannot delete sub bucket")
c.node().delete(key)
def bucket(self, name):
if name in self.sub_buckets:
return self.sub_buckets[name]
c = self.cursor()
k, v, flags = c._seek(name)
if k != name or flags & bucketLeafFlag == 0:
return None
b = self._open_bucket(v)
b.name = name
self.sub_buckets[name] = b
return b
def _open_bucket(self, value):
bsize = bucket_struct.size
b = bucket_tuple._make(bucket_struct.unpack(value[:bsize]))
b = Bucket(self.tx, b.root)
if b.root_pgid == 0:
b.page = page_from_data(value[bsize:])
return b
def create_bucket(self, name):
if not self.tx.writable:
raise Exception("cannot write in readonly tx")
c = self.cursor()
k, _, flags = c._seek(name)
if k == name:
if flags & bucketLeafFlag:
raise Exception("bucket already exists")
raise Exception("incompatible value")
b = Bucket(self.tx, 0)
b.root_node = Node(self)
b.root_node.is_leaf = True
value = b.inline_value()
c.node().put(name, name, value, 0, bucketLeafFlag)
self.page = None
return self.bucket(name)
def delete_bucket(self, name):
if not self.tx.writable:
raise Exception("cannot write in readonly tx")
child = self.bucket(name)
if child is None:
raise Exception("bucket not exists")
del self.sub_buckets[name]
c = self.cursor()
c._seek(name)
c.node().delete(name)
# TODO free pages
def page_node(self, pgid):
if self.root_pgid == 0:
if self.root_node is not None:
return None, self.root_node
return self.page, None
if pgid in self.nodes:
return None, self.nodes[pgid]
if pgid in self.pages:
return self.pages[pgid], None
page = self.tx.page(pgid)
self.pages[pgid] = page
return page, None
def node(self, pgid, parent):
if pgid in self.nodes:
return self.nodes[pgid]
n = Node(self)
n.parent = parent
n.pgid = pgid
if parent is None:
self.root_node = n
else:
parent.children.append(n)
p = self.page or self.pages.get(pgid) or self.tx.page(pgid)
n.read(p)
self.nodes[pgid] = n
return n
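# Inline buckets: a bucket small enough to stay a single leaf is serialized
# into its parent's leaf value (root_pgid == 0) instead of getting pages of
# its own; see inlineable() and inline_value() below.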
def inlineable(self):
n = self.root_node
if n is None or not n.is_leaf:
return False
size = 16  # page header size (page_struct.size)
for i in n.inodes:
size += 8 + len(i.key) + len(i.value)
if i.flags & bucketLeafFlag:
return False
if size > 1024:
return False
return True
def inline_value(self):
n = self.root_node
value = memoryview(bytearray(bucket_struct.size+n.size()))
bucket_struct.pack_into(value, 0, 0, 0)
p = page_from_data(value[bucket_struct.size:])
n.write(p)
return value.obj
def spill(self):
for name, child in self.sub_buckets.items():
if child.inlineable():
value = child.inline_value()
else:
child.spill()
value = bucket_struct.pack(child.root_pgid, 0)
if child.root_node is None:
continue
c = self.cursor()
k, _, flags = c._seek(name)
if k != name:
raise Exception("misplaced bucket header")
if flags & bucketLeafFlag == 0:
raise Exception("unexpected bucket header flag")
c.node().put(name, name, value, 0, bucketLeafFlag)
if self.root_node is None:
return
self.root_node.spill()
self.root_node = self.root_node.root()
self.root_pgid = self.root_node.pgid
def rebalance(self):
for n in list(self.nodes.values()):
n.rebalance()
for child in self.sub_buckets.values():
child.rebalance()
def __del__(self):
# self.close()
pass
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,896
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/node.py
|
import functools
from bisect import bisect_left
from .share import page_struct, leaf_elem_struct, branch_elem_struct
@functools.total_ordering
class Inode:
def __init__(self, key, value, pgid, flags):
self.key = key
self.value = value
self.pgid = pgid
self.flags = flags
def __eq__(self, other):
return self.key == other
def __lt__(self, other):
return self.key < other
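# Inodes compare against bare keys so bisect_left can search a sorted list
# of Inodes with a plain byte-string key (see Node.put/delete/child_index)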
class Node:
def __init__(self, bucket):
self.bucket = bucket
self.is_leaf = False
self.unbalanced = False
self.spilled = False
self.key = None
self.pgid = 0
self.parent = None
self.children = []
self.inodes = []
def root(self):
if self.parent is None:
return self
return self.parent.root()
def size(self):
sz = page_struct.size
elsz = leaf_elem_struct.size \
if self.is_leaf else branch_elem_struct.size
for n in self.inodes:
sz += elsz + len(n.key) + len(n.value)
return sz
def child_at(self, index):
if self.is_leaf:
raise Exception("invalid child_at on a leaf node")
return self.bucket.node(self.inodes[index].pgid, self)
def child_index(self, n):
return bisect_left(self.inodes, n.key)
def num_children(self):
return len(self.inodes)
def next_sibling(self):
if self.parent is None:
return None
index = self.parent.child_index(self)
if index >= self.parent.num_children() - 1:
return None
return self.parent.child_at(index+1)
def prev_sibling(self):
if self.parent is None:
return None
index = self.parent.child_index(self)
if index == 0:
return None
return self.parent.child_at(index-1)
def put(self, old_key, new_key, value, pgid, flags):
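# Replace the inode at old_key if present, otherwise insert a new
# inode at the position that keeps self.inodes sorted by key.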
index = bisect_left(self.inodes, old_key)
exact = self.inodes and index < len(self.inodes) and \
self.inodes[index].key == old_key
n = Inode(new_key, value, pgid, flags)
if exact:
self.inodes[index] = n
else:
self.inodes.insert(index, n)
def delete(self, key):
if not self.inodes:
return
index = bisect_left(self.inodes, key)
if index >= len(self.inodes) or self.inodes[index] != key:
return
self.inodes.pop(index)
self.unbalanced = True
def read(self, p):
if p.is_leaf():
self.is_leaf = True
self.inodes = p.leaf_elems()
else:
self.inodes = p.branch_elems()
if len(self.inodes) > 0:
self.key = self.inodes[0].key
def write(self, p=None):
if p is None:
p, _ = self.bucket.page_node(self.pgid)
if p is None:
p = self.bucket.page or self.bucket.tx.page(self.pgid)
if self.is_leaf:
p.flags = 0x2
else:
p.flags = 0x1
p.write_inodes(self.inodes)
def spill(self):
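# Spill children depth-first, split this node into page-sized pieces,
# then write each piece to a newly allocated page and refresh the
# key that references it in the parent.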
if self.spilled:
return
for c in self.children:
c.spill()
# print("spill page id", self.pgid, id(self), id(self.parent), self.size())
# print("spill nodes ", len(nodes))
tx = self.bucket.tx
self.children = []
nodes = self.split(tx.db.pagesize)
for n in nodes:
# if n.pgid == 0:
# p = self.bucket.tx.allocate((n.size()+4096-1)//4096)
# n.pgid = p.id
# else:
# p = None
if n.pgid > 0:
tx.db.freelist.free(tx.page(n.pgid))
p = self.bucket.tx.allocate((n.size()+tx.db.pagesize-1)//tx.db.pagesize)
n.pgid = p.id
n.write(p)
n.spilled = True
if n.parent is not None and len(n.inodes) > 0:
key = n.key or n.inodes[0].key
n.parent.put(key, n.inodes[0].key, b"", n.pgid, 0)
n.key = n.inodes[0].key
if self.parent is not None and self.parent.pgid == 0:
self.children = []
self.parent.spill()
def split(self, pagesize):
nodes = []
node = self
while node is not None:
a, b = node.split_two(pagesize)
nodes.append(a)
node = b
return nodes
def split_two(self, pagesize):
# print("split me", id(self), self.size())
if len(self.inodes) <= 2 or self.size() < pagesize:
return self, None
# i = len(self.inodes) // 2
i = self._split_index(pagesize*3/4)
if self.parent is None:
p = Node(self.bucket)
p.children = [self]
self.parent = p
next = Node(self.bucket)
next.is_leaf = self.is_leaf
next.parent = self.parent
next.parent.children.append(next)
inodes = self.inodes
next.inodes = inodes[i:]
self.inodes = inodes[:i]
return self, next
def _split_index(self, threshold):
sz = page_struct.size
elsz = leaf_elem_struct.size \
if self.is_leaf else branch_elem_struct.size
for i, n in enumerate(self.inodes):
sz += elsz + len(n.key) + len(n.value)
if i >= 2 and sz > threshold:
return i
def rebalance(self):
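# Nodes that shrank below a quarter page or hold too few keys are
# rebalanced: a root with a single branch child is collapsed, and an
# empty node is removed, propagating the rebalance to its parent.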
if not self.unbalanced:
return
self.unbalanced = False
# print("rebalance", self.pgid, len(self.inodes))
threshold = self.bucket.tx.db.pagesize/4
if self.size() > threshold and len(self.inodes) > 2:
return
if self.parent is None:
if self.num_children() == 0:
self.is_leaf = True
return
# Collapse the root when its only entry is a single branch child.
if not self.is_leaf and len(self.inodes) == 1:
child = self.bucket.node(self.inodes[0].pgid, self)
self.is_leaf = child.is_leaf
self.inodes = child.inodes[:]
self.children = child.children
for i in self.inodes:
if i.pgid in self.bucket.nodes:
self.bucket.nodes[i.pgid].parent = self
child.parent = None
del self.bucket.nodes[child.pgid]
child.free()
return
if self.num_children() == 0:
self.parent.delete(self.key)
self.parent.remove_child(self)
del self.bucket.nodes[self.pgid]
self.free()
self.parent.rebalance()
def remove_child(self, child):
self.children.remove(child)
def free(self):
if self.pgid != 0:
# freelist
pass
self.unbalanced = False
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,897
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/tests/test_rw.py
|
import os
import unittest
import tempfile
import threading
from boltdb import BoltDB
from boltdb.rwlock import RWLock
class BRWLock(RWLock):
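# RWLock variant whose writer acquire fails fast instead of blocking,
# so tests can assert that an operation would have blocked.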
def w_acquire(self):
if not self.w_lock.acquire(timeout=0.01):
raise Exception("blocked")
class BLock:
def __init__(self):
self.lock = threading.Lock()
def acquire(self):
if not self.lock.acquire(timeout=0.01):
raise Exception("blocked")
def release(self):
return self.lock.release()
class TestRW(unittest.TestCase):
def setUp(self):
self.db = BoltDB(tempfile.mktemp())
def tearDown(self):
os.unlink(self.db.filename)
def test_rw(self):
# replace mmap_lock
self.db.mmap_lock = BRWLock()
wtx = self.db.begin(True)
rtx = self.db.begin(False)
wtx.bucket().put(b"foo", b"bar")
self.assertIsNone(rtx.bucket().get(b"foo"))
# writer will block on mmap
with self.assertRaisesRegex(Exception, "blocked"):
wtx.commit()
wtx.close()
rtx.close()
def test_ww(self):
self.db.lock = BLock()
wtx = self.db.begin(True)
# only one writer at a time
with self.assertRaisesRegex(Exception, "blocked"):
self.db.begin(True)
wtx.close()
with self.db.update():
pass
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,898
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/tests/test_free.py
|
import os
import unittest
import tempfile
from boltdb import BoltDB
class TestFree(unittest.TestCase):
def setUp(self):
self.db = BoltDB(tempfile.mktemp())
def tearDown(self):
os.unlink(self.db.filename)
def test_free(self):
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(self.db.freelist.ids, [3])
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(self.db.freelist.ids, [4])
def test_free2(self):
self.assertEqual(self.db.freepages(), [2])
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(sorted(self.db.freepages()), [2, 3])
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
self.assertEqual(sorted(self.db.freepages()), [2, 4])
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,899
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/share.py
|
import struct
from collections import namedtuple
branchPageFlag = 0x01
leafPageFlag = 0x02
metaPageFlag = 0x04
freelistPageFlag = 0x10
bucketLeafFlag = 0x01
page_tuple = namedtuple('page', 'id flags count overflow')
page_struct = struct.Struct("QHHI")
meta_tuple = namedtuple('meta', 'magic version pageSize flags root_pgid sequence freelist max_pgid txid checksum')
meta_struct = struct.Struct("IIIIQQQQQQ")
leaf_elem_tuple = namedtuple("leaf_elem", "flags pos ksize vsize")
leaf_elem_struct = struct.Struct("IIII")
branch_elem_tuple = namedtuple("branch_elem", "pos ksize pgid")
branch_elem_struct = struct.Struct("IIQ")
bucket_tuple = namedtuple("bucket", "root sequence")
bucket_struct = struct.Struct("QQ")
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,900
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/__init__.py
|
from .db import BoltDB # noqa
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,901
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/cursor.py
|
from bisect import bisect_left
class ElemRef:
def __init__(self, page, node, index):
self.page = page
self.node = node
self.index = index
def is_leaf(self):
if self.node is not None:
return self.node.is_leaf
return self.page.is_leaf()
def count(self):
if self.node is not None:
return len(self.node.inodes)
return self.page.count
class Cursor:
def __init__(self, bucket):
self.bucket = bucket
self.stack = []
def first(self):
self.stack = []
p, n = self.bucket.page_node(self.bucket.root_pgid)
self.stack.append(ElemRef(p, n, 0))
self._first()
k, v, flags = self.key_value()
if flags & 0x1:
return k, None
return k, v
def last(self):
self.stack = []
p, n = self.bucket.page_node(self.bucket.root_pgid)
ref = ElemRef(p, n, 0)
ref.index = ref.count() - 1
self.stack.append(ref)
self._last()
k, v, flags = self.key_value()
if flags & 0x1:
return k, None
return k, v
def next(self):
k, v, flags = self._next()
if flags & 0x1:
return k, None
return k, v
def __next__(self):
if len(self.stack) == 0:
k, v = self.first()
else:
k, v = self.next()
if k is None:
raise StopIteration
return k, v
def prev(self):
pass
def seek(self, key):
k, v, flags = self._seek(key)
ref = self.stack[-1]
if ref.index >= ref.count():
k, v, flags = self._next()
if k is None:
return None, None
if flags & 0x1:
return k, None
return k, v
def _first(self):
# go to first leaf
while True:
ref = self.stack[-1]
if ref.is_leaf():
break
if ref.node is not None:
pgid = ref.node.inodes[ref.index].pgid
else:
pgid = ref.page.branch_elems()[ref.index].pgid
p, n = self.bucket.page_node(pgid)
self.stack.append(ElemRef(p, n, 0))
def _last(self):
# go to last leaf
while True:
ref = self.stack[-1]
if ref.is_leaf():
break
if ref.node is not None:
pgid = ref.node.inodes[ref.index].pgid
else:
pgid = ref.page.branch_elems()[ref.index].pgid
p, n = self.bucket.page_node(pgid)
ref = ElemRef(p, n, 0)
ref.index = ref.count() - 1
self.stack.append(ref)
def _next(self):
while True:
i = len(self.stack) - 1
while i >= 0:
ref = self.stack[i]
if ref.index < ref.count() - 1:
ref.index += 1
break
i -= 1
if i == -1:
return None, None, 0
self.stack = self.stack[:i+1]
self._first()
if self.stack[-1].count() == 0:
continue
return self.key_value()
def _seek(self, key):
self.stack = []
self._search(key, self.bucket.root_pgid)
return self.key_value()
def _search(self, key, pgid):
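# Binary-search the key at each level and recurse into the child page
# whose key range may contain it, recording the path on the stack.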
p, n = self.bucket.page_node(pgid)
ref = ElemRef(p, n, 0)
self.stack.append(ref)
if ref.is_leaf():
if n is not None:
inodes = n.inodes
else:
inodes = ref.page.leaf_elems()
ref.index = bisect_left(inodes, key)
else:
if n is not None:
inodes = n.inodes
else:
inodes = p.branch_elems()
index = bisect_left(inodes, key)
if index == len(inodes) or (index > 0 and inodes[index].key != key):
index -= 1
ref.index = index
self._search(key, inodes[index].pgid)
def key_value(self):
ref = self.stack[-1]
if ref.count() == 0 or ref.index >= ref.count():
return None, None, 0
if ref.node is not None:
n = ref.node.inodes[ref.index]
else:
n = ref.page.leaf_elems()[ref.index]
return n.key, n.value, n.flags
def node(self):
ref = self.stack[-1]
if ref.node is not None and ref.is_leaf():
return ref.node
n = self.stack[0].node
if n is None:
n = self.bucket.node(self.stack[0].page.id, None)
for ref in self.stack[:-1]:
n = n.child_at(ref.index)
return n
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,902
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/boltdb/freelist.py
|
from .share import page_struct, freelistPageFlag
class FreeList:
def __init__(self):
self.ids = []
self.pending = []
self.allocs = []
self.cache = set()
def size(self):
n = self.count()
if n >= 0xffff:
n += 1
return page_struct.size + 8 * n
def count(self):
return self.free_count() + self.pending_count()
def free_count(self):
return len(self.ids)
def pending_count(self):
return len(self.pending)
def allocate(self, n):
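# Scan the sorted free list for a run of n contiguous page ids and
# return the first id of the run, or 0 if no such run exists.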
if len(self.ids) == 0:
return 0
initial = previd = 0
for i, id in enumerate(self.ids):
if previd == 0 or id - previd != 1:
initial = id
if id - initial + 1 == n:
if i + 1 == n:
self.ids = self.ids[i+1:]
else:
del self.ids[i-n+1:i+1]
for j in range(n):
self.cache.remove(j+initial)
self.allocs.append(j+initial)
return initial
previd = id
return 0
def allocate_new(self, pid):
self.allocs.append(pid)
def free(self, p):
if p.id <= 1:
raise Exception("cannot free page 0 or 1")
if p.id in self.cache:
raise Exception("page already freed")
for i in range(p.overflow+1):
self.pending.append(p.id+i)
self.cache.add(p.id+i)
def rollback(self):
self.ids = sorted(self.ids + self.allocs)
self.cache.clear()
self.pending.clear()
for id in self.ids:
self.cache.add(id)
def read(self, p):
if p.flags != freelistPageFlag:
raise Exception("invalid freelist page")
self.ids = p.free_ids()
self.cache = set()
for id in self.ids:
self.cache.add(id)
def read_ids(self, ids):
self.ids = ids
self.cache = set()
for id in self.ids:
self.cache.add(id)
def write(self, p):
self.ids = sorted(self.ids+self.pending)
self.allocs.clear()
self.pending.clear()
p.write_ids(self.ids)
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,903
|
pfalcon-mirrors/boltdb
|
refs/heads/main
|
/tests/test_bucket.py
|
import os
import unittest
import tempfile
from boltdb import BoltDB
class TestBucket(unittest.TestCase):
def setUp(self):
self.db = BoltDB(tempfile.mktemp())
def tearDown(self):
os.unlink(self.db.filename)
def test_get_nonexistent(self):
with self.db.update() as tx:
b = tx.bucket()
v = b.get(b"foo")
self.assertIsNone(v)
def test_get_from_node(self):
with self.db.update() as tx:
b = tx.bucket()
b.put(b"foo", b"bar")
v = b.get(b"foo")
self.assertEqual(v, b"bar")
def test_get_bucket_is_none(self):
with self.db.update() as tx:
tx.create_bucket(b"widgets")
tx.bucket(b"widgets").create_bucket(b"foo")
self.assertIsNone(tx.bucket(b"widgets").get(b"foo"))
def test_put(self):
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
b.put(b"foo", b"bar")
v = tx.bucket(b"widgets").get(b"foo")
self.assertEqual(v, b"bar")
def test_put_repeat(self):
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
b.put(b"foo", b"bar")
b.put(b"foo", b"baz")
v = tx.bucket(b"widgets").get(b"foo")
self.assertEqual(v, b"baz")
def test_put_large(self):
count, factor = 100, 200
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
for i in range(1, count):
b.put(b"0"*i*factor, b"X"*(count-1)*factor)
with self.db.view() as tx:
b = tx.bucket(b"widgets")
for i in range(1, count):
v = b.get(b"0"*i*factor)
self.assertEqual(v, b"X"*(count-1)*factor)
def test_put_incompatible(self):
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
tx.bucket(b"widgets").create_bucket(b"foo")
with self.assertRaisesRegex(Exception, "cannot write sub bucket"):
b.put(b"foo", b"bar")
def test_iter(self):
orderd_tyes = b"abcdefghijklmnopqrstuvwxyz"
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
for i in range(len(orderd_tyes)):
b.put(orderd_tyes[i:], b"foo")
with self.db.view() as tx:
b = tx.bucket(b"widgets")
for i, (k, _) in enumerate(b):
self.assertEqual(k, orderd_tyes[i:])
self.assertEqual(i, len(orderd_tyes)-1)
def test_delete(self):
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
b.put(b"foo", b"bar")
b.delete(b"foo")
v = tx.bucket(b"widgets").get(b"foo")
self.assertIsNone(v)
def test_delete_large(self):
count = 100
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
for i in range(count):
b.put(str(i).encode(), b"*" * 1024)
with self.db.update() as tx:
b = tx.bucket(b"widgets")
for i in range(count):
b.delete(str(i).encode())
with self.db.view() as tx:
b = tx.bucket(b"widgets")
for i in range(count):
v = b.get(str(i).encode())
self.assertIsNone(v)
def test_delete_nonexisting(self):
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
b.create_bucket(b"nested")
with self.db.update() as tx:
b = tx.bucket(b"widgets")
b.delete(b"foo")
self.assertIsNotNone(b.bucket(b"nested"))
def test_nested(self):
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
b.create_bucket(b"foo")
b.put(b"bar", b"0000")
# Update widgets/bar.
with self.db.update() as tx:
b = tx.bucket(b"widgets")
b.put(b"bar", b"xxxx")
# Cause a split.
with self.db.update() as tx:
b = tx.bucket(b"widgets")
for i in range(10000):
k = str(i).encode()
b.put(k, k)
# Insert into widgets/foo/baz.
with self.db.update() as tx:
b = tx.bucket(b"widgets").bucket(b"foo")
b.put(b"baz", b"yyyy")
with self.db.view() as tx:
b = tx.bucket(b"widgets")
v = b.bucket(b"foo").get(b"baz")
self.assertEqual(v, b"yyyy")
v = b.get(b"bar")
self.assertEqual(v, b"xxxx")
for i in range(10000):
k = str(i).encode()
v = b.get(k)
self.assertEqual(v, k)
def test_delete_a_bucket(self):
# Ensure that deleting a bucket using delete() returns an error.
with self.db.update() as tx:
b = tx.create_bucket(b"widgets")
b.create_bucket(b"foo")
with self.assertRaises(Exception):
b.delete(b"foo")
def test_delete_bucket_nested(self):
with self.db.update() as tx:
widgets = tx.create_bucket(b"widgets")
foo = widgets.create_bucket(b"foo")
bar = foo.create_bucket(b"bar")
bar.put(b"baz", b"bat")
widgets.delete_bucket(b"foo")
self.assertIsNone(widgets.bucket(b"foo"))
with self.db.view() as tx:
widgets = tx.bucket(b"widgets")
self.assertIsNone(widgets.bucket(b"foo"))
def test_delete_bucket_large(self):
with self.db.update() as tx:
widgets = tx.create_bucket(b"widgets")
foo = widgets.create_bucket(b"foo")
for i in range(1000):
k = str(i).encode()
foo.put(k, k)
with self.db.update() as tx:
widgets = tx.delete_bucket(b"widgets")
with self.db.update() as tx:
widgets = tx.create_bucket(b"widgets")
self.assertIsNone(widgets.bucket(b"foo"))
def test_create_bucket_incompatible(self):
with self.db.update() as tx:
widgets = tx.create_bucket(b"widgets")
widgets.put(b"foo", b"bar")
with self.assertRaisesRegex(Exception, "incompatible value"):
widgets.create_bucket(b"foo")
def test_delete_bucket_incompatible(self):
with self.db.update() as tx:
widgets = tx.create_bucket(b"widgets")
widgets.put(b"foo", b"bar")
with self.assertRaisesRegex(Exception, "bucket not exists"):
widgets.delete_bucket(b"foo")
|
{"/boltdb/tx.py": ["/boltdb/bucket.py", "/boltdb/share.py"], "/boltdb/page.py": ["/boltdb/node.py", "/boltdb/share.py"], "/boltdb/db.py": ["/boltdb/freelist.py", "/boltdb/page.py", "/boltdb/tx.py", "/boltdb/rwlock.py", "/boltdb/share.py"], "/boltdb/bucket.py": ["/boltdb/cursor.py", "/boltdb/page.py", "/boltdb/node.py", "/boltdb/share.py"], "/boltdb/node.py": ["/boltdb/share.py"], "/tests/test_rw.py": ["/boltdb/__init__.py", "/boltdb/rwlock.py"], "/tests/test_free.py": ["/boltdb/__init__.py"], "/boltdb/__init__.py": ["/boltdb/db.py"], "/boltdb/freelist.py": ["/boltdb/share.py"], "/tests/test_bucket.py": ["/boltdb/__init__.py"]}
|
39,913
|
Eseb/corpus-cleaner
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
from distutils.core import setup
setup(name="corpus-cleaner",
version="0.1.0",
description="Natural-language corpus cleaning scripts",
author="Seb Bacanu",
keywords=["smt", "nlp", "corpus"],
author_email="seb@bacanu.com",
url="https://github.com/Eseb/corpus-cleaner",
download_url="https://github.com/Eseb/corpus-cleaner/tarball/v0.1.0",
packages=["corpus_cleaner"],
scripts=['corpus_cleaner/scrubber.py', 'corpus_cleaner/equaliser.py'])
|
{"/corpus_cleaner/equaliser.py": ["/corpus_cleaner/shared.py"], "/corpus_cleaner/scrubber.py": ["/corpus_cleaner/shared.py"]}
|
39,914
|
Eseb/corpus-cleaner
|
refs/heads/master
|
/corpus_cleaner/shared.py
|
import re
DEFAULT_STOP_CHARS = [".", ";", "!", "?"]
def join_regex(target_list):
"""
Takes a list of chars and joins them into a regex-friendly search string
"""
return "|".join([re.escape(item) for item in target_list])
def ensure_arg(condition_met, failure_message, parser):
"""
Make sure that the given arg is valid
"""
if condition_met:
return
print(failure_message)
parser.print_usage()
exit()
|
{"/corpus_cleaner/equaliser.py": ["/corpus_cleaner/shared.py"], "/corpus_cleaner/scrubber.py": ["/corpus_cleaner/shared.py"]}
|
39,915
|
Eseb/corpus-cleaner
|
refs/heads/master
|
/corpus_cleaner/equaliser.py
|
#!/usr/bin/env python
import re
from argparse import ArgumentParser
from os.path import exists, basename
from corpus_cleaner.shared import DEFAULT_STOP_CHARS, join_regex, ensure_arg
DEFAULT_SENTENCE_RATIO = 0.6
DEFAULT_LOWERCASE_GLUED = True
A_IS_LONGER = -1
BOTH_EQUIVALENT = 0
B_IS_LONGER = 1
def split(text):
"""
Splits text into a list of paragraphs, each of which is a list of sentences.
"""
paragraphs = text.split("\n\n")
paragraphs = [paragraph.split("\n") for paragraph in paragraphs]
return paragraphs
def merge(paragraphs):
"""
Merges the list-inside-list paragraph format back into one string
"""
paragraphs = ["\n".join(sentences) for sentences in paragraphs]
return "\n\n".join(filter(lambda paragraph: len(paragraph) != 0, paragraphs))
def compare_sentences(sentence_a, sentence_b, sentence_ratio=DEFAULT_SENTENCE_RATIO):
"""
Returns A_IS_LONGER, BOTH_EQUIVALENT, or B_IS_LONGER, taking the sentence ratio into account.
"""
a_word_count = len(sentence_a.split(" "))
b_word_count = len(sentence_b.split(" "))
a_higher = a_word_count * sentence_ratio > b_word_count
b_higher = b_word_count * sentence_ratio > a_word_count
if not (a_higher or b_higher):
return BOTH_EQUIVALENT
return A_IS_LONGER if a_higher else B_IS_LONGER
def glue_sentences(first, second, lowercase_glued=DEFAULT_LOWERCASE_GLUED, stop_chars=DEFAULT_STOP_CHARS):
"""
Attaches two sentences together, removing the stop char that ends the first.
"""
if lowercase_glued and len(second) != 0:
second = second[0].lower() + second[1:]
# Remove stop-char from the first sentence
first = re.sub(r"({})$".format(join_regex(stop_chars)), "", first)
return "{} {}".format(first, second)
def equalise_paragraphs(a_para, b_para, sentence_ratio=DEFAULT_SENTENCE_RATIO, lowercase_glued=DEFAULT_LOWERCASE_GLUED,
stop_chars=DEFAULT_STOP_CHARS):
"""
Glues together two collections of sentences so that they're of similar
word-length. Discards sentences it cannot make parallel.
"""
equalised_a_para = []
equalised_b_para = []
a_index = 0
b_index = 0
# Keep merging while we still have sentences to draw from
while a_index < len(a_para) and b_index < len(b_para):
a_sentence = a_para[a_index]
b_sentence = b_para[b_index]
comparison_result = compare_sentences(a_sentence, b_sentence, sentence_ratio=sentence_ratio)
try:
if comparison_result == B_IS_LONGER:
next_a_sentence = a_para[a_index + 1]
glued = glue_sentences(a_sentence, next_a_sentence,
lowercase_glued=lowercase_glued, stop_chars=stop_chars)
if compare_sentences(glued, b_sentence, sentence_ratio=sentence_ratio) != A_IS_LONGER:
# Expand the next sentence and move the index to it for the next pass
a_para[a_index + 1] = glued
a_index += 1
else:
# Force a push
comparison_result = BOTH_EQUIVALENT
elif comparison_result == A_IS_LONGER:
next_sentence = b_para[b_index + 1]
glued = glue_sentences(b_sentence, next_sentence,
lowercase_glued=lowercase_glued, stop_chars=stop_chars)
if compare_sentences(a_sentence, glued, sentence_ratio=sentence_ratio) != B_IS_LONGER:
# Expand the next sentence and move the index to it for the next pass
b_para[b_index + 1] = glued
b_index += 1
else:
# Force a push
comparison_result = BOTH_EQUIVALENT
if comparison_result == BOTH_EQUIVALENT:
# Sentences are close enough to being equal. Keep going.
equalised_a_para.append(a_sentence)
equalised_b_para.append(b_sentence)
a_index += 1
b_index += 1
except IndexError:
# Hit if we try to access any next_sentence that doesn't exist. We're done if that happens.
break
return equalised_a_para, equalised_b_para
def equalise(text_a, text_b, sentence_ratio=DEFAULT_SENTENCE_RATIO, lowercase_glued=DEFAULT_LOWERCASE_GLUED,
stop_chars=DEFAULT_STOP_CHARS):
"""
Assuming that paragraphs are equivalent, compact the 2 texts into
equal-paragraph-count and equal-sentence-count versions of themselves.
"""
a_paragraphs = split(text_a)
b_paragraphs = split(text_b)
equalised_a_paragraphs = []
equalised_b_paragraphs = []
fewest_paragraph_count = min(len(a_paragraphs), len(b_paragraphs))
for paragraph_index in range(0, fewest_paragraph_count):
a_para = a_paragraphs[paragraph_index]
b_para = b_paragraphs[paragraph_index]
equalised_a_para, equalised_b_para = equalise_paragraphs(a_para, b_para, sentence_ratio=sentence_ratio,
lowercase_glued=lowercase_glued, stop_chars=stop_chars)
equalised_a_paragraphs.append(equalised_a_para)
equalised_b_paragraphs.append(equalised_b_para)
return equalised_a_paragraphs, equalised_b_paragraphs
def equalise_file(file_a, file_b, sentence_ratio=DEFAULT_SENTENCE_RATIO, lowercase_glued=DEFAULT_LOWERCASE_GLUED,
stop_chars=DEFAULT_STOP_CHARS):
"""
Performs equalisation process on two files.
"""
with open(file_a, "r") as input_file:
file_a_contents = input_file.read()
with open(file_b, "r") as input_file:
file_b_contents = input_file.read()
original_corpus_size = (len(file_a_contents) + len(file_b_contents)) / 2
equalised_a, equalised_b = equalise(file_a_contents, file_b_contents, sentence_ratio=sentence_ratio,
lowercase_glued=lowercase_glued, stop_chars=stop_chars)
equalised_a = merge(equalised_a)
equalised_b = merge(equalised_b)
with open(file_a, "w") as output_file:
output_file.write(equalised_a)
with open(file_b, "w") as output_file:
output_file.write(equalised_b)
equalised_corpus_size = (len(equalised_a) + len(equalised_b)) / 2
corpus_lost = int((1.0 - float(equalised_corpus_size) / original_corpus_size) * 100)
print("Scrubbed {} & {}. Corpus lost: {}%".format(basename(file_a), basename(file_b), corpus_lost))
def create_arg_parser():
description = "Makes two corpus files of equal-sentence-length by merging sentences where it thinks appropriate."
parser = ArgumentParser(description=description)
parser.add_argument("file_a", type=str,
help="Input file A. Order is irrelevant.")
parser.add_argument("file_b", type=str,
help="Input file B. Order is irrelevant.")
parser.add_argument("-r, --ratio", dest="ratio", type=float, default=DEFAULT_SENTENCE_RATIO,
help=" ".join([
"How close in word-count sentences have to be to be considered equivalent as a 0 to 1 ratio",
"Default: {}".format(DEFAULT_SENTENCE_RATIO)
]))
default_stop_chars = "".join(DEFAULT_STOP_CHARS)
parser.add_argument("-s, --stop-chars", dest="stop", metavar="stop-chars", type=str, default=default_stop_chars,
help="Chars defining sentence boundaries. Default: {}".format(default_stop_chars))
parser.add_argument("-k, --keep-case", dest="keep_case", default=False,
action="store_true", help="Don't lowercase the first char of the second sentence in a merge.")
return parser
if __name__ == "__main__":
arg_parser = create_arg_parser()
args = arg_parser.parse_args()
user_stop_chars = list(args.stop)
user_lowercase_glued = not args.keep_case
ensure_arg(exists(args.file_a), "File A doesn't exist.", arg_parser)
ensure_arg(exists(args.file_b), "File B doesn't exist.", arg_parser)
ensure_arg(len(user_stop_chars) != 0, "Stop characters are invalid.", arg_parser)
equalise_file(args.file_a, args.file_b, sentence_ratio=args.ratio, stop_chars=user_stop_chars,
lowercase_glued=user_lowercase_glued)
|
{"/corpus_cleaner/equaliser.py": ["/corpus_cleaner/shared.py"], "/corpus_cleaner/scrubber.py": ["/corpus_cleaner/shared.py"]}
|
39,916
|
Eseb/corpus-cleaner
|
refs/heads/master
|
/corpus_cleaner/scrubber.py
|
#!/usr/bin/env python
import codecs
import re
from argparse import ArgumentParser
from os.path import exists
from corpus_cleaner.shared import DEFAULT_STOP_CHARS, join_regex, ensure_arg
DEFAULT_REORDER_CHARS = ["\"", "'", ")", "]", "}"]
DEFAULT_INPUT_ENCODING = "utf-8"
DEFAULT_OUTPUT_ENCODING = "utf-8"
def remove_excessive_whitespace(text):
"""
Ensures that there aren't too many linebreaks.
"""
# collapse runs of blank lines into a single blank line
text = re.sub(r"\n\n+", r"\n\n", text)
# remove whitespace at the start of the document
text = re.sub(r"^\s*([^\s])", r"\1", text)
# remove spaces at the start and end of lines
text = re.sub(r" *\n *", r"\n", text)
# collapse runs of spaces to a single space
text = re.sub(r" +", " ", text)
# remove tabs
text = text.replace("\t", "")
return text
def remove_columns(input_text):
"""
Ensures that text isn't arbitrarily split up into columns: joins the lines of
each paragraph back into a single line.
"""
paragraphs = re.split("\n\n", input_text)
solid_paragraphs = []
for paragraph in paragraphs:
lines = re.split("\n", paragraph)
solid_paragraphs.append(" ".join(lines))
return "\n\n".join(solid_paragraphs)
def reorder_stop_chars(input_text, stop_chars=DEFAULT_STOP_CHARS, reorder_chars=DEFAULT_REORDER_CHARS):
"""
Fixes problematic orders of stop chars, e.g. American-style quotes where the closing
quotation mark is outside of the punctuation: "Hello." -> "Hello".
"""
return re.sub(r"({})({})".format(
join_regex(stop_chars),
join_regex(reorder_chars)
), r"\2\1", input_text)
def split_as_one_sentence_per_line(input_text, stop_chars=DEFAULT_STOP_CHARS):
"""
Splits paragraphs into one-sentence-per-line.
"""
sentence_per_line = re.sub(r"({}) *".format(join_regex(stop_chars)), r"\1\n", input_text)
# Remove last line break
return re.sub(r"\n$", "", sentence_per_line)
def scrub(text, stop_chars=DEFAULT_STOP_CHARS, reorder_chars=DEFAULT_REORDER_CHARS):
"""
Scrub text. Runs the relevant functions in an appropriate order.
"""
text = reorder_stop_chars(text, stop_chars=stop_chars, reorder_chars=reorder_chars)
text = remove_columns(text)
text = split_as_one_sentence_per_line(text, stop_chars=stop_chars)
text = remove_excessive_whitespace(text)
return text
def scrub_file(input_path, output_path=None, stop_chars=DEFAULT_STOP_CHARS, reorder_chars=DEFAULT_REORDER_CHARS,
input_encoding=DEFAULT_INPUT_ENCODING, output_encoding=DEFAULT_OUTPUT_ENCODING):
"""
Run scrubbing on an entire file's contents. Overwrites the input file if no output path is given.
"""
# Overwrite file if no output path is given
output_path = output_path if output_path is not None else input_path
with codecs.open(input_path, "r", input_encoding) as input_file:
file_contents = input_file.read()
scrubbed_contents = scrub(file_contents, stop_chars=stop_chars, reorder_chars=reorder_chars)
with codecs.open(output_path, "w", output_encoding) as output_file:
output_file.write(scrubbed_contents)
print("Scrubbed {} to {}".format(input_path, output_path))
def create_arg_parser():
description = "Tidies up natural language files into one-sentence-per-line easily-parsed files."
parser = ArgumentParser(description=description)
parser.add_argument("input", type=str,
help="Input file to scrub.")
parser.add_argument("-o, --output", metavar="output-path", type=str, dest="output",
help="Scrubbed version output path.")
parser.add_argument("--input-encoding", metavar="encoding", type=str, dest="input_encoding",
default=DEFAULT_INPUT_ENCODING,
help=" ".join([
"What encoding to interpret the input files as being.",
"Default: {}".format(DEFAULT_INPUT_ENCODING)
]))
parser.add_argument("--output-encoding", metavar="encoding", type=str, dest="output_encoding",
default=DEFAULT_OUTPUT_ENCODING,
help="What encoding to save the output files as. Default: {}".format(DEFAULT_OUTPUT_ENCODING))
default_stop_chars = "".join(DEFAULT_STOP_CHARS)
parser.add_argument("-s, --stop-chars", metavar="stop-chars", dest="stop", type=str, default=default_stop_chars,
help="Chars defining sentence boundaries. Default: {}".format(default_stop_chars))
default_reorder_chars = "".join(DEFAULT_REORDER_CHARS)
parser.add_argument("-r, --reorder", metavar="reorderable", dest="reorder", type=str,
default=default_reorder_chars,
help="Chars which can be swapped with stop chars. Default: {}".format(default_reorder_chars))
return parser
if __name__ == "__main__":
arg_parser = create_arg_parser()
args = arg_parser.parse_args()
parsed_stop_chars = list(args.stop)
parsed_reorder_chars = list(args.reorder)
ensure_arg(exists(args.input), "Input file doesn't exist.", arg_parser)
ensure_arg(len(parsed_stop_chars) > 0, "Stop characters are invalid", arg_parser)
ensure_arg(len(parsed_stop_chars) > 0, "Reorderable chars missing.", arg_parser)
scrub_file(args.input, args.output, stop_chars=parsed_stop_chars, reorder_chars=parsed_reorder_chars,
input_encoding=args.input_encoding, output_encoding=args.output_encoding)
|
{"/corpus_cleaner/equaliser.py": ["/corpus_cleaner/shared.py"], "/corpus_cleaner/scrubber.py": ["/corpus_cleaner/shared.py"]}
|
39,917
|
Eseb/corpus-cleaner
|
refs/heads/master
|
/corpus_cleaner/tests.py
|
from unittest import TestCase, main
import re
from corpus_cleaner import scrubber, equaliser
def prepare_test_string(string):
"""
Removes the line-breaks at the start and end of the test strings, as well as tabs
"""
# Remove tab-spaces in multiples of 4 only
string = re.sub(r"(^|\n)( {4}|\t)+", r"\1", string)
string = re.sub(r"(^\n)|(\n$)", "", string)
return string
class ScrubTest(TestCase):
def test_remove_excessive_whitespace(self):
self.assertEqual(
prepare_test_string("""
This line should not have any linebreaks above it, and only two below it.
There should be no spaces or tabs in front or at the end of this line.
There should only be one space between words.
"""),
scrubber.remove_excessive_whitespace(prepare_test_string("""
This line should not have any linebreaks above it, and only two below it.
\tThere should be no spaces or tabs in front or at the end of this line.
There should only be one space between words.
"""))
)
def test_remove_columns(self):
self.assertEqual(
"This paragraph is split up into columns for no good reason.",
scrubber.remove_columns(prepare_test_string("""
This paragraph is
split up into columns
for no good reason.
"""))
)
def test_reorder_stop_chars(self):
self.assertEqual(
"American English calls full stops 'periods'; they also insert them before closing \"quotation marks\"!",
scrubber.reorder_stop_chars(
"American English calls full stops 'periods;' they also insert them before closing \"quotation marks!\""
)
)
def test_split_as_one_sentence_per_line(self):
self.assertEqual(
prepare_test_string("""
This is the first sentence in this paragraph.
This is the second--should be on its own line.
"""),
# One-line
scrubber.split_as_one_sentence_per_line(prepare_test_string("""
This is the first sentence in this paragraph. This is the second--should be on its own line.
"""))
)
class EqualiserTest(TestCase):
def test_split(self):
self.assertEqual(
[
["P1S1.", "P1S2."],
["P2S1.", "P2S2.", "P2S3."]
],
equaliser.split(prepare_test_string("""
P1S1.
P1S2.
P2S1.
P2S2.
P2S3.
"""))
)
def test_compare_sentences(self):
self.assertEqual(
equaliser.BOTH_EQUIVALENT,
equaliser.compare_sentences(
"these sentences are pretty much equivalent",
"these sentences are actually equivalent"
)
)
self.assertEqual(
equaliser.A_IS_LONGER,
equaliser.compare_sentences(
"this sentence is longer than the second one",
"i am short"
)
)
self.assertEqual(
equaliser.B_IS_LONGER,
equaliser.compare_sentences(
"now i am short",
"i am long enough to beat you this time"
)
)
def test_glue_sentences(self):
self.assertEqual(
"I like apples i am lowercase.",
equaliser.glue_sentences(
"I like apples.",
"I am lowercase.",
lowercase_glued=True
)
)
self.assertEqual(
"I like apples I am uppercase.",
equaliser.glue_sentences(
"I like apples.",
"I am uppercase.",
lowercase_glued=False
)
)
def test_equalise_paragraphs(self):
a_result, b_result = equaliser.equalise_paragraphs([
"I should be left alone.", "I should get merged.", "with this one.", "I should not be merged",
"Because this one is so long, merging me with the previous one is a bad idea."
], [
"I should also be left alone.", "I am really damn long, so I should force a merge.",
"I shouldn't force a merge in the other paragraph.",
"The three of us.", "Myself included, remember!", "Will all get merged together.",
"I will be dropped."
])
a_expected = [
"I should be left alone.", "I should get merged with this one.",
"I should not be merged", "Because this one is so long, merging me with the previous one is a bad idea."
]
b_expected = [
"I should also be left alone.", "I am really damn long, so I should force a merge.",
"I shouldn't force a merge in the other paragraph.",
"The three of us myself included, remember will all get merged together."
]
self.assertEqual((a_expected, b_expected), (a_result, b_result))
# The whole point is to have equal sentence counts
self.assertEqual(len(a_result), len(b_result))
if __name__ == "__main__":
main()
|
{"/corpus_cleaner/equaliser.py": ["/corpus_cleaner/shared.py"], "/corpus_cleaner/scrubber.py": ["/corpus_cleaner/shared.py"]}
|
39,918
|
ImreSamu/osm-gimmisn
|
refs/heads/master
|
/tests/test_wsgi.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Miklos Vajna and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test_wsgi module covers the wsgi module."""
from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import io
import os
import unittest
import unittest.mock
import xml.etree.ElementTree as ET
import wsgi
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse
class TestWsgi(unittest.TestCase):
"""Base class for wsgi tests."""
def get_dom_for_path(self, path: str) -> ET.Element:
"""Generates an XML DOM for a given wsgi path."""
def start_response(status: str, response_headers: List[Tuple[str, str]]) -> None:
# Make sure the built-in exception catcher is not kicking in.
self.assertEqual(status, "200 OK")
header_dict = {key: value for (key, value) in response_headers}
self.assertEqual(header_dict["Content-type"], "text/html; charset=utf-8")
def get_abspath(path: str) -> str:
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(__file__), path)
with unittest.mock.patch('util.get_abspath', get_abspath):
environ = {
"PATH_INFO": path
}
callback = cast('StartResponse', start_response) # type: StartResponse
output_iterable = wsgi.application(environ, callback)
output_list = cast(List[bytes], output_iterable)
self.assertTrue(output_list)
output = output_list[0].decode('utf-8')
stream = io.StringIO(output)
tree = ET.parse(stream)
root = tree.getroot()
return root
class TestStreets(TestWsgi):
"""Tests handle_streets()."""
def test_well_formed(self) -> None:
"""Tests if the output is well-formed."""
root = self.get_dom_for_path("/osm/streets/gazdagret/view-result")
results = root.findall("body/table")
self.assertEqual(len(results), 1)
def test_view_query_well_formed(self) -> None:
"""Tests if the view-query output is well-formed."""
root = self.get_dom_for_path("/osm/streets/gazdagret/view-query")
results = root.findall("body/pre")
self.assertEqual(len(results), 1)
class TestMissingHousenumbers(TestWsgi):
"""Tests the missing house numbers page."""
def test_well_formed(self) -> None:
"""Tests if the output is well-formed."""
root = self.get_dom_for_path("/osm/missing-housenumbers/gazdagret/view-result")
results = root.findall("body/table")
self.assertEqual(len(results), 1)
class TestStreetHousenumbers(TestWsgi):
"""Tests handle_street_housenumbers()."""
def test_view_result_update_result_link(self) -> None:
"""Tests view result: the update-result link."""
root = self.get_dom_for_path("/osm/street-housenumbers/gazdagret/view-result")
results = root.findall("body/div[@id='toolbar']/a[@href='/osm/missing-housenumbers/gazdagret/view-result']")
self.assertTrue(results)
def test_view_query_well_formed(self) -> None:
"""Tests if the view-query output is well-formed."""
root = self.get_dom_for_path("/osm/street-housenumbers/gazdagret/view-query")
results = root.findall("body/pre")
self.assertEqual(len(results), 1)
class TestMissingStreets(TestWsgi):
"""Tests the missing streets page."""
def test_well_formed(self) -> None:
"""Tests if the output is well-formed."""
root = self.get_dom_for_path("/osm/missing-housenumbers/gazdagret/view-result")
results = root.findall("body/table")
self.assertEqual(len(results), 1)
class TestMain(TestWsgi):
"""Tests handle_main()."""
def test_well_formed(self) -> None:
"""Tests if the output is well-formed."""
root = self.get_dom_for_path("/osm")
results = root.findall("body/table")
self.assertEqual(len(results), 1)
class TestGetStaticdir(unittest.TestCase):
"""Tests get_staticdir()."""
def test_happy(self) -> None:
"""Tests the happy path."""
actual = wsgi.get_staticdir()
expected = os.path.join(os.getcwd(), "static")
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|
{"/tests/test_wsgi.py": ["/wsgi.py"], "/wsgi.py": ["/util.py"], "/cron.py": ["/util.py"]}
|
39,919
|
ImreSamu/osm-gimmisn
|
refs/heads/master
|
/wsgi.py
|
#!/usr/bin/env python3
#
# Copyright 2019 Miklos Vajna. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""The wsgi module contains functionality specific to the web interface."""
import configparser
import datetime
import json
import locale
import os
import subprocess
import sys
import traceback
import urllib.error
import urllib.parse
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Tuple
import wsgiref.simple_server
import pytz
import yattag # type: ignore
import helpers
from i18n import translate as _
import overpass_query
import version
import util
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse
def get_config() -> configparser.ConfigParser:
"""Gets access to information which are specific to this installation."""
config = configparser.ConfigParser()
config_path = util.get_abspath("wsgi.ini")
config.read(config_path)
return config
def get_datadir() -> str:
"""Gets the directory which is tracked (in version control) data."""
return util.get_abspath("data")
def get_staticdir() -> str:
"""Gets the directory which is static data."""
return os.path.join(os.path.dirname(__file__), "static")
def handle_streets(relations: helpers.Relations, request_uri: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/streets/ormezo/view-query."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.Doc()
doc.asis(get_toolbar(relations, "streets", relation_name, osmrelation).getvalue())
if action == "view-query":
with doc.tag("pre"):
doc.text(relation.get_osm_streets_query())
elif action == "view-result":
with relation.get_files().get_osm_streets_stream("r") as sock:
table = util.tsv_to_list(sock)
doc.asis(util.html_table_from_list(table).getvalue())
elif action == "update-result":
query = relation.get_osm_streets_query()
try:
relation.get_files().write_osm_streets(overpass_query.overpass_query(query))
streets = relation.get_config().should_check_missing_streets()
if streets != "only":
doc.text(_("Update successful: "))
link = "/osm/missing-housenumbers/" + relation_name + "/view-result"
doc.asis(util.gen_link(link, _("View missing house numbers")).getvalue())
else:
doc.text(_("Update successful."))
except urllib.error.HTTPError as http_error:
doc.asis(util.handle_overpass_error(http_error).getvalue())
date = get_streets_last_modified(relation)
doc.asis(get_footer(date).getvalue())
return doc
def handle_street_housenumbers(relations: helpers.Relations, request_uri: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/street-housenumbers/ormezo/view-query."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.Doc()
doc.asis(get_toolbar(relations, "street-housenumbers", relation_name, osmrelation).getvalue())
if action == "view-query":
with doc.tag("pre"):
doc.text(relation.get_osm_housenumbers_query())
elif action == "view-result":
with relation.get_files().get_osm_housenumbers_stream(mode="r") as sock:
table = util.tsv_to_list(sock)
doc.asis(util.html_table_from_list(table).getvalue())
elif action == "update-result":
query = relation.get_osm_housenumbers_query()
try:
relation.get_files().write_osm_housenumbers(overpass_query.overpass_query(query))
doc.text(_("Update successful: "))
link = "/osm/missing-housenumbers/" + relation_name + "/view-result"
doc.asis(util.gen_link(link, _("View missing house numbers")).getvalue())
except urllib.error.HTTPError as http_error:
doc.asis(util.handle_overpass_error(http_error).getvalue())
date = get_housenumbers_last_modified(relation)
doc.asis(get_footer(date).getvalue())
return doc
def missing_housenumbers_view_turbo(relations: helpers.Relations, request_uri: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-turbo."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
doc = yattag.Doc()
relation = relations.get_relation(relation_name)
ret = relation.write_missing_housenumbers()
_todo_street_count, _todo_count, _done_count, _percent, table = ret
query = helpers.make_turbo_query_for_streets(relation, table)
with doc.tag("pre"):
doc.text(query)
return doc
def missing_housenumbers_view_res(relations: helpers.Relations, request_uri: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-result."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
doc = yattag.Doc()
relation = relations.get_relation(relation_name)
if not os.path.exists(relation.get_files().get_osm_streets_path()):
doc.text(_("No existing streets: "))
link = "/osm/streets/" + relation_name + "/update-result"
doc.asis(util.gen_link(link, _("Call Overpass to create")).getvalue())
elif not os.path.exists(relation.get_files().get_osm_housenumbers_path()):
doc.text(_("No existing house numbers: "))
link = "/osm/street-housenumbers/" + relation_name + "/update-result"
doc.asis(util.gen_link(link, _("Call Overpass to create")).getvalue())
elif not os.path.exists(relation.get_files().get_ref_housenumbers_path()):
doc.text(_("No missing house numbers: "))
link = "/osm/missing-housenumbers/" + relation_name + "/update-result"
doc.asis(util.gen_link(link, _("Create from reference")).getvalue())
else:
ret = relation.write_missing_housenumbers()
todo_street_count, todo_count, done_count, percent, table = ret
with doc.tag("p"):
doc.text(_("OpenStreetMap is possibly missing the below {0} house numbers for {1} streets.")
.format(str(todo_count), str(todo_street_count)))
doc.text(_(" (existing: {0}, ready: {1}%).").format(str(done_count), str(percent)))
doc.stag("br")
with doc.tag("a", href="https://github.com/vmiklos/osm-gimmisn/tree/master/doc"):
doc.text(_("Filter incorrect information"))
doc.text(".")
doc.stag("br")
with doc.tag("a", href="/osm/missing-housenumbers/{}/view-turbo".format(relation_name)):
doc.text(_("Overpass turbo query for the below streets"))
doc.text(".")
doc.asis(util.html_table_from_list(table).getvalue())
return doc
def missing_relations_view_result(relations: helpers.Relations, request_uri: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/missing-streets/budapest_11/view-result."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
relation = relations.get_relation(relation_name)
doc = yattag.Doc()
if not os.path.exists(relation.get_files().get_osm_streets_path()):
doc.text(_("No existing streets: "))
with doc.tag("a", href="/osm/streets/" + relation_name + "/update-result"):
doc.text(_("Call Overpass to create"))
elif not os.path.exists(relation.get_files().get_ref_streets_path()):
doc.text(_("No street list: "))
with doc.tag("a", href="/osm/missing-streets/" + relation_name + "/update-result"):
doc.text(_("Create from reference"))
else:
ret = relation.write_missing_streets()
todo_count, done_count, percent, streets = ret
streets.sort(key=locale.strxfrm)
table = [[util.html_escape(_("Street name"))]]
for street in streets:
table.append([util.html_escape(street)])
with doc.tag("p"):
doc.text(_("OpenStreetMap is possibly missing the below {0} streets.").format(str(todo_count)))
doc.text(_(" (existing: {0}, ready: {1}%).").format(str(done_count), str(percent)))
doc.asis(util.html_table_from_list(table).getvalue())
return doc
def missing_housenumbers_view_txt(relations: helpers.Relations, request_uri: str) -> str:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-result.txt."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
relation = relations.get_relation(relation_name)
output = ""
if not os.path.exists(relation.get_files().get_osm_streets_path()):
output += _("No existing streets")
elif not os.path.exists(relation.get_files().get_osm_housenumbers_path()):
output += _("No existing house numbers")
elif not os.path.exists(relation.get_files().get_ref_housenumbers_path()):
output += _("No reference house numbers")
else:
ongoing_streets, _ignore = relation.get_missing_housenumbers()
table = []
for result in ongoing_streets:
if result[1]:
result_strings = util.get_housenumber_ranges(result[1])
# Street name, only_in_reference items.
if not relation.get_config().get_street_is_even_odd(result[0]):
row = result[0] + "\t[" + ", ".join(result_strings) + "]"
else:
elements = util.format_even_odd(result_strings, doc=None)
row = result[0] + "\t[" + "], [".join(elements) + "]"
table.append(row)
table.sort(key=locale.strxfrm)
output += "\n".join(table)
return output
def missing_streets_view_txt(relations: helpers.Relations, request_uri: str) -> str:
"""Expected request_uri: e.g. /osm/missing-streets/ujbuda/view-result.txt."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
relation = relations.get_relation(relation_name)
output = ""
if not os.path.exists(relation.get_files().get_osm_streets_path()):
output += _("No existing streets")
elif not os.path.exists(relation.get_files().get_ref_streets_path()):
output += _("No reference streets")
else:
todo_streets, _ignore = relation.get_missing_streets()
todo_streets.sort(key=locale.strxfrm)
output += "\n".join(todo_streets)
return output
def missing_housenumbers_update(relations: helpers.Relations, relation_name: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/update-result."""
reference = get_config().get('wsgi', 'reference_housenumbers').strip().split(' ')
reference = [util.get_abspath(i) for i in reference]
relation = relations.get_relation(relation_name)
relation.write_ref_housenumbers(reference)
doc = yattag.Doc()
doc.text(_("Update successful: "))
link = "/osm/missing-housenumbers/" + relation_name + "/view-result"
doc.asis(util.gen_link(link, _("View missing house numbers")).getvalue())
return doc
def missing_streets_update(relations: helpers.Relations, relation_name: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/missing-streets/ujbuda/update-result."""
reference = util.get_abspath(get_config().get('wsgi', 'reference_street').strip())
relation = relations.get_relation(relation_name)
relation.write_ref_streets(reference)
return util.html_escape(_("Update successful."))
def handle_missing_housenumbers(relations: helpers.Relations, request_uri: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-[result|query]."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
date = None
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.Doc()
doc.asis(get_toolbar(relations, "missing-housenumbers", relation_name, osmrelation).getvalue())
if action == "view-result":
doc.asis(missing_housenumbers_view_res(relations, request_uri).getvalue())
elif action == "view-turbo":
doc.asis(missing_housenumbers_view_turbo(relations, request_uri).getvalue())
elif action == "view-query":
with doc.tag("pre"):
with relation.get_files().get_ref_housenumbers_stream("r") as sock:
doc.text(sock.read())
date = get_last_modified(relation.get_files().get_ref_housenumbers_path())
elif action == "update-result":
doc.asis(missing_housenumbers_update(relations, relation_name).getvalue())
if not date:
date = ref_housenumbers_last_modified(relations, relation_name)
doc.asis(get_footer(date).getvalue())
return doc
def handle_missing_streets(relations: helpers.Relations, request_uri: str) -> yattag.Doc:
"""Expected request_uri: e.g. /osm/missing-streets/ujbuda/view-[result|query]."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.Doc()
doc.asis(get_toolbar(relations, "missing-streets", relation_name, osmrelation).getvalue())
if action == "view-result":
doc.asis(missing_relations_view_result(relations, request_uri).getvalue())
elif action == "view-query":
with doc.tag("pre"):
with relation.get_files().get_ref_streets_stream("r") as sock:
doc.text(sock.read())
elif action == "update-result":
doc.asis(missing_streets_update(relations, relation_name).getvalue())
date = ref_streets_last_modified(relation)
doc.asis(get_footer(date).getvalue())
return doc
def local_to_ui_tz(local_dt: datetime.datetime) -> datetime.datetime:
"""Converts from local date-time to UI date-time, based on config."""
config = get_config()
if config.has_option("wsgi", "timezone"):
ui_tz = pytz.timezone(config.get("wsgi", "timezone"))
else:
ui_tz = pytz.timezone("Europe/Budapest")
return local_dt.astimezone(ui_tz)
def get_last_modified(workdir: str, path: str = "") -> str:
"""Gets the update date of a file in workdir."""
if path:
path = os.path.join(workdir, path)
else:
path = workdir
return format_timestamp(get_timestamp(path))
def get_timestamp(workdir: str, path: str = "") -> float:
"""Gets the timestamp of a file in workdir."""
if path:
path = os.path.join(workdir, path)
else:
path = workdir
try:
return os.path.getmtime(path)
except FileNotFoundError:
return 0
def format_timestamp(timestamp: float) -> str:
"""Formats timestamp as UI date-time."""
local_dt = datetime.datetime.fromtimestamp(timestamp)
ui_dt = local_to_ui_tz(local_dt)
fmt = '%Y-%m-%d %H:%M'
return ui_dt.strftime(fmt)
def ref_housenumbers_last_modified(relations: helpers.Relations, name: str) -> str:
"""Gets the update date for missing house numbers."""
relation = relations.get_relation(name)
t_ref = get_timestamp(relation.get_files().get_ref_housenumbers_path())
t_housenumbers = get_timestamp(relation.get_files().get_osm_housenumbers_path())
return format_timestamp(max(t_ref, t_housenumbers))
def ref_streets_last_modified(relation: helpers.Relation) -> str:
"""Gets the update date for missing streets."""
t_ref = get_timestamp(relation.get_files().get_ref_streets_path())
t_osm = get_timestamp(relation.get_files().get_osm_streets_path())
return format_timestamp(max(t_ref, t_osm))
def get_housenumbers_last_modified(relation: helpers.Relation) -> str:
"""Gets the update date of house numbers for a relation."""
return get_last_modified(relation.get_files().get_osm_housenumbers_path())
def get_streets_last_modified(relation: helpers.Relation) -> str:
"""Gets the update date of streets for a relation."""
return get_last_modified(relation.get_files().get_osm_streets_path())
def handle_main_housenr_percent(relation: helpers.Relation) -> Tuple[yattag.Doc, str]:
"""Handles the house number percent part of the main page."""
url = "/osm/missing-housenumbers/" + relation.get_name() + "/view-result"
percent = "N/A"
if os.path.exists(relation.get_files().get_housenumbers_percent_path()):
percent = helpers.get_content(relation.get_files().get_housenumbers_percent_path())
doc = yattag.Doc()
if percent != "N/A":
date = get_last_modified(relation.get_files().get_housenumbers_percent_path())
with doc.tag("strong"):
with doc.tag("a", href=url, title=_("updated") + " " + date):
doc.text(percent + "%")
return doc, percent
with doc.tag("strong"):
with doc.tag("a", href=url):
doc.text(_("missing house numbers"))
return doc, "0"
def handle_main_street_percent(relation: helpers.Relation) -> Tuple[yattag.Doc, str]:
"""Handles the street percent part of the main page."""
url = "/osm/missing-streets/" + relation.get_name() + "/view-result"
percent = "N/A"
if os.path.exists(relation.get_files().get_streets_percent_path()):
percent = helpers.get_content(relation.get_files().get_streets_percent_path())
doc = yattag.Doc()
if percent != "N/A":
date = get_last_modified(relation.get_files().get_streets_percent_path())
with doc.tag("strong"):
with doc.tag("a", href=url, title=_("updated") + " " + date):
doc.text(percent + "%")
return doc, percent
with doc.tag("strong"):
with doc.tag("a", href=url):
doc.text(_("missing streets"))
return doc, "0"
def filter_for_everything(_complete: bool, _relation: helpers.Relation) -> bool:
"""Does not filter out anything."""
return True
def filter_for_incomplete(complete: bool, _relation: helpers.Relation) -> bool:
"""Filters out complete items."""
return not complete
def create_filter_for_refmegye(refmegye_filter: str) -> Callable[[bool, helpers.Relation], bool]:
"""Creates a function that filters for a single refmegye."""
return lambda _complete, relation: relation.get_config().get_refmegye() == refmegye_filter
def create_filter_for_refmegye_reftelepules(
refmegye_filter: str,
reftelepules_filter: str
) -> Callable[[bool, helpers.Relation], bool]:
"""Creates a function that filters for a single reftelepules in a refmegye."""
def filter_for(_complete: bool, relation: helpers.Relation) -> bool:
config = relation.get_config()
return config.get_refmegye() == refmegye_filter and config.get_reftelepules() == reftelepules_filter
return filter_for
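# Example (refmegye/reftelepules values are illustrative): the factories above
# return closures with the same signature as filter_for_everything, so they can
# be used interchangeably by handle_main_relation():
#   f = create_filter_for_refmegye("01")
#   f(True, relation)  # True only when relation.get_config().get_refmegye() == "01"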
def handle_main_filters_refmegye(relations: helpers.Relations, refmegye_id: str, refmegye: str) -> yattag.Doc:
"""Handles one refmegye in the filter part of the main wsgi page."""
doc = yattag.Doc()
name = relations.refmegye_get_name(refmegye)
if not name:
return doc
with doc.tag("a", href="/osm/filter-for/refmegye/" + refmegye):
doc.text(name)
if refmegye_id and refmegye == refmegye_id:
reftelepules_ids = relations.refmegye_get_reftelepules_ids(refmegye_id)
if reftelepules_ids:
names = [] # type: List[yattag.Doc]
for reftelepules_id in reftelepules_ids:
name = relations.reftelepules_get_name(refmegye_id, reftelepules_id)
if name:
name_doc = yattag.Doc()
href_format = "/osm/filter-for/refmegye/{}/reftelepules/{}"
with name_doc.tag("a", href=href_format.format(refmegye, reftelepules_id)):
name_doc.text(name)
names.append(name_doc)
if names:
doc.text(" (")
for index, item in enumerate(names):
if index:
doc.text(", ")
doc.asis(item.getvalue())
doc.text(")")
return doc
def handle_main_filters(relations: helpers.Relations, refmegye_id: str) -> yattag.Doc:
"""Handlers the filter part of the main wsgi page."""
items = [] # type: List[yattag.Doc]
doc = yattag.Doc()
with doc.tag("a", href="/osm/filter-for/incomplete"):
doc.text(_("Hide complete areas"))
items.append(doc)
# Sorted set of refmegye values of all relations.
for refmegye in sorted({relation.get_config().get_refmegye() for relation in relations.get_relations()}):
items.append(handle_main_filters_refmegye(relations, refmegye_id, refmegye))
doc = yattag.Doc()
with doc.tag("h1"):
doc.text(_("Where to map?"))
with doc.tag("p"):
doc.text(_("Filters:") + " ")
for index, item in enumerate(items):
if index:
doc.text(" ¦ ")
doc.asis(item.getvalue())
return doc
def setup_main_filter_for(request_uri: str) -> Tuple[Callable[[bool, helpers.Relation], bool], str]:
"""Sets up a filter-for function from request uri: only certain areas are shown then."""
tokens = request_uri.split("/")
filter_for = filter_for_everything # type: Callable[[bool, helpers.Relation], bool]
filters = util.parse_filters(tokens)
refmegye = ""
if "incomplete" in filters:
# /osm/filter-for/incomplete
filter_for = filter_for_incomplete
elif "refmegye" in filters and "reftelepules" in filters:
# /osm/filter-for/refmegye/<value>/reftelepules/<value>.
refmegye = filters["refmegye"]
filter_for = create_filter_for_refmegye_reftelepules(filters["refmegye"], filters["reftelepules"])
elif "refmegye" in filters:
# /osm/filter-for/refmegye/<value>.
refmegye = filters["refmegye"]
filter_for = create_filter_for_refmegye(refmegye)
return filter_for, refmegye
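# Sketch of the URI -> filter mapping implemented above (URIs are illustrative):
#   /osm/filter-for/incomplete                   -> filter_for_incomplete
#   /osm/filter-for/refmegye/01                  -> refmegye == "01"
#   /osm/filter-for/refmegye/01/reftelepules/011 -> refmegye and reftelepules match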
def handle_main_relation(
relations: helpers.Relations,
filter_for: Callable[[bool, helpers.Relation], bool],
relation_name: str
) -> List[yattag.Doc]:
"""Handles one relation (one table row) on the main page."""
relation = relations.get_relation(relation_name)
complete = True
streets = relation.get_config().should_check_missing_streets()
    row = []  # type: List[yattag.Doc]
row.append(util.html_escape(relation_name))
if streets != "only":
cell, percent = handle_main_housenr_percent(relation)
doc = yattag.Doc()
doc.asis(cell.getvalue())
row.append(doc)
if float(percent) < 100.0:
complete = False
date = get_housenumbers_last_modified(relation)
doc = yattag.Doc()
href = "/osm/street-housenumbers/" + relation_name + "/view-result"
with doc.tag("a", href=href, title=_("updated") + " " + date):
doc.text(_("existing house numbers"))
row.append(doc)
else:
row.append(yattag.Doc())
row.append(yattag.Doc())
if streets != "no":
cell, percent = handle_main_street_percent(relation)
row.append(cell)
if float(percent) < 100.0:
complete = False
else:
row.append(yattag.Doc())
date = get_streets_last_modified(relation)
doc = yattag.Doc()
with doc.tag("a", href="/osm/streets/" + relation_name + "/view-result", title=_("updated") + " " + date):
doc.text(_("existing streets"))
row.append(doc)
doc = yattag.Doc()
with doc.tag("a", href="https://www.openstreetmap.org/relation/" + str(relation.get_config().get_osmrelation())):
doc.text(_("area boundary"))
row.append(doc)
if not filter_for(complete, relation):
row.clear()
return row
def handle_main(request_uri: str, relations: helpers.Relations) -> yattag.Doc:
"""Handles the main wsgi page.
Also handles /osm/filter-for/* which filters for a condition."""
filter_for, refmegye = setup_main_filter_for(request_uri)
doc = yattag.Doc()
doc.asis(get_toolbar(relations).getvalue())
doc.asis(handle_main_filters(relations, refmegye).getvalue())
table = []
table.append([util.html_escape(_("Area")),
util.html_escape(_("House number coverage")),
util.html_escape(_("Existing house numbers")),
util.html_escape(_("Street coverage")),
util.html_escape(_("Existing streets")),
util.html_escape(_("Area boundary"))])
for relation_name in relations.get_names():
row = handle_main_relation(relations, filter_for, relation_name)
if row:
table.append(row)
doc.asis(util.html_table_from_list(table).getvalue())
with doc.tag("p"):
with doc.tag("a", href="https://github.com/vmiklos/osm-gimmisn/tree/master/doc"):
doc.text(_("Add new area"))
doc.asis(get_footer().getvalue())
return doc
def fill_missing_header_items(streets: str, relation_name: str, items: List[yattag.Doc]) -> None:
"""Generates the 'missing house numbers/streets' part of the header."""
if streets != "only":
doc = yattag.Doc()
with doc.tag("a", href="/osm/missing-housenumbers/" + relation_name + "/view-result"):
doc.text(_("Missing house numbers"))
doc.text(" (")
with doc.tag("a", href="/osm/missing-housenumbers/" + relation_name + "/view-result.txt"):
doc.text("txt")
doc.text(")")
items.append(doc)
doc = yattag.Doc()
with doc.tag("a", href="/osm/street-housenumbers/" + relation_name + "/view-result"):
doc.text(_("Existing house numbers"))
items.append(doc)
if streets != "no":
doc = yattag.Doc()
with doc.tag("a", href="/osm/missing-streets/" + relation_name + "/view-result"):
doc.text(_("Missing streets"))
doc.text(" (")
with doc.tag("a", href="/osm/missing-streets/" + relation_name + "/view-result.txt"):
doc.text("txt")
doc.text(")")
items.append(doc)
def get_html_title(request_uri: str) -> str:
"""Determines the HTML title for a given function and relation name."""
tokens = request_uri.split("/")
function = ""
relation_name = ""
if len(tokens) > 3:
function = tokens[2]
relation_name = tokens[3]
title = ""
if function == "missing-housenumbers":
title = " - " + _("{0} missing house numbers").format(relation_name)
elif function == "missing-streets":
title = " - " + relation_name + " " + _("missing streets")
elif function == "street-housenumbers":
title = " - " + relation_name + " " + _("existing house numbers")
elif function == "streets":
title = " - " + relation_name + " " + _("existing streets")
return title
def fill_header_function(function: str, relation_name: str, items: List[yattag.Doc]) -> None:
"""Fills items with function-specific links in the header. Returns a title."""
if function == "missing-housenumbers":
doc = yattag.Doc()
with doc.tag("a", href="/osm/missing-housenumbers/" + relation_name + "/update-result"):
doc.text(_("Update from reference"))
doc.text(" " + _("(may take seconds)"))
items.append(doc)
doc = yattag.Doc()
with doc.tag("a", href="https://overpass-turbo.eu/"):
doc.text(_("Overpass turbo"))
items.append(doc)
elif function == "missing-streets":
doc = yattag.Doc()
with doc.tag("a", href="/osm/missing-streets/" + relation_name + "/update-result"):
doc.text(_("Update from reference"))
items.append(doc)
elif function == "street-housenumbers":
doc = yattag.Doc()
with doc.tag("a", href="/osm/street-housenumbers/" + relation_name + "/update-result"):
doc.text(_("Call Overpass to update"))
doc.text(" " + _("(may take seconds)"))
items.append(doc)
doc = yattag.Doc()
with doc.tag("a", href="/osm/street-housenumbers/" + relation_name + "/view-query"):
doc.text(_("View query"))
items.append(doc)
elif function == "streets":
doc = yattag.Doc()
with doc.tag("a", href="/osm/streets/" + relation_name + "/update-result"):
doc.text(_("Call Overpass to update"))
doc.text(" " + _("(may take seconds)"))
items.append(doc)
doc = yattag.Doc()
with doc.tag("a", href="/osm/streets/" + relation_name + "/view-query"):
doc.text(_("View query"))
items.append(doc)
def write_html_head(doc: yattag.Doc, title: str) -> None:
"""Produces the <head> tag and its contents."""
with doc.tag("head"):
with doc.tag("title"):
doc.text(_("Where to map?") + title)
doc.stag("meta", charset="UTF-8")
doc.stag("link", rel="stylesheet", type="text/css", href="/osm/static/osm.css")
with doc.tag("script", src="/osm/static/sorttable.js"):
pass
doc.stag("meta", name="viewport", content="width=device-width, initial-scale=1")
def get_toolbar(
relations: Optional[helpers.Relations] = None,
function: str = "",
relation_name: str = "",
relation_osmid: int = 0
) -> yattag.Doc:
"""Produces the start of the page. Note that the content depends on the function and the
relation, but not on the action to keep a balance between too generic and too specific
content."""
items = [] # type: List[yattag.Doc]
if relations and relation_name:
relation = relations.get_relation(relation_name)
streets = relation.get_config().should_check_missing_streets()
doc = yattag.Doc()
with doc.tag("a", href="/osm"):
doc.text(_("Area list"))
items.append(doc)
if relation_name:
fill_missing_header_items(streets, relation_name, items)
doc = yattag.Doc()
with doc.tag("a", href="/osm/streets/" + relation_name + "/view-result"):
doc.text(_("Existing streets"))
items.append(doc)
fill_header_function(function, relation_name, items)
if relation_osmid:
doc = yattag.Doc()
with doc.tag("a", href="https://www.openstreetmap.org/relation/" + str(relation_osmid)):
doc.text(_("Area boundary"))
items.append(doc)
doc = yattag.Doc()
with doc.tag("a", href="https://github.com/vmiklos/osm-gimmisn/tree/master/doc"):
doc.text(_("Documentation"))
items.append(doc)
doc = yattag.Doc()
with doc.tag("div", id="toolbar"):
for index, item in enumerate(items):
if index:
doc.text(" ¦ ")
doc.asis(item.getvalue())
doc.stag("hr")
return doc
def get_footer(last_updated: str = "") -> yattag.Doc:
"""Produces the end of the page."""
items = [] # type: List[yattag.Doc]
doc = yattag.Doc()
doc.text(_("Version: "))
doc.asis(util.git_link(version.VERSION, "https://github.com/vmiklos/osm-gimmisn/commit/").getvalue())
items.append(doc)
items.append(util.html_escape(_("OSM data © OpenStreetMap contributors.")))
if last_updated:
items.append(util.html_escape(_("Last update: ") + last_updated))
doc = yattag.Doc()
doc.stag("hr")
with doc.tag("div"):
for index, item in enumerate(items):
if index:
doc.text(" ¦ ")
doc.asis(item.getvalue())
return doc
def handle_github_webhook(environ: Dict[str, Any]) -> yattag.Doc:
"""Handles a GitHub style webhook."""
body = urllib.parse.parse_qs(environ["wsgi.input"].read().decode('utf-8'))
payload = body["payload"][0]
root = json.loads(payload)
if root["ref"] == "refs/heads/master":
subprocess.run(["make", "-C", util.get_abspath(""), "deploy-pythonanywhere"], check=True)
return util.html_escape("")
def handle_static(request_uri: str) -> Tuple[str, str]:
"""Handles serving static content."""
tokens = request_uri.split("/")
path = tokens[-1]
if request_uri.endswith(".js"):
content_type = "application/x-javascript"
elif request_uri.endswith(".css"):
content_type = "text/css"
if path.endswith(".js") or path.endswith(".css"):
return helpers.get_content(get_staticdir(), path), content_type
return "", ""
def send_response(start_response: 'StartResponse', content_type: str, status: str, output: str) -> Iterable[bytes]:
"""Turns an output string into a byte array and sends it."""
output_bytes = output.encode('utf-8')
response_headers = [('Content-type', content_type + '; charset=utf-8'),
('Content-Length', str(len(output_bytes)))]
start_response(status, response_headers)
return [output_bytes]
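# Note: Content-Length is computed on the UTF-8 encoded bytes, not the string,
# so e.g. a 2-character body containing one accented letter reports a length of 3.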
def our_application_txt(
start_response: 'StartResponse',
relations: helpers.Relations,
request_uri: str
) -> Iterable[bytes]:
"""Dispatches plain text requests based on their URIs."""
content_type = "text/plain"
if request_uri.startswith("/osm/missing-streets/"):
output = missing_streets_view_txt(relations, request_uri)
elif request_uri.startswith("/osm/missing-housenumbers/"):
output = missing_housenumbers_view_txt(relations, request_uri)
return send_response(start_response, content_type, "200 OK", output)
def get_request_uri(environ: Dict[str, Any]) -> str:
"""Finds out the request URI."""
request_uri = ""
path_info = environ.get("PATH_INFO")
if path_info:
request_uri = path_info
if request_uri:
# Compatibility.
if request_uri.startswith("/osm/suspicious-streets/"):
request_uri = request_uri.replace('suspicious-streets', 'missing-housenumbers')
elif request_uri.startswith("/osm/suspicious-relations/"):
request_uri = request_uri.replace('suspicious-relations', 'missing-streets')
return request_uri
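# Example of the compatibility rewrite above (relation name is illustrative):
#   "/osm/suspicious-streets/ormezo/view-result"
#     -> "/osm/missing-housenumbers/ormezo/view-result"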
def our_application(
environ: Dict[str, Any],
start_response: 'StartResponse'
) -> Iterable[bytes]:
"""Dispatches the request based on its URI."""
config = get_config()
if config.has_option("wsgi", "locale"):
ui_locale = config.get("wsgi", "locale")
else:
ui_locale = "hu_HU.UTF-8"
try:
locale.setlocale(locale.LC_ALL, ui_locale)
except locale.Error:
# Ignore, this happens only on the cut-down CI environment.
pass
language = util.setup_localization(environ)
if not language:
language = "hu"
request_uri = get_request_uri(environ)
_ignore, _ignore, ext = request_uri.partition('.')
relations = helpers.Relations(get_datadir(), helpers.get_workdir(config))
if ext == "txt":
return our_application_txt(start_response, relations, request_uri)
if request_uri.startswith("/osm/static/"):
output, content_type = handle_static(request_uri)
return send_response(start_response, content_type, "200 OK", output)
doc = yattag.Doc()
util.write_html_header(doc)
with doc.tag("html", lang=language):
write_html_head(doc, get_html_title(request_uri))
with doc.tag("body"):
if request_uri.startswith("/osm/streets/"):
doc.asis(handle_streets(relations, request_uri).getvalue())
elif request_uri.startswith("/osm/missing-streets/"):
doc.asis(handle_missing_streets(relations, request_uri).getvalue())
elif request_uri.startswith("/osm/street-housenumbers/"):
doc.asis(handle_street_housenumbers(relations, request_uri).getvalue())
elif request_uri.startswith("/osm/missing-housenumbers/"):
doc.asis(handle_missing_housenumbers(relations, request_uri).getvalue())
elif request_uri.startswith("/osm/webhooks/github"):
doc.asis(handle_github_webhook(environ).getvalue())
else:
doc.asis(handle_main(request_uri, relations).getvalue())
return send_response(start_response, "text/html", "200 OK", doc.getvalue())
def handle_exception(
environ: Dict[str, Any],
start_response: 'StartResponse'
) -> Iterable[bytes]:
"""Displays an unhandled exception on the page."""
status = '500 Internal Server Error'
    request_uri = ""
    path_info = environ.get("PATH_INFO")
    if path_info:
        request_uri = path_info
doc = yattag.Doc()
util.write_html_header(doc)
with doc.tag("pre"):
doc.text(_("Internal error when serving {0}").format(request_uri) + "\n")
doc.text(traceback.format_exc())
return send_response(start_response, "text/html", status, doc.getvalue())
def application(
environ: Dict[str, Any],
start_response: 'StartResponse'
) -> Iterable[bytes]:
"""The entry point of this WSGI app."""
try:
return our_application(environ, start_response)
# pylint: disable=broad-except
except Exception:
return handle_exception(environ, start_response)
def main() -> None:
"""Commandline interface to this module."""
if sys.platform.startswith("win"):
import _locale
# pylint: disable=protected-access
_locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8'])
httpd = wsgiref.simple_server.make_server('', 8000, application)
print("Open <http://localhost:8000/osm> in your browser.")
httpd.serve_forever()
if __name__ == "__main__":
main()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
{"/tests/test_wsgi.py": ["/wsgi.py"], "/wsgi.py": ["/util.py"], "/cron.py": ["/util.py"]}
|
39,920
|
ImreSamu/osm-gimmisn
|
refs/heads/master
|
/cron.py
|
#!/usr/bin/env python3
#
# Copyright 2019 Miklos Vajna. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""The cron module allows doing nightly tasks."""
import configparser
import datetime
import logging
import os
import time
import traceback
import urllib.error
import helpers
import overpass_query
import util
def overpass_sleep() -> None:
"""Sleeps to respect overpass rate limit."""
while True:
sleep = overpass_query.overpass_query_need_sleep()
if not sleep:
break
logging.info("overpass_sleep: waiting for %s seconds", sleep)
time.sleep(sleep)
def should_retry(retry: int) -> bool:
"""Decides if we should retry a query or not."""
return retry < 20
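# The retry loops below use should_retry() with a counter starting at 0, so each
# Overpass query is attempted at most 20 times before giving up.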
def update_streets(relations: helpers.Relations) -> None:
"""Update the existing street list of all relations."""
for relation_name in relations.get_active_names():
logging.info("update_streets: start: %s", relation_name)
relation = relations.get_relation(relation_name)
retry = 0
while should_retry(retry):
if retry > 0:
logging.info("update_streets: try #%s", retry)
retry += 1
try:
overpass_sleep()
query = relation.get_osm_streets_query()
relation.get_files().write_osm_streets(overpass_query.overpass_query(query))
break
except urllib.error.HTTPError as http_error:
logging.info("update_streets: http error: %s", str(http_error))
logging.info("update_streets: end: %s", relation_name)
def update_street_housenumbers(relations: helpers.Relations) -> None:
"""Update the existing OSM street housenumber list of all relations."""
for relation_name in relations.get_active_names():
logging.info("update_street_housenumbers: start: %s", relation_name)
retry = 0
while should_retry(retry):
if retry > 0:
logging.info("update_street_housenumbers: try #%s", retry)
retry += 1
try:
overpass_sleep()
relation = relations.get_relation(relation_name)
query = relation.get_osm_housenumbers_query()
relation.get_files().write_osm_housenumbers(overpass_query.overpass_query(query))
break
except urllib.error.HTTPError as http_error:
logging.info("update_street_housenumbers: http error: %s", str(http_error))
logging.info("update_street_housenumbers: end: %s", relation_name)
def update_street_housenumbers_ref(relations: helpers.Relations, config: configparser.ConfigParser) -> None:
"""Update the existing reference street housenumber list of all relations."""
for relation_name in relations.get_active_names():
logging.info("update_street_housenumbers_ref: start: %s", relation_name)
relation = relations.get_relation(relation_name)
reference = config.get('wsgi', 'reference_housenumbers').strip().split(' ')
relation.write_ref_housenumbers(reference)
logging.info("update_street_housenumbers_ref: end: %s", relation_name)
def update_missing_housenumbers(relations: helpers.Relations) -> None:
"""Update the relation's house number coverage stats."""
logging.info("update_missing_housenumbers: start")
for relation_name in relations.get_active_names():
relation = relations.get_relation(relation_name)
streets = relation.get_config().should_check_missing_streets()
if streets == "only":
continue
relation.write_missing_housenumbers()
logging.info("update_missing_housenumbers: end")
def update_missing_streets_stats(relations: helpers.Relations) -> None:
"""Update the relation's street coverage stats."""
logging.info("update_missing_streets_stats: start")
for relation_name in relations.get_active_names():
relation = relations.get_relation(relation_name)
streets = relation.get_config().should_check_missing_streets()
if streets == "no":
continue
relation.write_missing_streets()
logging.info("update_missing_streets_stats: end")
def our_main(relations: helpers.Relations, config: configparser.ConfigParser) -> None:
"""Performs the actual nightly task."""
update_streets(relations)
update_street_housenumbers(relations)
update_street_housenumbers_ref(relations, config)
update_missing_housenumbers(relations)
update_missing_streets_stats(relations)
def main() -> None:
"""Commandline interface to this module."""
config = configparser.ConfigParser()
config_path = util.get_abspath("wsgi.ini")
config.read(config_path)
datadir = util.get_abspath("data")
workdir = helpers.get_workdir(config)
relations = helpers.Relations(datadir, workdir)
logpath = os.path.join(workdir, "cron.log")
logging.basicConfig(filename=logpath,
level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler())
start = time.time()
# Query inactive relations once a month.
relations.activate_all(time.localtime(start).tm_mday == 1)
try:
our_main(relations, config)
# pylint: disable=broad-except
except Exception:
logging.error("main: unhandled exception: %s", traceback.format_exc())
delta = time.time() - start
logging.info("main: finished in %s", str(datetime.timedelta(seconds=delta)))
if __name__ == "__main__":
main()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
{"/tests/test_wsgi.py": ["/wsgi.py"], "/wsgi.py": ["/util.py"], "/cron.py": ["/util.py"]}
|
39,921
|
ImreSamu/osm-gimmisn
|
refs/heads/master
|
/util.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Miklos Vajna and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The util module contains free functions shared between other modules."""
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Iterable
from typing import Optional
from typing import Sequence
from typing import TextIO
from typing import Tuple
from typing import cast
import os
import pickle
import re
import urllib.error
import yattag # type: ignore
import accept_language
from i18n import translate as _
import i18n
import overpass_query
class HouseNumber:
"""
A house number is a string which remembers what was its provider range. E.g. the "1-3" string
can generate 3 house numbers, all of them with the same range.
"""
def __init__(self, number: str, source: str) -> None:
self.__number = number
self.__source = source
def get_number(self) -> str:
"""Returns the house number string."""
return self.__number
def get_source(self) -> str:
"""Returns the source range."""
return self.__source
def __repr__(self) -> str:
return "HouseNumber(number=%s, source=%s)" % (self.__number, self.__source)
def __eq__(self, other: object) -> bool:
"""Source is explicitly non-interesting."""
other_house_number = cast(HouseNumber, other)
return self.__number == other_house_number.get_number()
def __hash__(self) -> int:
"""Source is explicitly non-interesting."""
return hash(self.__number)
@staticmethod
def is_invalid(house_number: str, invalids: List[str]) -> bool:
"""Decides if house_number is invalid according to invalids."""
if house_number in invalids:
return True
number = ""
match = re.match(r"([0-9]+).*", house_number)
if match:
number = match.group(1)
suffix = ""
match = re.match(r".*([A-Za-z]+)", house_number)
if match:
suffix = match.group(1).lower()
house_number = number + suffix
return house_number in invalids
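# Example for HouseNumber.is_invalid (values are illustrative): with
# invalids == ["42a"], the house number "42/A" is reduced to its numeric prefix
# "42" plus the lower-cased suffix "a", so is_invalid("42/A", ["42a"]) is True.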
def format_even_odd(only_in_ref: List[str], doc: Optional[yattag.Doc]) -> List[str]:
"""Separate even and odd numbers, this helps survey in most cases."""
key = split_house_number
even = sorted([i for i in only_in_ref if int(split_house_number(i)[0]) % 2 == 0], key=key)
odd = sorted([i for i in only_in_ref if int(split_house_number(i)[0]) % 2 == 1], key=key)
if doc:
if odd:
for index, elem in enumerate(odd):
if index:
doc.text(", ")
doc.asis(color_house_number(elem).getvalue())
if even:
if odd:
doc.stag("br")
for index, elem in enumerate(even):
if index:
doc.text(", ")
doc.asis(color_house_number(elem).getvalue())
return []
even_string = ", ".join(even)
odd_string = ", ".join(odd)
elements = []
if odd_string:
elements.append(odd_string)
if even_string:
elements.append(even_string)
return elements
def color_house_number(fro: str) -> yattag.Doc:
"""Colors a house number according to its suffix."""
doc = yattag.Doc()
if not fro.endswith("*"):
doc.text(fro)
return doc
with doc.tag("span", style="color: blue;"):
doc.text(fro[:-1])
return doc
def build_street_reference_cache(local_streets: str) -> Dict[str, Dict[str, List[str]]]:
"""Builds an in-memory cache from the reference on-disk TSV (street version)."""
memory_cache = {} # type: Dict[str, Dict[str, List[str]]]
disk_cache = local_streets + ".pickle"
if os.path.exists(disk_cache):
with open(disk_cache, "rb") as sock_cache:
memory_cache = pickle.load(sock_cache)
return memory_cache
with open(local_streets, "r") as sock:
first = True
while True:
line = sock.readline()
if first:
first = False
continue
if not line:
break
refmegye, reftelepules, street = line.strip().split("\t")
# Filter out invalid street type.
street = re.sub(" null$", "", street)
if refmegye not in memory_cache.keys():
memory_cache[refmegye] = {}
if reftelepules not in memory_cache[refmegye].keys():
memory_cache[refmegye][reftelepules] = []
memory_cache[refmegye][reftelepules].append(street)
with open(disk_cache, "wb") as sock_cache:
pickle.dump(memory_cache, sock_cache)
return memory_cache
def build_reference_cache(local: str) -> Dict[str, Dict[str, Dict[str, List[str]]]]:
"""Builds an in-memory cache from the reference on-disk TSV (house number version)."""
memory_cache = {} # type: Dict[str, Dict[str, Dict[str, List[str]]]]
disk_cache = local + ".pickle"
if os.path.exists(disk_cache):
with open(disk_cache, "rb") as sock_cache:
memory_cache = pickle.load(sock_cache)
return memory_cache
with open(local, "r") as sock:
first = True
while True:
line = sock.readline()
if first:
first = False
continue
if not line:
break
refmegye, reftelepules, street, num = line.strip().split("\t")
if refmegye not in memory_cache.keys():
memory_cache[refmegye] = {}
if reftelepules not in memory_cache[refmegye].keys():
memory_cache[refmegye][reftelepules] = {}
if street not in memory_cache[refmegye][reftelepules].keys():
memory_cache[refmegye][reftelepules][street] = []
memory_cache[refmegye][reftelepules][street].append(num)
with open(disk_cache, "wb") as sock_cache:
pickle.dump(memory_cache, sock_cache)
return memory_cache
def build_reference_caches(references: List[str]) -> List[Dict[str, Dict[str, Dict[str, List[str]]]]]:
"""Handles a list of references for build_reference_cache()."""
return [build_reference_cache(reference) for reference in references]
def split_house_number(house_number: str) -> Tuple[int, str]:
"""Splits house_number into a numerical and a remainder part."""
match = re.search(r"^([0-9]*)([^0-9].*|)$", house_number)
if not match: # pragma: no cover
return (0, '')
number = 0
try:
number = int(match.group(1))
except ValueError:
pass
return (number, match.group(2))
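# Examples (inputs are illustrative):
#   split_house_number("42")   -> (42, "")
#   split_house_number("42/b") -> (42, "/b")
#   split_house_number("abc")  -> (0, "abc")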
def parse_filters(tokens: List[str]) -> Dict[str, str]:
"""Parses a filter description, like 'filter-for', 'refmegye', '42'."""
ret = {} # type: Dict[str, str]
filter_for = False
for index, value in enumerate(tokens):
if value == "filter-for":
filter_for = True
continue
if not filter_for:
continue
if value == "incomplete":
ret[value] = ""
if index + 1 >= len(tokens):
continue
if value in ("refmegye", "reftelepules"):
ret[value] = tokens[index + 1]
return ret
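# Example: parse_filters(["osm", "filter-for", "refmegye", "01", "reftelepules", "011"])
# returns {"refmegye": "01", "reftelepules": "011"}, while
# parse_filters(["osm", "filter-for", "incomplete"]) returns {"incomplete": ""}.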
def html_escape(text: str) -> yattag.Doc:
"""Factory of yattag.Doc from a string."""
doc = yattag.Doc()
doc.text(text)
return doc
def handle_overpass_error(http_error: urllib.error.HTTPError) -> yattag.Doc:
"""Handles a HTTP error from Overpass."""
doc = yattag.Doc()
doc.text(_("Overpass error: {0}").format(str(http_error)))
sleep = overpass_query.overpass_query_need_sleep()
if sleep:
doc.stag("br")
doc.text(_("Note: wait for {} seconds").format(sleep))
return doc
def setup_localization(environ: Dict[str, Any]) -> str:
"""Provides localized strings for this thread."""
# Set up localization.
languages = environ.get("HTTP_ACCEPT_LANGUAGE")
if languages:
parsed = accept_language.parse_accept_language(languages)
if parsed:
language = parsed[0].language
i18n.set_language(language)
return cast(str, language)
return ""
def gen_link(url: str, label: str) -> yattag.Doc:
"""Generates a link to a URL with a given label."""
doc = yattag.Doc()
with doc.tag("a", href=url):
doc.text(label + "...")
# Always auto-visit the link for now.
with doc.tag("script", type="text/javascript"):
doc.text("window.location.href = \"%s\";" % url)
return doc
def write_html_header(doc: yattag.Doc) -> None:
"""Produces the verify first line of a HTML output."""
doc.asis("<!DOCTYPE html>\n")
def process_template(buf: str, osmrelation: int) -> str:
"""Turns an overpass query template to an actual query."""
buf = buf.replace("@RELATION@", str(osmrelation))
    # An Overpass area ID is the OSM relation ID + 3600000000, see js/ide.js
# in https://github.com/tyrasd/overpass-turbo
buf = buf.replace("@AREA@", str(3600000000 + osmrelation))
return buf
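# Example: with osmrelation == 42, the template "rel(@RELATION@);area(@AREA@);"
# becomes "rel(42);area(3600000042);".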
def should_expand_range(numbers: List[int], street_is_even_odd: bool) -> bool:
"""Decides if an x-y range should be expanded."""
if len(numbers) != 2:
return False
if numbers[1] < numbers[0]:
# E.g. 42-1, -1 is just a suffix to be ignored.
numbers[1] = 0
return True
# If there is a parity mismatch, ignore.
if street_is_even_odd and numbers[0] % 2 != numbers[1] % 2:
return False
# Assume that 0 is just noise.
if numbers[0] == 0:
return False
# Ranges larger than this are typically just noise in the input data.
if numbers[1] > 1000 or numbers[1] - numbers[0] > 24:
return False
return True
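# Examples with street_is_even_odd == True (inputs are illustrative):
#   [1, 9]    -> True  (small range, matching parity)
#   [2, 5]    -> False (parity mismatch)
#   [0, 10]   -> False (0 is treated as noise)
#   [10, 100] -> False (wider than 24 house numbers, assumed noise)
#   [42, 1]   -> True  (the second number is treated as a suffix and zeroed)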
def html_table_from_list(table: List[List[yattag.Doc]]) -> yattag.Doc:
"""Produces a HTML table from a list of lists."""
doc = yattag.Doc()
with doc.tag("table", klass="sortable"):
for row_index, row_content in enumerate(table):
with doc.tag("tr"):
for cell in row_content:
if row_index == 0:
with doc.tag("th"):
with doc.tag("a", href="#"):
doc.text(cell.getvalue())
else:
with doc.tag("td"):
doc.asis(cell.getvalue())
return doc
def tsv_to_list(stream: TextIO) -> List[List[yattag.Doc]]:
"""Turns a tab-separated table into a list of lists."""
table = []
first = True
type_index = 0
for line in stream.readlines():
if not line.strip():
continue
if first:
first = False
for index, column in enumerate(line.split("\t")):
if column.strip() == "@type":
type_index = index
cells = [html_escape(cell.strip()) for cell in line.split("\t")]
if cells and type_index:
# We know the first column is an OSM ID.
try:
osm_id = int(cells[0].getvalue())
osm_type = cells[type_index].getvalue()
doc = yattag.Doc()
href = "https://www.openstreetmap.org/{}/{}".format(osm_type, osm_id)
with doc.tag("a", href=href, target="_blank"):
                    doc.text(str(osm_id))
cells[0] = doc
except ValueError:
# Not an int, ignore.
pass
table.append(cells)
return table
def get_nth_column(sock: TextIO, column: int) -> List[str]:
"""Reads the content from sock, interprets its content as tab-separated values, finally returns
the values of the nth column. If a row has less columns, that's silently ignored."""
ret = []
first = True
for line in sock.readlines():
if first:
first = False
continue
tokens = line.strip().split('\t')
if len(tokens) < column + 1:
continue
ret.append(tokens[column])
return ret
def get_housenumber_ranges(house_numbers: List[HouseNumber]) -> List[str]:
"""Gets a reference range list for a house number list by looking at what range provided a givne
house number."""
ret = []
for house_number in house_numbers:
ret.append(house_number.get_source())
return sorted(set(ret))
def git_link(version: str, prefix: str) -> yattag.Doc:
"""Generates a HTML link based on a website prefix and a git-describe version."""
commit_hash = re.sub(".*-g", "", version)
doc = yattag.Doc()
with doc.tag("a", href=prefix + commit_hash):
doc.text(version)
return doc
def get_abspath(path: str) -> str:
"""Make a path absolute, taking the repo root as a base dir."""
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(__file__), path)
def sort_numerically(strings: Iterable[HouseNumber]) -> List[HouseNumber]:
"""Sorts strings according to their numerical value, not alphabetically."""
return sorted(strings, key=lambda x: split_house_number(x.get_number()))
def process_csv_body(fun: Callable[[Iterable[str]], List[str]], data: str) -> str:
"""
Process the body of a CSV/TSV with the given function while keeping the header intact.
"""
lines = data.split('\n')
header = lines[0] if lines else ''
    body = lines[1:] if lines else []
result = [header] + fun(body)
return '\n'.join(result)
def get_array_nth(arr: Sequence[str], index: int) -> str:
"""Gets the nth element of arr, returns en empty string on error."""
return arr[index] if len(arr) > index else ''
def split_street_line(line: str) -> Tuple[bool, str, str, str, Tuple[int, str]]:
"""
Augment TSV Overpass street name result lines to aid sorting.
It prepends a bool to indicate whether the street is missing a name, thus
streets with missing names are ordered last.
oid is interpreted numerically while other fields are taken alphabetically.
"""
field = line.split('\t')
oid = get_array_nth(field, 0)
name = get_array_nth(field, 1)
highway = get_array_nth(field, 2)
service = get_array_nth(field, 3)
missing_name = name == ''
return (missing_name, name, highway, service, split_house_number(oid))
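# Example (tab-separated Overpass output, values are illustrative):
#   split_street_line("123\tB street\tresidential\t")
#     -> (False, "B street", "residential", "", (123, ""))
# Lines with an empty name yield a leading True, so they sort after named streets.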
# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
{"/tests/test_wsgi.py": ["/wsgi.py"], "/wsgi.py": ["/util.py"], "/cron.py": ["/util.py"]}
|
39,980
|
xavierharo/covid-modeler
|
refs/heads/master
|
/modeler/models/exponential_model.py
|
import warnings
import numpy as np
from scipy.optimize import curve_fit
from .base_model import BaseModel
class ExponentialModel(BaseModel):
plot_name = 'Exponential'
@staticmethod
def exponential(t, a, b, c):
return a * np.exp(b * t) + c
def train(self):
epopt, epcov = curve_fit(self.exponential, self.x_train, self.y_train, maxfev=10000)
lerror = np.sqrt(np.diag(epcov))
        # for an exponential curve a * exp(b * t) + c, the growth rate is b, so the doubling time is ln(2) / b
edoubletime = np.log(2) / epopt[1]
# standard error
edoubletimeerror = 1.96 * edoubletime * np.abs(lerror[1] / epopt[1])
# calculate R^2
residuals = self.y_train - self.exponential(self.x_train, *epopt)
ss_res = np.sum(residuals ** 2)
ss_tot = np.sum((self.y_train - np.mean(self.y_train)) ** 2)
exponentialr2 = 1 - (ss_res / ss_tot)
self.epopt = epopt
if exponentialr2 > 0.95:
self.log(f'\n** Ajuste exponencial**\n')
self.log(f'\tR^2: {round(exponentialr2, 5)}')
self.log(f'\tTiempo para duplicarse (ritmo actual): {round(edoubletime, 2)} (± {round(edoubletimeerror, 2)}) días')
else:
warning_message = f'Exponential model is trained but the results might be inaccurate, as the R2 value is {exponentialr2}'
warnings.warn(warning_message)
self.r2 = exponentialr2
self.is_trained = True
def predict(self):
self.y_pred = self.exponential(self.x_pred, *self.epopt)
self.is_predicted = True
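# A minimal usage sketch (synthetic data, not part of the repository):
#   import numpy as np
#   x = np.arange(10, dtype='float64')
#   y = 2 * np.exp(0.3 * x) + 1
#   model = ExponentialModel(x_train=x, y_train=y, predict_len=5)
#   model.y_pred  # fitted curve extended 5 steps past the training range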
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,981
|
xavierharo/covid-modeler
|
refs/heads/master
|
/gui.py
|
import os
import sys
module_path = os.path.abspath(os.path.join('.'))
if module_path not in sys.path:
sys.path.append(module_path)
from gooey import Gooey, GooeyParser
from modeler.wrapper import Modeler
@Gooey()
def main():
    parser = GooeyParser(description='Fit COVID-19 case models for a selected country.')
modeler = Modeler()
parser.add_argument(
'country',
widget='Dropdown',
choices=modeler.c.show_countries(),
help='Select a country from the list'
)
# predict_len=15, use_default_models=True, mode='notebook', output_folder='output', plot_mode='image'
parser.add_argument(
'--predict_len',
default=15,
metavar='Prediction length',
type=int,
help='Days to predict from the last train date'
)
parser.add_argument(
'--output_folder',
default='output',
metavar='Output folder',
type=str,
        help='Where to write the results'
)
parser.add_argument(
'--show_plot',
metavar='Show plot after finished',
widget='Dropdown',
choices=['Yes', 'No'],
default='Yes',
help='Show the plot after finishing'
)
args = parser.parse_args()
print(args)
show_plot = str(args.show_plot) == 'Yes'
modeler = Modeler(
country=args.country,
predict_len=args.predict_len,
output_folder=args.output_folder,
mode='cli',
plot_mode='html',
show_plot=show_plot
)
modeler.process()
if __name__ == '__main__':
main()
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,982
|
xavierharo/covid-modeler
|
refs/heads/master
|
/modeler/wrapper.py
|
import os
from datetime import timedelta
import pandas as pd
import numpy as np
import chart_studio
import chart_studio.plotly as py
import plotly.graph_objects as go
from . import countries, models
class Modeler:
default_models = {
'linear': models.LinearModel,
'logistic': models.LogisticModel,
'exponential': models.ExponentialModel
}
    record = ''
    def __init__(self, country=None, predict_len=15, use_default_models=True, mode='notebook', output_folder='output', plot_mode='image', show_plot=False):
        # Per-instance dict, so two Modeler objects never share fitted models.
        self.processed_models = {}
        self.predict_len = predict_len
self.c = countries.CountryData()
if country is not None:
self.set_country(country)
if use_default_models:
self.models = self.default_models
# export options
if mode not in ('notebook', 'cli'):
raise RuntimeError('El modo debe ser `notebook` o `cli`')
self.mode = mode
self.output_folder = output_folder
self.plot_mode = plot_mode
self.show_plot = show_plot
def log(self, text):
self.record += text
def process(self):
self.record = ''
        # Need at least 8 days of data to look back at the value one week ago.
        if len(self.data[1]) >= 8:
current = self.data[1].astype(int)[-1]
lastweek = self.data[1].astype(int)[-8]
if current > lastweek:
self.log(f'Resultados para *{self.country_name}*')
self.log('\n** Basado en los datos de la última semana **\n')
self.log(f'\n\tCasos confirmados en {self.data[2][-1]} \t {current}')
self.log(f'\n\tCasos confirmados en {self.data[2][-8]} \t {lastweek}')
ratio = current / lastweek
self.log(f'\n\tProporción: {round(ratio, 2)}')
self.log(f'\n\tIncremento semanal: {round( 100 * (ratio - 1), 1)}%')
dailypercentchange = round( 100 * (pow(ratio, 1/7) - 1), 1)
self.log(f'\n\tIncremento diario: {dailypercentchange}% por día')
recentdbltime = round( 7 * np.log(2) / np.log(ratio), 1)
self.log(f'\n\tTiempo que tarda en duplicarse (al ritmo actual): {recentdbltime} días')
for name, model in self.models.items():
self.processed_models[name] = model(
x_train=self.data[0],
y_train=self.data[1],
predict_len=self.predict_len,
start_date=self.data[2][0]
)
self.create_record()
self.plot()
self.export()
def set_country(self, country):
self.data = self.c.get_country(country)
self.country_name = country
def create_record(self):
best_r2 = 0
best_model = ''
for name, model in self.processed_models.items():
self.log(model.record)
if hasattr(model, 'r2') and model.r2 > best_r2:
best_r2 = model.r2
best_model = model.plot_name
if best_r2 > 0:
self.log(f"\nMejor modelo: {best_model} (R2 = {best_r2})")
def plot(self):
plot_data = []
        # The start date itself is the first point, hence the len - 1 offset.
        end_date = pd.to_datetime(self.data[2][0]).date() + timedelta(days=len(self.data[2]) - 1)
original_data = go.Scatter(
x=pd.date_range(start=str(self.data[2][0]), end=end_date),
y=self.data[1],
mode='markers',
name='Casos confirmados'
)
plot_data.append(original_data)
for name, model in self.processed_models.items():
plot_data.append(model.chart)
layout = dict(
title = self.country_name,
xaxis_type='date'
)
self.fig = go.Figure(data=plot_data, layout=layout)
def export(self):
if self.mode == 'notebook':
print(self.record)
self.fig.show()
return
        # Create the destination folder
if not os.path.exists(self.output_folder):
os.mkdir(self.output_folder)
with open(os.path.join(self.output_folder, f'results_{self.country_name}.txt'), 'w', encoding='utf8') as output_file:
print(self.record)
output_file.write(self.record)
print("******************************************")
print(f"Resultados escritos en {output_file.name}")
print("******************************************")
# export the plot
if self.plot_mode == 'image':
self.export_image_plot()
if self.plot_mode == 'html':
self.export_html_plot()
def export_image_plot(self):
try:
file_name = os.path.join(self.output_folder, f'results_{self.country_name}.png')
self.fig.write_image(os.path.join(self.output_folder, f'results_{self.country_name}.png'))
print(f'El gráfico fue exportado en {file_name}')
if self.show_plot:
self.fig.show()
        except ValueError:
print("Hubo un error al exportar la imagen")
print("Este error probablemente se debe a que se requiere la instalación de Orca para exportar imágenes")
print("La guía de instalación se encuentra en: https://github.com/plotly/orca")
def export_html_plot(self):
file_name = os.path.join(self.output_folder, f'results_{self.country_name}.html')
self.fig.write_html(file_name)
print(f'El gráfico fue exportado en {file_name}')
if self.show_plot:
self.fig.show()
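# A minimal end-to-end sketch (requires network access to download the CSV;
# the country name is illustrative):
#   m = Modeler(country='Spain', predict_len=15, mode='cli', plot_mode='html')
#   m.process()  # writes results_Spain.txt and results_Spain.html under output/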
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,983
|
xavierharo/covid-modeler
|
refs/heads/master
|
/modeler/models/logistic_model.py
|
import warnings
import numpy as np
from scipy.optimize import curve_fit
from .base_model import BaseModel
class LogisticModel(BaseModel):
plot_name = 'Logistic'
@staticmethod
def logistic(t, a, b, c, d):
return c + (d - c) / (1 + a * np.exp(- b * t))
def train(self):
lpopt, lpcov = curve_fit(self.logistic, self.x_train, self.y_train, maxfev=10000)
lerror = np.sqrt(np.diag(lpcov))
# for logistic curve at half maximum, slope = growth rate/2. so doubling time = ln(2) / (growth rate/2)
ldoubletime = np.log(2) / (lpopt[1] / 2)
# standard error
ldoubletimeerror = 1.96 * ldoubletime * np.abs(lerror[1] / lpopt[1])
# calculate R^2
residuals = self.y_train - self.logistic(self.x_train, *lpopt)
ss_res = np.sum(residuals ** 2)
ss_tot = np.sum((self.y_train - np.mean(self.y_train)) ** 2)
logisticr2 = 1 - (ss_res / ss_tot)
self.lpopt = lpopt
if logisticr2 > 0.95:
self.log(f'\n** Ajuste logistico**\n')
self.log(f'\tR^2: {round(logisticr2, 5)}')
self.log(f'\tTiempo para duplicarse (ritmo actual): {round(ldoubletime, 2)} (± {round(ldoubletimeerror, 2)}) días')
else:
warning_message = f'Logistic model is trained but the results might be inaccurate, as the R2 value is {logisticr2}'
warnings.warn(warning_message)
self.r2 = logisticr2
self.is_trained = True
def predict(self):
self.y_pred = self.logistic(self.x_pred, *self.lpopt)
self.is_predicted = True
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,984
|
xavierharo/covid-modeler
|
refs/heads/master
|
/modeler/models/__init__.py
|
from .linear_model import LinearModel
from .logistic_model import LogisticModel
from .exponential_model import ExponentialModel
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,985
|
xavierharo/covid-modeler
|
refs/heads/master
|
/modeler/countries.py
|
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
class CountryData:
def __init__(self):
self.download()
def download(self):
self.df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
def get_country(self, country, dates=True):
co = self.df[self.df['Country/Region'] == country].iloc[:,4:].T.sum(axis = 1)
co = pd.DataFrame(co)
co.columns = ['Cases']
co = co.loc[co['Cases'] > 0]
y = np.array(co['Cases'])
x = np.arange(y.size)
if dates:
start_date = pd.to_datetime(co.index[0], dayfirst=True)
end_date = pd.to_datetime(co.index[-1], dayfirst=True)
x_range = np.array([str(d.date()) for d in pd.date_range(start_date, end_date)])
return np.array([x, y, x_range])
return np.array([x, y])
def show_countries(self, start=None):
if start:
return self.df[self.df['Country/Region'].str.lower().str.contains(start.lower())]['Country/Region'].unique().tolist()
else:
return self.df['Country/Region'].unique().tolist()
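# Example (available names depend on the upstream CSV):
#   c = CountryData()
#   c.show_countries('spa')               # e.g. ['Spain']
#   x, y, dates = c.get_country('Spain')  # day index, cumulative cases, ISO dates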
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,986
|
xavierharo/covid-modeler
|
refs/heads/master
|
/modeler/models/base_model.py
|
from datetime import timedelta
import pandas as pd
import numpy as np
import plotly.graph_objects as go
class BaseModel:
use_dates = False
is_trained = False
is_predicted = False
record = ''
def __init__(self, x_train, y_train, predict_len=15, plot=True, plot_name='', start_date=None):
self.x_train = np.asarray(x_train, dtype='float64')
self.y_train = np.asarray(y_train, dtype='float64')
self.x_pred = np.arange(x_train.size + int(predict_len))
self.y_pred = np.array([])
self.plot_name = self.get_plot_name()
if start_date:
self.set_date_range(start_date)
self.train()
self.predict()
if plot:
self.chart = self.plot()
def get_plot_name(self):
return self.plot_name
def train(self):
pass
def predict(self):
return
def set_date_range(self, start_date):
end_date = pd.to_datetime(start_date).date() + timedelta(days=self.x_pred.size - 1)
self.date_range = pd.Series([str(d.date()) for d in pd.date_range(str(start_date), end_date)])
self.use_dates = True
def plot(self):
if not self.is_predicted:
raise RuntimeError('Model needs to be predicted before plotting. Please execute the `predict` method')
x = self.date_range if self.use_dates else self.x_pred
return go.Scatter(
x=x,
y=np.round_(self.y_pred),
mode='lines',
name=self.plot_name
)
def log(self, text):
self.record += text
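# Subclasses only have to override train() and predict(); a minimal sketch
# (the ConstantModel name is hypothetical):
#   class ConstantModel(BaseModel):
#       plot_name = 'Constant'
#       def train(self):
#           self.mean = self.y_train.mean()
#           self.is_trained = True
#       def predict(self):
#           self.y_pred = np.full(self.x_pred.size, self.mean)
#           self.is_predicted = True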
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,987
|
xavierharo/covid-modeler
|
refs/heads/master
|
/test.py
|
import os
import sys
module_path = os.path.abspath(os.path.join('.'))
if module_path not in sys.path:
sys.path.append(module_path)
from modeler.wrapper import Modeler
modeler = Modeler(predict_len=15, output_folder='example_output', mode='cli', plot_mode='html')
print(modeler.c.show_countries())
# modeler.process()
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
39,988
|
xavierharo/covid-modeler
|
refs/heads/master
|
/modeler/models/linear_model.py
|
from .base_model import BaseModel
import numpy as np
from sklearn.linear_model import LinearRegression
class LinearModel(BaseModel):
plot_name = 'Linear'
def train(self):
x, y = np.reshape(self.x_train, (-1, 1)), np.reshape(self.y_train, (-1, 1))
self.model = LinearRegression().fit(x, y)
self.is_trained = True
def predict(self):
y_pred = self.model.predict(self.x_pred.reshape(-1, 1))
self.y_pred = y_pred.reshape(y_pred.size)
self.is_predicted = True
|
{"/modeler/models/exponential_model.py": ["/modeler/models/base_model.py"], "/gui.py": ["/modeler/wrapper.py"], "/modeler/models/logistic_model.py": ["/modeler/models/base_model.py"], "/modeler/models/__init__.py": ["/modeler/models/linear_model.py", "/modeler/models/logistic_model.py", "/modeler/models/exponential_model.py"], "/test.py": ["/modeler/wrapper.py"], "/modeler/models/linear_model.py": ["/modeler/models/base_model.py"]}
|
40,024
|
sumitskr/action-repo
|
refs/heads/main
|
/app.py
|
from flask import Flask, request, render_template
from db import *
app = Flask(__name__)
@app.route('/')
def index():
activity_list = activity.find()
return render_template('index.html', activity_list=activity_list)
@app.route('/github', methods=['POST'])
def github_api():
if request.headers['Content-Type'] == 'application/json':
l = request.json
print(l)
if str(l).find('pull_request') == -1 and str(l).find('before') != -1:
request_id = None
from_branch = None
author = l['pusher']['name']
to_branch = l['ref']
action = "PUSH"
query = {'request_id': request_id, 'author': author, 'action': action, 'from_branch': from_branch,
'to_branch': to_branch}
pull_ob = Pull(request_id, author, action, from_branch, to_branch)
pull_ob.commit()
print("push action going on")
return l
        elif str(l).find('pull_request') != -1 and l.get('action') == 'synchronize':
            # `synchronize` fires when commits are pushed to an open pull request;
            # its payload carries a `pull_request` object instead of `pusher`/`ref`.
            print("push action going on")
            request_id = l['pull_request']['id']
            author = l['pull_request']['user']['login']
            from_branch = l['pull_request']['head']['label']
            to_branch = l['pull_request']['base']['label']
            action = "PUSH"
            query = {'request_id': request_id, 'author': author, 'action': action, 'from_branch': from_branch,
                     'to_branch': to_branch}
            pull_ob = Pull(request_id, author, action, from_branch, to_branch)
            pull_ob.commit()
            print(l)
            return l
elif str(l).find('pull_request') != -1 and str(l).find('before') == -1 and l['action'] == 'opened':
pull_req_by = l
request_id = pull_req_by['pull_request']['id']
author = pull_req_by['pull_request']['user']['login']
action = 'PULL_REQUEST'
from_branch = pull_req_by['pull_request']['head']['label']
to_branch = pull_req_by['pull_request']['base']['label']
query = {'request_id': request_id, 'author': author, 'action': action, 'from_branch': from_branch,
'to_branch': to_branch}
print("pull action going on")
pull_ob = Pull(request_id, author, action, from_branch, to_branch)
pull_ob.commit()
return l
return l
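# A local smoke test for the webhook route (the payload fields are illustrative
# and follow the shape of a GitHub push event):
#   curl -X POST http://127.0.0.1:5000/github \
#     -H 'Content-Type: application/json' \
#     -d '{"before": "abc", "ref": "refs/heads/main", "pusher": {"name": "alice"}}'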
if __name__ == '__main__':
app.run(debug=True)
|
{"/app.py": ["/db.py"]}
|