index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
2,401
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/archs/encoder2d.py
|
import torch
import torch.nn as nn
# import hyperparams as hyp
# from utils_basic import *
class Skipnet2d(nn.Module):
    """2D encoder-decoder with skip connections (U-Net style).

    Three stride-2 conv blocks downsample the input; two stride-2 transposed
    convs upsample back, concatenating the matching encoder feature map at
    each step; a final 1x1 conv maps to out_chans. With 3 downs and 2 ups the
    output spatial size is half the input size.
    """
    def __init__(self, in_chans, mid_chans=64, out_chans=1):
        """
        Args:
            in_chans: channels of the input tensor.
            mid_chans: base channel width; deeper layers use multiples of it.
            out_chans: channels of the output tensor.
        """
        super(Skipnet2d, self).__init__()
        conv2d = []
        conv2d_transpose = []
        up_bn = []

        # Encoder: in -> mid -> 2*mid -> 4*mid, each block at stride 2.
        self.down_in_dims = [in_chans, mid_chans, 2*mid_chans]
        self.down_out_dims = [mid_chans, 2*mid_chans, 4*mid_chans]
        self.down_ksizes = [3, 3, 3]
        self.down_strides = [2, 2, 2]
        padding = 1  # 'same'-style padding for ksize=3

        for in_dim, out_dim, ksize, stride in zip(
                self.down_in_dims, self.down_out_dims, self.down_ksizes, self.down_strides):
            conv2d.append(nn.Sequential(
                nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=ksize, stride=stride, padding=padding),
                nn.LeakyReLU(),
                nn.BatchNorm2d(num_features=out_dim),
            ))
        self.conv2d = nn.ModuleList(conv2d)

        # Decoder: the BN dims account for the concatenated skip connection
        # (e.g. 4*mid from the transpose conv + 2*mid from the skip = 6*mid).
        self.up_in_dims = [4*mid_chans, 6*mid_chans]
        self.up_bn_dims = [6*mid_chans, 3*mid_chans]
        self.up_out_dims = [4*mid_chans, 2*mid_chans]
        self.up_ksizes = [4, 4]
        self.up_strides = [2, 2]
        padding = 1  # Note: this only holds for ksize=4 and stride=2!
        # (removed a leftover debug print of up_out_dims here)

        for in_dim, bn_dim, out_dim, ksize, stride in zip(
                self.up_in_dims, self.up_bn_dims, self.up_out_dims, self.up_ksizes, self.up_strides):
            conv2d_transpose.append(nn.Sequential(
                nn.ConvTranspose2d(in_channels=in_dim, out_channels=out_dim, kernel_size=ksize, stride=stride, padding=padding),
                nn.LeakyReLU(),
            ))
            up_bn.append(nn.BatchNorm2d(num_features=bn_dim))

        # final 1x1 conv to get our desired out_chans
        self.final_feature = nn.Conv2d(in_channels=3*mid_chans, out_channels=out_chans, kernel_size=1, stride=1, padding=0)
        self.conv2d_transpose = nn.ModuleList(conv2d_transpose)
        self.up_bn = nn.ModuleList(up_bn)

    def forward(self, inputs):
        """Encode, then decode with skip connections.

        Args:
            inputs: (B, in_chans, H, W) tensor; H and W must survive three
                halvings (i.e. be large enough for the stride-2 stack).
        Returns:
            (B, out_chans, H/2, W/2) tensor.
        """
        feat = inputs
        skipcons = []
        for conv2d_layer in self.conv2d:
            feat = conv2d_layer(feat)
            skipcons.append(feat)
        skipcons.pop()  # we don't want the innermost layer as skipcon
        for conv2d_transpose_layer, bn_layer in zip(self.conv2d_transpose, self.up_bn):
            feat = conv2d_transpose_layer(feat)
            feat = torch.cat([feat, skipcons.pop()], dim=1)  # skip connection by concatenation
            feat = bn_layer(feat)
        feat = self.final_feature(feat)
        return feat
# Quick smoke test: build a small Skipnet2d and push a random batch through it.
if __name__ == "__main__":
    net = Skipnet2d(in_chans=4, mid_chans=32, out_chans=3)
    # NOTE(review): this prints the bound method object itself, not the
    # parameters; presumably print(list(net.named_parameters())) was
    # intended -- confirm.
    print(net.named_parameters)
    inputs = torch.rand(2, 4, 128, 384)
    out = net(inputs)
    print(out.size())
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,402
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/pretrained_nets_carla.py
|
# Pretrained checkpoint names for the CARLA experiments.
# NOTE(review): the first ckpt assignment is immediately overwritten by the
# second; it appears to be kept only as a record of an earlier checkpoint.
ckpt = '02_s2_m128x32x128_p64x192_1e-3_F2_d32_F3_d32_s.01_O_c1_s.01_V_d32_e1_E2_e.1_n4_d32_c1_E3_n2_c1_mags7i3t_sta41'
ckpt = '02_s2_m128x32x128_1e-3_F3_d32_s.01_O_c2_s.1_E3_n2_c.1_mags7i3t_sta48'
feat3d_init = ckpt  # 3d feature net initializes from this checkpoint
feat3d_dim = 32     # channel width of the 3d feature net
occ_init = ckpt     # occupancy net initializes from the same checkpoint
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,403
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/main.py
|
from model_carla_static import CARLA_STATIC
from model_carla_ego import CARLA_EGO
from model_carla_det import CARLA_DET
import hyperparams as hyp
import os
import logging
logger = logging.Logger('catch_all')
def main():
    """Pick the model from hyperparams mode flags, run it, then tidy logs.

    Exactly one of hyp.do_carla_{static,ego,det} must be set; anything else
    is a configuration error. Any exception (including KeyboardInterrupt) is
    logged, after which zero-byte log files from the aborted run are removed.
    """
    checkpoint_dir_ = os.path.join("checkpoints", hyp.name)
    # mode flag -> log directory
    if hyp.do_carla_static:
        log_dir_ = os.path.join("logs_carla_static", hyp.name)
    elif hyp.do_carla_ego:
        log_dir_ = os.path.join("logs_carla_ego", hyp.name)
    elif hyp.do_carla_det:
        log_dir_ = os.path.join("logs_carla_det", hyp.name)
    else:
        assert(False)  # what mode is this?
    # exist_ok=True avoids the check-then-create race of an exists() guard
    os.makedirs(checkpoint_dir_, exist_ok=True)
    os.makedirs(log_dir_, exist_ok=True)
    try:
        if hyp.do_carla_static:
            model = CARLA_STATIC(
                checkpoint_dir=checkpoint_dir_,
                log_dir=log_dir_)
            model.go()
        elif hyp.do_carla_ego:
            model = CARLA_EGO(
                checkpoint_dir=checkpoint_dir_,
                log_dir=log_dir_)
            model.go()
        elif hyp.do_carla_det:
            model = CARLA_DET(
                checkpoint_dir=checkpoint_dir_,
                log_dir=log_dir_)
            model.go()
        else:
            assert(False)  # what mode is this?
    except (Exception, KeyboardInterrupt) as ex:
        # log the failure, then scrub empty event files left by the dead run
        logger.error(ex, exc_info=True)
        log_cleanup(log_dir_)
def log_cleanup(log_dir_):
    """Delete zero-byte files under each set's log directory.

    Empty tensorboard event files get left behind when a run dies early;
    removing them keeps the log directories tidy.

    Args:
        log_dir_: the run's base log directory; one subdirectory per
            hyp.set_names entry is scanned recursively.
    """
    log_dirs = []
    for set_name in hyp.set_names:
        log_dirs.append(log_dir_ + '/' + set_name)
    for log_dir in log_dirs:
        for r, d, f in os.walk(log_dir):
            for fname in f:
                # Bugfix: join with the walk root r, not log_dir. os.walk
                # descends into subdirectories, and joining with log_dir
                # produced wrong paths (and failed stats) for nested files.
                file_path = os.path.join(r, fname)
                if os.stat(file_path).st_size == 0:
                    os.remove(file_path)
# Script entry point.
if __name__ == '__main__':
    main()
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,404
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/nets/flownet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# from spatial_correlation_sampler import SpatialCorrelationSampler
import numpy as np
# import sys
# sys.path.append("..")
import archs.encoder3D
import hyperparams as hyp
import utils_basic
import utils_improc
import utils_misc
import utils_samp
import math
class FlowNet(nn.Module):
    """Coarse-to-fine 3D flow estimation between two feature volumes.

    At each scale in self.scales, a correlation cost volume between feat0 and
    the (so-far) warped feat1 is converted to a residual flow by a small 3D
    conv stack; residuals are accumulated compositionally (LK-style) by
    re-warping the original feat1 with the running total. Training losses:
    masked smooth-L1 and L2 against the ground-truth flow, a feature-warp
    consistency term, and a first-order smoothness term.
    """
    def __init__(self):
        super(FlowNet, self).__init__()
        print('FlowNet...')
        self.debug = False
        self.heatmap_size = hyp.flow_heatmap_size
        # coarse-to-fine resize factors, applied in order
        self.scales = [0.25, 0.5, 0.75, 1.0]
        self.num_scales = len(self.scales)
        # NOTE(review): the import of SpatialCorrelationSampler is commented
        # out at the top of this file, so this reference raises NameError at
        # construction time -- confirm that the spatial_correlation_sampler
        # package import should be restored.
        self.correlation_sampler = SpatialCorrelationSampler(
            kernel_size=1,
            patch_size=self.heatmap_size,
            stride=1,
            padding=0,
            dilation_patch=1,
        ).cuda()
        # maps the heatmap_size^3 correlation channels to a 3-channel flow
        self.flow_predictor = nn.Sequential(
            nn.Conv3d(in_channels=(self.heatmap_size**3), out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(negative_slope=0.1),
            nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(negative_slope=0.1),
            nn.Conv3d(in_channels=64, out_channels=3, kernel_size=1, stride=1, padding=0),
        ).cuda()
        self.smoothl1 = torch.nn.SmoothL1Loss(reduction='none')
        self.smoothl1_mean = torch.nn.SmoothL1Loss(reduction='mean')
        self.mse = torch.nn.MSELoss(reduction='none')
        self.mse_mean = torch.nn.MSELoss(reduction='mean')
        print(self.flow_predictor)

    def generate_flow(self, feat0, feat1, sc):
        """Predict flow from feat0 to feat1 at resize factor sc.

        Args:
            feat0, feat1: B x C x D x H x W feature volumes (same shape).
            sc: resize factor; the prediction is made at the reduced
                resolution, then upsampled and rescaled to full resolution.
        Returns:
            B x 3 x D x H x W flow at the input resolution.
        """
        B, C, D, H, W = list(feat0.shape)
        utils_basic.assert_same_shape(feat0, feat1)
        if self.debug:
            print('scale = %.2f' % sc)
            print('inputs:')
            print(feat0.shape)
            print(feat1.shape)
        if not sc==1.0:
            feat0 = F.interpolate(feat0, scale_factor=sc, mode='trilinear', align_corners=False)
            feat1 = F.interpolate(feat1, scale_factor=sc, mode='trilinear', align_corners=False)
            D, H, W = int(D*sc), int(H*sc), int(W*sc)
            if self.debug:
                print('downsamps:')
                print(feat0.shape)
                print(feat1.shape)
        feat0 = feat0.contiguous()
        feat1 = feat1.contiguous()
        cc = self.correlation_sampler(feat0, feat1)
        if self.debug:
            print('cc:')
            print(cc.shape)
        cc = cc.view(B, self.heatmap_size**3, D, H, W)
        cc = F.relu(cc)  # relu works better than leaky relu here
        if self.debug:
            print(cc.shape)
        cc = utils_basic.l2_normalize(cc, dim=1)
        flow = self.flow_predictor(cc)
        if self.debug:
            print('flow:')
            print(flow.shape)
        if not sc==1.0:
            # note 1px here means 1px/sc at the real scale
            # first let's put the pixels in the right places
            flow = F.interpolate(flow, scale_factor=(1./sc), mode='trilinear', align_corners=False)
            # now let's correct the scale
            flow = flow/sc
            if self.debug:
                print('flow up:')
                print(flow.shape)
        return flow

    def forward(self, feat0, feat1, flow_g, mask_g, summ_writer=None):
        """Estimate flow from feat0 to feat1 and compute training losses.

        Args:
            feat0, feat1: B x C x D x H x W feature volumes.
            flow_g: B x 3 x D x H x W ground-truth flow.
            mask_g: validity mask for the ground truth.
            summ_writer: optional summary writer for visualizations.
        Returns:
            (total_loss, flow_total): scalar loss and the estimated flow.
        """
        total_loss = torch.tensor(0.0).cuda()
        B, C, D, H, W = list(feat0.shape)
        utils_basic.assert_same_shape(feat0, feat1)
        flow_total = torch.zeros_like(flow_g)
        feat1_aligned = feat1.clone()
        # log the pre-alignment feature difference as a baseline (coeff 0)
        feat_diff = torch.mean(utils_basic.l2_on_axis((feat1_aligned-feat0), 1, keepdim=True))
        utils_misc.add_loss('flow/feat_align_diff_%.2f' % 0.0, 0, feat_diff, 0, summ_writer)
        for sc in self.scales:
            flow = self.generate_flow(feat0, feat1_aligned, sc)
            flow_total = flow_total + flow
            # compositional LK: warp the original thing using the cumulative flow
            feat1_aligned = utils_samp.backwarp_using_3D_flow(feat1, flow_total)
            valid1_region = utils_samp.backwarp_using_3D_flow(torch.ones_like(feat1[:,0:1]), flow_total)
            feat_diff = utils_basic.reduce_masked_mean(
                utils_basic.l2_on_axis((feat1_aligned-feat0), 1, keepdim=True), valid1_region)
            utils_misc.add_loss('flow/feat_align_diff_%.2f' % sc, 0, feat_diff, 0, summ_writer)
        # ok done inference
        # now for losses/metrics:
        l1_diff_3chan = self.smoothl1(flow_total, flow_g)
        l1_diff = torch.mean(l1_diff_3chan, dim=1, keepdim=True)
        l2_diff_3chan = self.mse(flow_total, flow_g)
        l2_diff = torch.mean(l2_diff_3chan, dim=1, keepdim=True)
        # balance voxels with (near-)zero gt flow against the moving ones
        nonzero_mask = ((torch.sum(torch.abs(flow_g), axis=1, keepdim=True) > 0.01).float())*mask_g
        yeszero_mask = (1.0-nonzero_mask)*mask_g
        l1_loss = utils_basic.reduce_masked_mean(l1_diff, mask_g)
        l2_loss = utils_basic.reduce_masked_mean(l2_diff, mask_g)
        l1_loss_nonzero = utils_basic.reduce_masked_mean(l1_diff, nonzero_mask)
        l1_loss_yeszero = utils_basic.reduce_masked_mean(l1_diff, yeszero_mask)
        l1_loss_balanced = (l1_loss_nonzero + l1_loss_yeszero)*0.5
        l2_loss_nonzero = utils_basic.reduce_masked_mean(l2_diff, nonzero_mask)
        l2_loss_yeszero = utils_basic.reduce_masked_mean(l2_diff, yeszero_mask)
        l2_loss_balanced = (l2_loss_nonzero + l2_loss_yeszero)*0.5
        clip = np.squeeze(torch.max(torch.abs(torch.mean(flow_g[0], dim=0))).detach().cpu().numpy()).item()
        if summ_writer is not None:
            # `sc` here is the last scale from the loop above (1.0)
            summ_writer.summ_3D_flow('flow/flow_e_%.2f' % sc, flow_total*mask_g, clip=clip)
            summ_writer.summ_3D_flow('flow/flow_g_%.2f' % sc, flow_g, clip=clip)
        utils_misc.add_loss('flow/l1_loss_nonzero', 0, l1_loss_nonzero, 0, summ_writer)
        utils_misc.add_loss('flow/l1_loss_yeszero', 0, l1_loss_yeszero, 0, summ_writer)
        utils_misc.add_loss('flow/l1_loss_balanced', 0, l1_loss_balanced, 0, summ_writer)
        total_loss = utils_misc.add_loss('flow/l1_loss', total_loss, l1_loss, hyp.flow_l1_coeff, summ_writer)
        total_loss = utils_misc.add_loss('flow/l2_loss', total_loss, l2_loss, hyp.flow_l2_coeff, summ_writer)
        total_loss = utils_misc.add_loss('flow/warp', total_loss, feat_diff, hyp.flow_warp_coeff, summ_writer)
        # smooth loss on the first-order gradients of the flow
        dx, dy, dz = utils_basic.gradient3D(flow_total, absolute=True)
        # Bugfix: was dx+dy+dx, which double-counted the x gradient and
        # ignored z entirely. Also normalized keepdims -> keepdim.
        smooth_vox = torch.mean(dx+dy+dz, dim=1, keepdim=True)
        if summ_writer is not None:
            summ_writer.summ_oned('flow/smooth_loss', torch.mean(smooth_vox, dim=3))
        smooth_loss = torch.mean(smooth_vox)
        total_loss = utils_misc.add_loss('flow/smooth_loss', total_loss, smooth_loss, hyp.flow_smooth_coeff, summ_writer)
        return total_loss, flow_total
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,405
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/archs/bottle2D.py
|
import torch
import torch.nn as nn
import time
# import hyperparams as hyp
# from utils_basic import *
import torch.nn.functional as F
class Bottle2D(nn.Module):
    """Convolutional bottleneck: three stride-2 conv blocks, then flatten.

    A linear head is constructed in __init__ but is currently not applied
    in forward(); the flattened conv features are returned directly.
    """
    def __init__(self, in_channel, pred_dim, chans=64):
        super(Bottle2D, self).__init__()
        # channel schedule: chans -> 2*chans -> 4*chans
        self.out_chans = [chans, 2*chans, 4*chans]
        blocks = []
        prev_dim = in_channel
        for cur_dim in self.out_chans:
            blocks.append(nn.Sequential(
                nn.Conv2d(in_channels=prev_dim, out_channels=cur_dim, kernel_size=4, stride=2, padding=0),
                nn.LeakyReLU(),
                nn.BatchNorm2d(num_features=cur_dim),
            ))
            prev_dim = cur_dim
        self.conv2d = nn.ModuleList(blocks)
        hidden_dim = 1024
        self.linear_layers = nn.Sequential(
            nn.Linear(self.out_chans[-1]*2*2*2, hidden_dim),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim, pred_dim),
        )

    def forward(self, feat):
        """Apply the conv stack and flatten to (B, -1)."""
        B, C, Z, X = list(feat.shape)
        for block in self.conv2d:
            feat = block(feat)
        # feat = self.linear_layers(feat)
        return feat.reshape(B, -1)
class ResNetBottle2D(nn.Module):
    """Residual bottleneck encoder: a stride-2 downsampler, then four
    (residual block + stride-2 downsampler) stages, then a linear head.

    NOTE(review): forward() unpacks a 5D (B, C, Z, Y, X) shape while every
    layer is Conv2d (which requires 4D input), and the linear head's
    in_features uses a 3D-style *2*2*2 spatial product. This class looks
    like a partial port from a 3D version -- confirm the intended input
    rank before relying on forward().
    """
    def __init__(self, in_channel, pred_dim, chans=64):
        super(ResNetBottle2D, self).__init__()
        # first layer - downsampling
        in_dim, out_dim, ksize, stride, padding = in_channel, chans, 4, 2, 1
        self.down_sampler0 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        # residual blocks keep the channel count at `chans`
        in_dim, out_dim, ksize, stride, padding = chans, chans, 3, 1, 1
        self.res_block1 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.res_block2 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.res_block3 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.res_block4 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.down_sampler1 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.down_sampler2 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.down_sampler3 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.down_sampler4 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.lrelu = nn.LeakyReLU()
        # # final 1x1x1 conv to get our desired pred_dim
        # self.final_feature = nn.Conv2d(in_channels=chans, out_channels=pred_dim, kernel_size=1, stride=1, padding=0)
        self.linear_layers = nn.Sequential(
            nn.Linear(out_dim*2*2*2, 512),
            nn.LeakyReLU(),
            # Bugfix: this layer's in_features was 64, which can never match
            # the 512 features produced by the preceding Linear+LeakyReLU,
            # so forward() always crashed at the head.
            nn.Linear(512, pred_dim),
        )

    def generate_block(self, in_dim, out_dim, ksize, stride, padding):
        """1x1 -> 3x3 -> 1x1 conv stack with BN; no final activation
        (the caller adds the residual and then applies LeakyReLU)."""
        block = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, stride=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, stride=1),
            nn.BatchNorm2d(num_features=out_dim),
        )
        return block

    def forward(self, feat):
        """Downsample through the residual stages and apply the linear head."""
        # NOTE(review): 5D unpack with Conv2d layers -- see class docstring.
        B, C, Z, Y, X = list(feat.shape)
        feat = self.down_sampler0(feat)
        feat_before = feat
        feat_after = self.res_block1(feat)
        feat = feat_before + feat_after
        feat = self.lrelu(feat)
        feat = self.down_sampler1(feat)
        feat_before = feat
        feat_after = self.res_block2(feat)
        feat = feat_before + feat_after
        feat = self.lrelu(feat)
        feat = self.down_sampler2(feat)
        feat_before = feat
        feat_after = self.res_block3(feat)
        feat = feat_before + feat_after
        feat = self.lrelu(feat)
        feat = self.down_sampler3(feat)
        feat_before = feat
        feat_after = self.res_block4(feat)
        feat = feat_before + feat_after
        feat = self.lrelu(feat)
        feat = self.down_sampler4(feat)
        # print(feat.shape)  # (leftover debug print, disabled)
        feat = feat.reshape(B, -1)
        feat = self.linear_layers(feat)
        return feat
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,406
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/nets/emb2dnet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
import archs.encoder2d as encoder2d
import hyperparams as hyp
import utils.basic
import utils.misc
import utils.improc
class Emb2dNet(nn.Module):
    """Contrastive / metric-learning losses between two 2D embedding maps.

    Given an "estimated" embedding map emb_e and a "ground truth" map emb_g
    (both B x C x H x W) plus a validity mask, forward() computes a margin
    loss over distance-weighted sampled pairs, an InfoNCE-style
    cross-entropy loss against a pool of negatives, and a masked L2 loss.
    """
    def __init__(self):
        super(Emb2dNet, self).__init__()
        print('Emb2dNet...')
        # batch_k = samples per class fed to the distance-weighted sampler
        self.batch_k = 2
        self.num_samples = hyp.emb2d_num_samples
        assert(self.num_samples > 0)
        self.sampler = utils.misc.DistanceWeightedSampling(batch_k=self.batch_k, normalize=False)
        self.criterion = utils.misc.MarginLoss() #margin=args.margin,nu=args.nu)
        self.beta = 1.2
        # FIFO pool of negative embeddings for the CE (InfoNCE-style) loss
        self.dict_len = 20000
        self.neg_pool = utils.misc.SimplePool(self.dict_len, version='pt')
        self.ce = torch.nn.CrossEntropyLoss()

    def sample_embs(self, emb0, emb1, valid, B, Y, X, mod='', do_vis=False, summ_writer=None):
        """Sample corresponding embedding vectors from the two maps.

        emb0/emb1 are flattened maps (B x Y*X x C); valid is B x Y*X x 1.
        With hyp.emb2d_mindist == 0 sampling is a pure random permutation;
        otherwise samples are chosen with a minimum spatial separation via
        utils.misc.get_safe_samples. Returns (emb0_s, emb1_s, valid_s),
        each with B*num_samples rows.
        """
        if hyp.emb2d_mindist == 0.0:
            # pure random
            perm = torch.randperm(B*Y*X)
            emb0 = emb0.reshape(B*Y*X, -1)
            emb1 = emb1.reshape(B*Y*X, -1)
            valid = valid.reshape(B*Y*X, -1)
            emb0 = emb0[perm[:self.num_samples*B]]
            emb1 = emb1[perm[:self.num_samples*B]]
            valid = valid[perm[:self.num_samples*B]]
            return emb0, emb1, valid
        else:
            emb0_all = []
            emb1_all = []
            valid_all = []
            for b in list(range(B)):
                # per-element sampling with the min-distance constraint
                sample_indices, sample_locs, sample_valids = utils.misc.get_safe_samples(
                    valid[b], (Y, X), self.num_samples, mode='2d', tol=hyp.emb2d_mindist)
                emb0_s_ = emb0[b, sample_indices]
                emb1_s_ = emb1[b, sample_indices]
                # these are N x D
                emb0_all.append(emb0_s_)
                emb1_all.append(emb1_s_)
                valid_all.append(sample_valids)
            if do_vis and (summ_writer is not None):
                # NOTE(review): sample_locs here comes from the last batch
                # element only -- presumably a debug visualization; confirm.
                sample_mask = utils.improc.xy2mask_single(sample_locs, Y, X)
                summ_writer.summ_oned('emb2d/samples_%s/sample_mask' % mod, torch.unsqueeze(sample_mask, dim=0))
                summ_writer.summ_oned('emb2d/samples_%s/valid' % mod, torch.reshape(valid, [B, 1, Y, X]))
            emb0_all = torch.cat(emb0_all, axis=0)
            emb1_all = torch.cat(emb1_all, axis=0)
            valid_all = torch.cat(valid_all, axis=0)
            return emb0_all, emb1_all, valid_all

    def compute_margin_loss(self, B, C, Y, X, emb0_vec, emb1_vec, valid_vec, mod='', do_vis=False, summ_writer=None):
        """Margin loss over distance-weighted (anchor, pos, neg) triplets.

        Pairs are interleaved so dim 0 goes e,g,e,g,...; each (e,g) pair
        forms one class of size batch_k=2 for the sampler.
        """
        emb0_vec, emb1_vec, valid_vec = self.sample_embs(
            emb0_vec,
            emb1_vec,
            valid_vec,
            B, Y, X,
            mod=mod,
            do_vis=do_vis,
            summ_writer=summ_writer)
        emb_vec = torch.stack((emb0_vec, emb1_vec), dim=1).view(B*self.num_samples*self.batch_k,C)
        # this tensor goes e,g,e,g,... on dim 0
        # note this means 2 samples per class; batch_k=2
        y = torch.stack([torch.arange(0,self.num_samples*B), torch.arange(0,self.num_samples*B)], dim=1).view(self.num_samples*B*self.batch_k)
        # this tensor goes 0,0,1,1,2,2,...
        a_indices, anchors, positives, negatives, _ = self.sampler(emb_vec)
        margin_loss, _ = self.criterion(anchors, positives, negatives, self.beta, y[a_indices])
        return margin_loss

    def compute_ce_loss(self, B, C, Y, X, emb_e_vec_all, emb_g_vec_all, valid_vec_all, mod='', do_vis=False, summ_writer=None):
        """InfoNCE-style cross-entropy loss against a pool of negatives.

        Positives are matching (e, g) samples; negatives come from a FIFO
        pool (self.neg_pool) that is refreshed with a second, independent
        draw of g samples each call. Temperature is fixed at 0.07.
        """
        emb_e_vec, emb_g_vec, valid_vec = self.sample_embs(emb_e_vec_all,
                                                           emb_g_vec_all,
                                                           valid_vec_all,
                                                           B, Y, X,
                                                           mod=mod,
                                                           do_vis=do_vis,
                                                           summ_writer=summ_writer)
        # independent second draw; only its g-side is used, as fresh negatives
        _, emb_n_vec, _ = self.sample_embs(emb_e_vec_all,
                                           emb_g_vec_all,
                                           valid_vec_all,
                                           B, Y, X,
                                           mod=mod,
                                           do_vis=do_vis,
                                           summ_writer=summ_writer)
        emb_e_vec = emb_e_vec.view(B*self.num_samples, C)
        emb_g_vec = emb_g_vec.view(B*self.num_samples, C)
        emb_n_vec = emb_n_vec.view(B*self.num_samples, C)
        self.neg_pool.update(emb_n_vec.cpu())
        # print('neg_pool len:', len(self.neg_pool))
        emb_n = self.neg_pool.fetch().cuda()
        # print('emb_n', emb_n.shape)
        N2, C2 = list(emb_n.shape)
        assert (C2 == C)
        # l_negs = torch.mm(q.view(N, C), negs.view(C, N2)) # this is N x N2
        emb_q = emb_e_vec.clone()
        emb_k = emb_g_vec.clone()
        # print('emb_q', emb_q.shape)
        # print('emb_k', emb_k.shape)
        N = emb_q.shape[0]
        # positive logits: dot product of each (q, k) pair
        l_pos = torch.bmm(emb_q.view(N,1,-1), emb_k.view(N,-1,1))
        # print('l_pos', l_pos.shape)
        # negative logits: q against every pooled negative
        l_neg = torch.mm(emb_q, emb_n.T)
        # print('l_neg', l_neg.shape)
        l_pos = l_pos.view(N, 1)
        # print('l_pos', l_pos.shape)
        logits = torch.cat([l_pos, l_neg], dim=1)
        # the positive is always at column 0
        labels = torch.zeros(N, dtype=torch.long).cuda()
        temp = 0.07
        emb_loss = self.ce(logits/temp, labels)
        # print('emb_loss', emb_loss.detach().cpu().numpy())
        return emb_loss

    def forward(self, emb_e, emb_g, valid, summ_writer=None, suffix=''):
        """Compute margin + CE + masked-L2 losses between the two maps.

        emb_e, emb_g: B x C x H x W embedding maps; valid: B x 1 x H x W.
        emb_g is detached for the CE and L2 terms, so gradients flow only
        into emb_e there. Returns (total_loss, emb_g).
        """
        total_loss = torch.tensor(0.0).cuda()
        if torch.isnan(emb_e).any() or torch.isnan(emb_g).any():
            assert(False)
        B, C, H, W = list(emb_e.shape)
        # put channels on the end
        emb_e_vec = emb_e.permute(0,2,3,1).reshape(B, H*W, C)
        emb_g_vec = emb_g.permute(0,2,3,1).reshape(B, H*W, C)
        valid_vec = valid.permute(0,2,3,1).reshape(B, H*W, 1)
        assert(self.num_samples < (B*H*W))
        # we will take num_samples from each one
        margin_loss = self.compute_margin_loss(B, C, H, W, emb_e_vec, emb_g_vec, valid_vec, 'all', True, summ_writer)
        total_loss = utils.misc.add_loss('emb2d/emb2d_ml_loss%s' % suffix, total_loss, margin_loss, hyp.emb2d_ml_coeff, summ_writer)
        ce_loss = self.compute_ce_loss(B, C, H, W, emb_e_vec, emb_g_vec.detach(), valid_vec, 'g', False, summ_writer)
        total_loss = utils.misc.add_loss('emb2d/emb_ce_loss', total_loss, ce_loss, hyp.emb2d_ce_coeff, summ_writer)
        l2_loss_im = utils.basic.sql2_on_axis(emb_e-emb_g.detach(), 1, keepdim=True)
        emb_l2_loss = utils.basic.reduce_masked_mean(l2_loss_im, valid)
        total_loss = utils.misc.add_loss('emb2d/emb2d_l2_loss%s' % suffix, total_loss, emb_l2_loss, hyp.emb2d_l2_coeff, summ_writer)
        if summ_writer is not None:
            summ_writer.summ_oned('emb2d/emb2d_l2_loss%s' % suffix, l2_loss_im)
            summ_writer.summ_feats('emb2d/embs_2d%s' % suffix, [emb_e, emb_g], pca=True)
        return total_loss, emb_g
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,407
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/nets/egonet.py
|
import numpy as np
import hyperparams as hyp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.ops as ops
import utils.basic
import utils.improc
import utils.geom
import utils.misc
import utils.samp
EPS = 1e-6
# acknowledgement:
# Niles Christensen and Sohil Samir Savla developed the pytorch port of the original egonet.py, written in tensorflow
def eval_against_gt(loss, cam0_T_cam1_e, cam0_T_cam1_g,
                    t_coeff=0.0, deg_coeff=0.0, sc=1.0,
                    summ_writer=None):
    """Score an estimated egomotion against ground truth.

    Both cam0_T_cam1_e and cam0_T_cam1_g are B x 4 x 4 transforms. Adds a
    squared-L2 penalty on the translation and on the yaw angle (converted
    to degrees) to `loss` via utils.misc.add_loss, tagged with scale `sc`.
    Returns the updated loss.
    """
    rot_e, trans_e = utils.geom.split_rt(cam0_T_cam1_e)
    rot_g, trans_g = utils.geom.split_rt(cam0_T_cam1_g)
    _, yaw_e, _ = utils.geom.rotm2eul(rot_e)
    _, yaw_g, _ = utils.geom.rotm2eul(rot_g)
    deg_e = torch.unsqueeze(utils.geom.rad2deg(yaw_e), axis=-1)
    deg_g = torch.unsqueeze(utils.geom.rad2deg(yaw_g), axis=-1)
    # translation error
    t_err = torch.mean(utils.basic.sql2_on_axis(trans_e - trans_g, 1))
    loss = utils.misc.add_loss('t_sql2_%.2f' % sc, loss, t_err, t_coeff,
                               summ_writer=summ_writer)
    # yaw error, in degrees
    deg_err = torch.mean(utils.basic.sql2_on_axis(deg_e - deg_g, 1))
    loss = utils.misc.add_loss('deg_sql2_%.2f' % sc, loss, deg_err, deg_coeff,
                               summ_writer=summ_writer)
    return loss
def cost_volume_3D(vox0, vox1,
                   max_disp_z=4,
                   max_disp_y=1,
                   max_disp_x=4):
    """Dense 3D correlation cost volume between two feature volumes.

    vox0, vox1: B x C x Z x Y x X.
    Returns B x E x Z x Y x X, where E = E1*E2*E3 and E_i = 2*max_disp_i+1:
    one masked mean-correlation channel per integer displacement.
    """
    ones = torch.ones_like(vox1)
    # F.pad takes pads for the LAST dimension first, i.e.
    # (X_lo, X_hi, Y_lo, Y_hi, Z_lo, Z_hi) for a B x C x Z x Y x X tensor.
    # Bugfix: the original passed the z pads first, which padded X by
    # max_disp_z and Z by max_disp_x -- wrong whenever max_disp_z !=
    # max_disp_x (the defaults happen to be equal, hiding the bug).
    pad = (max_disp_x, max_disp_x,
           max_disp_y, max_disp_y,
           max_disp_z, max_disp_z)
    vox1_pad = F.pad(vox1, pad, 'constant', 0)
    ones_pad = F.pad(ones, pad, 'constant', 0)
    _, _, d, h, w = vox0.shape
    loop_range1 = max_disp_z * 2 + 1
    loop_range2 = max_disp_y * 2 + 1
    loop_range3 = max_disp_x * 2 + 1
    cost_vol = []
    for z in range(0, loop_range1):
        for y in range(0, loop_range2):
            for x in range(0, loop_range3):
                # correlate vox0 with a shifted crop of vox1; the ones mask
                # zeroes out contributions from the out-of-bounds padding
                vox1_slice = vox1_pad[:, :, z:z+d, y:y+h, x:x+w]
                ones_slice = ones_pad[:, :, z:z+d, y:y+h, x:x+w]
                cost = utils.basic.reduce_masked_mean(vox0*vox1_slice, ones_slice, dim=1, keepdim=True)
                cost_vol.append(cost)
    cost_vol = torch.cat(cost_vol, axis=1)
    return cost_vol
class EgoNet(nn.Module):
def __init__(self,
             num_scales=1,
             num_rots=3,
             max_deg=4,
             max_disp_z=1,
             max_disp_y=1,
             max_disp_x=1):
    """Set up the rotation/translation search space and the MLP head.

    Args:
        num_scales: 1 or 2; number of coarse-to-fine scales.
        num_rots: number of candidate yaw rotations to test.
        max_deg: max rotation (degrees) on either side of zero.
        max_disp_*: max integer displacement searched along each axis.
    """
    print('EgoNet...')
    super(EgoNet, self).__init__()
    # Bugfix: the original tested `if num_scales:`, which is truthy for ANY
    # nonzero value, so num_scales=2 still produced scales=[1] and the
    # two-scale branch was unreachable.
    if num_scales==1:
        self.scales = [1]
    elif num_scales==2:
        self.scales = [0.5, 1]
    else:
        assert(False)  # only 1-2 scales supported right now
    self.R = num_rots
    self.max_deg = max_deg  # max degrees rotation, on either side of zero
    self.max_disp_z = max_disp_z
    self.max_disp_y = max_disp_y
    self.max_disp_x = max_disp_x
    # search extents per axis: E_i = 2*max_disp_i + 1
    self.E1 = self.max_disp_z*2 + 1
    self.E2 = self.max_disp_y*2 + 1
    self.E3 = self.max_disp_x*2 + 1
    self.E = self.E1*self.E2*self.E3
    # MLP head: R*E correlation scores -> 4 pose parameters
    self.first_layer = nn.Linear(self.R*self.E, 128).cuda()
    self.second_layer = nn.Linear(128, 128).cuda()
    self.third_layer = nn.Linear(128, 4).cuda()
def forward(self, feat0, feat1, cam0_T_cam1_g, vox_util, summ_writer, reuse=False):
    """Estimate the egomotion aligning feat1 to feat0.

    feat0, feat1: feature volumes of identical shape. Delegates the actual
    coarse-to-fine search to multi_scale_corr3Dr, seeding the loss at 0.0.
    Returns (total_loss, cam0_T_cam1_e, feat1_warped).
    """
    utils.basic.assert_same_shape(feat0, feat1)
    summ_writer.summ_feats('ego/feats', [feat0, feat1], pca=True)
    loss, cam0_T_cam1_e, feat1_warped = self.multi_scale_corr3Dr(
        0.0, feat0, feat1, vox_util, summ_writer, cam0_T_cam1_g, reuse=reuse)
    return loss, cam0_T_cam1_e, feat1_warped
def multi_scale_corr3Dr(self, total_loss, feat0, feat1, vox_util, summ_writer, cam0_T_cam1_g=None, reuse=False, do_print=False):
# the idea here is:
# at each scale, find the answer, and then warp
# to make the next scale closer to the answer
# this allows a small displacement to be effective at each scale
alignments = []
B, C, Z, Y, X = list(feat0.size())
utils.basic.assert_same_shape(feat0, feat1)
summ_writer.summ_feat('ego/feat0', feat0, pca=True)
summ_writer.summ_feat('ego/feat1', feat1, pca=True)
if (cam0_T_cam1_g is not None):
eye = utils.geom.eye_4x4(B)
_ = eval_against_gt(0, eye, cam0_T_cam1_g, sc=0.0, summ_writer=summ_writer)
feat1_backup = feat1.clone()
rots = torch.linspace(-self.max_deg, self.max_deg, self.R)
rots = torch.reshape(rots, [self.R])
rot_cam_total = torch.zeros([B])
delta_cam_total = torch.zeros([B, 3])
for sc in self.scales:
Z_ = int(Z*sc)
Y_ = int(Y*sc)
X_ = int(X*sc)
if not sc==1.0:
feat0_ = F.interpolate(feat0, scale_factor=sc, mode='trilinear')
feat1_ = F.interpolate(feat1, scale_factor=sc, mode='trilinear')
else:
feat0_ = feat0.clone()
feat1_ = feat1.clone()
# have a heatmap at least sized 3, so that an argmax is capable of returning 0
valid_Z = Z_-self.max_disp_z*2
valid_Y = Y_-self.max_disp_y*2
valid_X = X_-self.max_disp_x*2
assert(valid_Z >= 3)
assert(valid_Y >= 3)
assert(valid_X >= 3)
summ_writer.summ_feat('ego/feat0_resized_%.3f' % sc, feat0_, pca=True)
summ_writer.summ_feat('ego/feat1_resized_%.3f' % sc, feat1_, pca=True)
## now we want to rotate the features into all of the orientations
# first we define the orientations
r0 = torch.zeros([B*self.R])
ry = torch.unsqueeze(rots, axis=0).repeat([B, 1]).reshape([B*self.R])
r = utils.geom.eul2rotm(r0, utils.geom.deg2rad(ry), r0)
t = torch.zeros([B*self.R, 3])
# this will carry us from "1" coords to "N" (new) coords
camN_T_cam1 = utils.geom.merge_rt(r, t)
# this is B*R x 4 x 4
# we want to apply this to feat1
# we first need the feats to lead with B*R
feat0_ = torch.unsqueeze(feat0_, axis=1).repeat([1, self.R, 1, 1, 1, 1])
feat1_ = torch.unsqueeze(feat1_, axis=1).repeat([1, self.R, 1, 1, 1, 1])
feat0_ = feat0_.reshape([B*self.R, C, Z_, Y_, X_])
feat1_ = feat1_.reshape([B*self.R, C, Z_, Y_, X_])
featN_ = vox_util.apply_4x4_to_vox(camN_T_cam1, feat1_)
featN__ = featN_.reshape([B, self.R, C, Z_, Y_, X_])
summ_writer.summ_feats('ego/featN_%.3f_postwarp' % sc, torch.unbind(featN__, axis=1), pca=False)
cc = cost_volume_3D(feat0_,
featN_,
max_disp_z=self.max_disp_z,
max_disp_y=self.max_disp_y,
max_disp_x=self.max_disp_x)
# cc is B*R x Z_ x Y_ x X_ x E,
# i.e., each spatial location has a heatmap squished into the E dim
# reduce along the spatial dims
heat = torch.sum(cc, axis=[2,3,4])
# flesh out the heatmaps
heat = heat.reshape([B, self.R, 1, self.E1, self.E2, self.E3])
# have a look
summ_writer.summ_oned('ego/heat_%.3f' % sc, torch.mean(heat[0], axis=-2, keepdim=False))
feat = heat.reshape([B, self.R*self.E])
feat = F.leaky_relu(feat, negative_slope=0.1)
# relja said normalizing helps:
feat_norm = utils.basic.l2_on_axis(feat, 1, keepdim=True)
feat = feat/(EPS+feat_norm)
feat = self.first_layer(feat)
feat = F.leaky_relu(feat, negative_slope=0.1)
feat = self.second_layer(feat)
feat = F.leaky_relu(feat, negative_slope=0.1)
feat = self.third_layer(feat)
r, y, x, z = torch.unbind(feat, axis=1)
# convert the mem argmax into a translation in cam coords
xyz_argmax_mem = torch.unsqueeze(torch.stack([x, y, z], axis=1), axis=1)
xyz_zero_mem = torch.zeros([B, 1, 3])
# in the transformation, use Y*sc instead of Y_, in case we cropped instead of scaled
xyz_argmax_cam = vox_util.Mem2Ref(xyz_argmax_mem.cuda(), int(Z*sc), int(Y*sc), int(X*sc))
xyz_zero_cam = vox_util.Mem2Ref(xyz_zero_mem.cuda(), int(Z*sc), int(Y*sc), int(X*sc))
xyz_delta_cam = xyz_argmax_cam-xyz_zero_cam
# mem is aligned with cam, and scaling does not affect rotation
rot_cam = r.clone()
summ_writer.summ_histogram('xyz_delta_cam', xyz_delta_cam)
summ_writer.summ_histogram('rot_cam', rot_cam)
delta_cam_total += xyz_delta_cam.reshape([B, 3]).cpu()
rot_cam_total += rot_cam.cpu()
r0 = torch.zeros([B])
cam0_T_cam1_e = utils.geom.merge_rt(utils.geom.eul2rotm(r0,
utils.geom.deg2rad(rot_cam_total),
r0),
-delta_cam_total)
# bring feat1_backup into alignment with feat0, using the cumulative RT
# if the estimate were perfect, this would yield feat0, but let's continue to call it feat1
feat1 = vox_util.apply_4x4_to_vox(cam0_T_cam1_e, feat1_backup)
# we will use feat1 in the next iteration of the loop
if (cam0_T_cam1_g is not None):
total_loss = eval_against_gt(total_loss, cam0_T_cam1_e, cam0_T_cam1_g,
t_coeff=hyp.ego_t_l2_coeff*sc,
deg_coeff=hyp.ego_deg_l2_coeff*sc,
sc=sc,
summ_writer=summ_writer)
return total_loss, cam0_T_cam1_e, feat1
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,408
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/nets/viewnet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
import archs.renderer
import hyperparams as hyp
from utils.basic import *
import utils.improc
import utils.basic
import utils.misc
import utils.geom
class ViewNet(nn.Module):
    """Render a 3D feature volume into a 2D view.

    Projects feat_mem1 into camera 0, decodes the projection into an RGB
    image and a 2D embedding map, and (when training) applies an L1 loss
    against the ground-truth RGB.
    """
    def __init__(self):
        super(ViewNet, self).__init__()
        print('ViewNet...')
        # 3D-to-2D renderer backbone; the depth axis is pooled away (depth_pool=8)
        self.net = archs.renderer.Net3d2d(hyp.feat3d_dim, 64, 32, hyp.view_depth, depth_pool=8).cuda()
        # decodes rendered 32-channel features into a 3-channel RGB image
        self.rgb_layer = nn.Sequential(
            nn.LeakyReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0),
        ).cuda()
        # decodes rendered features into a 2D embedding map of hyp.feat2d_dim channels
        self.emb_layer = nn.Sequential(
            nn.LeakyReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(32, hyp.feat2d_dim, kernel_size=1, stride=1, padding=0),
        ).cuda()
        print(self.net)
    def forward(self, pix_T_cam0, cam0_T_cam1, feat_mem1, rgb_g, vox_util, valid=None, summ_writer=None, test=False, suffix=''):
        """Return (total_loss, rgb_e, emb); in test mode returns (None, rgb_e, None)."""
        total_loss = torch.tensor(0.0).cuda()
        B, C, H, W = list(rgb_g.shape)
        PH, PW = hyp.PH, hyp.PW
        if (PH < H) or (PW < W):
            # print('H, W', H, W)
            # print('PH, PW', PH, PW)
            sy = float(PH)/float(H)
            sx = float(PW)/float(W)
            pix_T_cam0 = utils.geom.scale_intrinsics(pix_T_cam0, sx, sy)
            # NOTE(review): the interpolations below use a hard-coded 0.5 while
            # sy/sx are computed generally -- this assumes PH=H/2 and PW=W/2;
            # confirm against the experiment configs
            if valid is not None:
                valid = F.interpolate(valid, scale_factor=0.5, mode='nearest')
            rgb_g = F.interpolate(rgb_g, scale_factor=0.5, mode='bilinear')
        # project the 3D feature volume into camera 0's image plane
        feat_proj = vox_util.apply_pixX_T_memR_to_voxR(
            pix_T_cam0, cam0_T_cam1, feat_mem1,
            hyp.view_depth, PH, PW)
        feat = self.net(feat_proj)
        rgb = self.rgb_layer(feat)
        emb = self.emb_layer(feat)
        emb = utils.basic.l2_normalize(emb, dim=1)
        if test:
            # inference-only path: skip losses and summaries
            return None, rgb, None
        # per-pixel L1 between predicted and ground-truth RGB
        loss_im = utils.basic.l1_on_axis(rgb-rgb_g, 1, keepdim=True)
        if valid is not None:
            rgb_loss = utils.basic.reduce_masked_mean(loss_im, valid)
        else:
            rgb_loss = torch.mean(loss_im)
        total_loss = utils.misc.add_loss('view/rgb_l1_loss', total_loss, rgb_loss, hyp.view_l1_coeff, summ_writer)
        # vis
        if summ_writer is not None:
            summ_writer.summ_oned('view/rgb_loss', loss_im)
            summ_writer.summ_rgbs('view/rgb', [rgb.clamp(-0.5, 0.5), rgb_g])
            summ_writer.summ_rgb('view/rgb_e', rgb.clamp(-0.5, 0.5))
            summ_writer.summ_rgb('view/rgb_g', rgb_g.clamp(-0.5, 0.5))
            summ_writer.summ_feat('view/emb', emb, pca=True)
            if valid is not None:
                summ_writer.summ_rgb('view/rgb_e_valid', valid*rgb.clamp(-0.5, 0.5))
                summ_writer.summ_rgb('view/rgb_g_valid', valid*rgb_g.clamp(-0.5, 0.5))
        return total_loss, rgb, emb
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,409
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/exp_base.py
|
import pretrained_nets_carla as pret_carla
# `exps` maps an experiment name to a list of group names; `groups` maps a
# group name to a list of 'var = value' strings that the exp_* scripts
# verify and exec into their module globals.
exps = {}
groups = {}
############## training settings ##############
groups['train_feat3d'] = [
    'do_feat3d = True',
    'feat3d_dim = 32',
    # 'feat3d_smooth_coeff = 0.01',
]
groups['train_det'] = [
    'do_det = True',
    'det_prob_coeff = 1.0',
    'det_reg_coeff = 1.0',
]
############## dataset settings ##############
H = 128
W = 384
groups['seqlen1'] = [
    'trainset_seqlen = 1',
    'valset_seqlen = 1',
]
# scene bounds in meters: X is right, Y is down, Z is forward; each group
# name is '<X>-<Y>-<Z>_bounds' and the volume is symmetric about the origin.
# (DRY: generated instead of one hand-written group per bound set.)
for _bx, _by, _bz in [(8, 4, 8), (16, 4, 16), (16, 8, 16)]:
    groups['%d-%d-%d_bounds' % (_bx, _by, _bz)] = [
        'XMIN = -%.1f' % _bx, # right (neg is left)
        'XMAX = %.1f' % _bx, # right
        'YMIN = -%.1f' % _by, # down (neg is up)
        'YMAX = %.1f' % _by, # down
        'ZMIN = -%.1f' % _bz, # forward (neg is backward)
        'ZMAX = %.1f' % _bz, # forward
    ]
dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"
groups['carla_multiview_10_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "mags7i3ten"',
    'trainset_format = "multiview"',
    # 'trainset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_multiview_train_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "mags7i3t"',
    'trainset_format = "multiview"',
    # 'trainset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_multiview_test_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'testset = "mags7i3v"',
    'testset_format = "multiview"',
    # 'testset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_multiview_train_val_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "mags7i3t"',
    'trainset_format = "multiview"',
    # 'trainset_seqlen = %d' % S,
    'valset = "mags7i3v"',
    'valset_format = "multiview"',
    # 'valset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
############## other settings ##############
groups['include_summs'] = [
    'do_include_summs = True',
]
groups['decay_lr'] = ['do_decay_lr = True']
groups['clip_grad'] = ['do_clip_grad = True']
# groups['quick_snap'] = ['snap_freq = 500']
# groups['quicker_snap'] = ['snap_freq = 50']
# groups['quickest_snap'] = ['snap_freq = 5']
groups['snap500'] = ['snap_freq = 500']
groups['snap1k'] = ['snap_freq = 1000']
groups['snap5k'] = ['snap_freq = 5000']
groups['no_shuf'] = [
    'shuffle_train = False',
    'shuffle_val = False',
    'shuffle_test = False',
]
groups['time_flip'] = ['do_time_flip = True']
groups['no_backprop'] = [
    'backprop_on_train = False',
    'backprop_on_val = False',
    'backprop_on_test = False',
]
groups['train_on_trainval'] = [
    'backprop_on_train = True',
    'backprop_on_val = True',
    'backprop_on_test = False',
]
# batch sizes: 'BN' for train, 'vBN' for val (DRY: generated)
for _b in [1, 2, 4, 6, 8, 10, 12, 16, 24, 32, 64, 128]:
    groups['B%d' % _b] = ['trainset_batch_size = %d' % _b]
for _b in [1, 2, 4, 8]:
    groups['vB%d' % _b] = ['valset_batch_size = %d' % _b]
# learning rates: 'lrN' means 1e-N; 'lr0' means 0.0; 'MlrN' means Me-N
groups['lr0'] = ['lr = 0.0']
for _e in [1, 2, 3, 4, 5, 6, 7, 8, 9, 12]:
    groups['lr%d' % _e] = ['lr = 1e-%d' % _e]
groups['2lr4'] = ['lr = 2e-4']
groups['5lr4'] = ['lr = 5e-4']
# iteration counts: 'N_iters' and 'Nk_iters' (both 1000-multiples and
# verbose forms are kept, e.g. '1000_iters' and '1k_iters')
for _n in [1, 2, 3, 5, 6, 7, 9, 10, 15, 20, 21, 25, 30, 50, 100, 150, 200,
           250, 300, 397, 400, 447, 500, 850, 1000, 2000, 2445, 3000, 4000,
           4433, 5000, 10000]:
    groups['%d_iters' % _n] = ['max_iters = %d' % _n]
for _k in [1, 2, 5, 10, 20, 30, 40, 50, 60, 80, 100, 200, 300, 400, 500]:
    groups['%dk_iters' % _k] = ['max_iters = %d' % (_k * 1000)]
groups['100k10_iters'] = ['max_iters = 100010']
groups['resume'] = ['do_resume = True']
groups['reset_iter'] = ['reset_iter = True']
# logging frequency: train/val/test all get the same freq (DRY: generated)
for _f in [1, 5, 10, 50, 500, 5000]:
    groups['log%d' % _f] = [
        'log_freq_train = %d' % _f,
        'log_freq_val = %d' % _f,
        'log_freq_test = %d' % _f,
    ]
groups['no_logging'] = [
    'log_freq_train = 100000000000',
    'log_freq_val = 100000000000',
    'log_freq_test = 100000000000',
]
# ############## pretrained nets ##############
# groups['pretrained_sigen3d'] = [
# 'do_sigen3d = True',
# 'sigen3d_init = "' + pret_carla.sigen3d_init + '"',
# ]
# groups['pretrained_conf'] = [
# 'do_conf = True',
# 'conf_init = "' + pret_carla.conf_init + '"',
# ]
# groups['pretrained_up3D'] = [
# 'do_up3D = True',
# 'up3D_init = "' + pret_carla.up3D_init + '"',
# ]
# groups['pretrained_center'] = [
# 'do_center = True',
# 'center_init = "' + pret_carla.center_init + '"',
# ]
# groups['pretrained_seg'] = [
# 'do_seg = True',
# 'seg_init = "' + pret_carla.seg_init + '"',
# ]
# groups['pretrained_motionreg'] = [
# 'do_motionreg = True',
# 'motionreg_init = "' + pret_carla.motionreg_init + '"',
# ]
# groups['pretrained_gen3d'] = [
# 'do_gen3d = True',
# 'gen3d_init = "' + pret_carla.gen3d_init + '"',
# ]
# groups['pretrained_vq2d'] = [
# 'do_vq2d = True',
# 'vq2d_init = "' + pret_carla.vq2d_init + '"',
# 'vq2d_num_embeddings = %d' % pret_carla.vq2d_num_embeddings,
# ]
# groups['pretrained_vq3d'] = [
# 'do_vq3d = True',
# 'vq3d_init = "' + pret_carla.vq3d_init + '"',
# 'vq3d_num_embeddings = %d' % pret_carla.vq3d_num_embeddings,
# ]
# groups['pretrained_feat2D'] = [
# 'do_feat2D = True',
# 'feat2D_init = "' + pret_carla.feat2D_init + '"',
# 'feat2D_dim = %d' % pret_carla.feat2D_dim,
# ]
# pretrained checkpoints: point a net at a saved init from pretrained_nets_carla
groups['pretrained_feat3d'] = [
    'do_feat3d = True',
    'feat3d_init = "%s"' % pret_carla.feat3d_init,
    'feat3d_dim = %d' % pret_carla.feat3d_dim,
]
groups['pretrained_occ'] = [
    'do_occ = True',
    'occ_init = "%s"' % pret_carla.occ_init,
]
# groups['pretrained_match'] = [
# 'do_match = True',
# 'match_init = "' + pret_carla.match_init + '"',
# ]
# groups['pretrained_rigid'] = [
# 'do_rigid = True',
# 'rigid_init = "' + pret_carla.rigid_init + '"',
# ]
# # groups['pretrained_pri2D'] = [
# # 'do_pri2D = True',
# # 'pri2D_init = "' + pret_carla.pri2D_init + '"',
# # ]
# groups['pretrained_det'] = [
# 'do_det = True',
# 'det_init = "' + pret_carla.det_init + '"',
# ]
# groups['pretrained_forecast'] = [
# 'do_forecast = True',
# 'forecast_init = "' + pret_carla.forecast_init + '"',
# ]
# groups['pretrained_view'] = [
# 'do_view = True',
# 'view_init = "' + pret_carla.view_init + '"',
# 'view_depth = %d' % pret_carla.view_depth,
# 'feat2D_dim = %d' % pret_carla.feat2D_dim,
# # 'view_use_halftanh = ' + str(pret_carla.view_use_halftanh),
# # 'view_pred_embs = ' + str(pret_carla.view_pred_embs),
# # 'view_pred_rgb = ' + str(pret_carla.view_pred_rgb),
# ]
# groups['pretrained_flow'] = ['do_flow = True',
# 'flow_init = "' + pret_carla.flow_init + '"',
# ]
# # groups['pretrained_tow'] = ['do_tow = True',
# # 'tow_init = "' + pret_carla.tow_init + '"',
# # ]
# groups['pretrained_emb2D'] = ['do_emb2D = True',
# 'emb2D_init = "' + pret_carla.emb2D_init + '"',
# # 'emb_dim = %d' % pret_carla.emb_dim,
# ]
# groups['pretrained_preocc'] = [
# 'do_preocc = True',
# 'preocc_init = "' + pret_carla.preocc_init + '"',
# ]
# groups['pretrained_vis'] = ['do_vis = True',
# 'vis_init = "' + pret_carla.vis_init + '"',
# # 'occ_cheap = ' + str(pret_carla.occ_cheap),
# ]
# groups['total_init'] = ['total_init = "' + pret_carla.total_init + '"']
# groups['pretrained_optim'] = ['optim_init = "' + pret_carla.optim_init + '"']
# groups['frozen_conf'] = ['do_freeze_conf = True', 'do_conf = True']
# groups['frozen_motionreg'] = ['do_freeze_motionreg = True', 'do_motionreg = True']
# groups['frozen_feat2D'] = ['do_freeze_feat2D = True', 'do_feat2D = True']
# groups['frozen_feat3D'] = ['do_freeze_feat3D = True', 'do_feat3D = True']
# groups['frozen_up3D'] = ['do_freeze_up3D = True', 'do_up3D = True']
# groups['frozen_vq3d'] = ['do_freeze_vq3d = True', 'do_vq3d = True']
# groups['frozen_view'] = ['do_freeze_view = True', 'do_view = True']
# groups['frozen_center'] = ['do_freeze_center = True', 'do_center = True']
# groups['frozen_seg'] = ['do_freeze_seg = True', 'do_seg = True']
# groups['frozen_vis'] = ['do_freeze_vis = True', 'do_vis = True']
# groups['frozen_flow'] = ['do_freeze_flow = True', 'do_flow = True']
# groups['frozen_match'] = ['do_freeze_match = True', 'do_match = True']
# groups['frozen_emb2D'] = ['do_freeze_emb2D = True', 'do_emb2D = True']
# groups['frozen_pri2D'] = ['do_freeze_pri2D = True', 'do_pri2D = True']
# groups['frozen_occ'] = ['do_freeze_occ = True', 'do_occ = True']
# groups['frozen_vq2d'] = ['do_freeze_vq2d = True', 'do_vq2d = True']
# groups['frozen_vq3d'] = ['do_freeze_vq3d = True', 'do_vq3d = True']
# groups['frozen_sigen3d'] = ['do_freeze_sigen3d = True', 'do_sigen3d = True']
# groups['frozen_gen3d'] = ['do_freeze_gen3d = True', 'do_gen3d = True']
# # groups['frozen_ego'] = ['do_freeze_ego = True', 'do_ego = True']
# # groups['frozen_inp'] = ['do_freeze_inp = True', 'do_inp = True']
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,410
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/archs/pixelshuffle3d.py
|
'''
reference: http://www.multisilicon.com/blog/a25332339.html
'''
import torch.nn as nn
class PixelShuffle3d(nn.Module):
    '''3D version of pixel shuffle.

    Rearranges channels into spatial resolution: input of shape
    (B, C, D, H, W) with C divisible by scale**3 becomes
    (B, C // scale**3, D*scale, H*scale, W*scale).
    '''
    def __init__(self, scale):
        '''
        :param scale: upsample scale
        '''
        super().__init__()
        self.scale = scale

    def forward(self, input):
        batch_size, channels, in_depth, in_height, in_width = input.size()
        scale = self.scale
        # Fail loudly: the original floor division silently dropped the
        # remainder, producing a confusing shape error inside .view()
        if channels % (scale ** 3) != 0:
            raise ValueError(
                'channels (%d) must be divisible by scale**3 (%d)'
                % (channels, scale ** 3))
        nOut = channels // scale ** 3
        out_depth = in_depth * scale
        out_height = in_height * scale
        out_width = in_width * scale
        # split channels into (nOut, s, s, s), then interleave each scale
        # factor after its corresponding spatial axis
        input_view = input.contiguous().view(
            batch_size, nOut, scale, scale, scale, in_depth, in_height, in_width)
        output = input_view.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
        return output.view(batch_size, nOut, out_depth, out_height, out_width)
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,411
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/nets/emb3dnet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
import hyperparams as hyp
import utils.improc
import utils.misc
import utils.vox
import utils.basic
class Emb3dNet(nn.Module):
    """Contrastive/metric-learning loss between two 3D embedding volumes
    (emb_e predicted, emb_g reference), combining a cross-entropy (InfoNCE-
    style) term with a pool of negatives and a masked L2 term.
    """
    def __init__(self):
        super(Emb3dNet, self).__init__()
        print('Emb3dNet...')
        self.batch_k = 2
        # number of voxel locations sampled per batch element
        self.num_samples = hyp.emb3d_num_samples
        assert(self.num_samples > 0)
        self.sampler = utils.misc.DistanceWeightedSampling(batch_k=self.batch_k, normalize=False)
        self.criterion = utils.misc.MarginLoss() #margin=args.margin,nu=args.nu)
        self.beta = 1.2
        # capacity of the rolling negative-sample pool
        self.dict_len = 20000
        self.neg_pool = utils.misc.SimplePool(self.dict_len, version='pt')
        self.ce = torch.nn.CrossEntropyLoss()
    def sample_embs(self, emb0, emb1, valid, B, Z, Y, X, mod='', do_vis=False, summ_writer=None):
        """Sample matched embedding vectors (and their validity) from two
        flattened volumes; purely random when emb3d_mindist==0, otherwise
        spatially-separated sampling via get_safe_samples.
        """
        if hyp.emb3d_mindist == 0.0:
            # pure random
            perm = torch.randperm(B*Z*Y*X)
            emb0 = emb0.reshape(B*Z*Y*X, -1)
            emb1 = emb1.reshape(B*Z*Y*X, -1)
            valid = valid.reshape(B*Z*Y*X, -1)
            emb0 = emb0[perm[:self.num_samples*B]]
            emb1 = emb1[perm[:self.num_samples*B]]
            valid = valid[perm[:self.num_samples*B]]
            return emb0, emb1, valid
        else:
            emb0_all = []
            emb1_all = []
            valid_all = []
            for b in list(range(B)):
                # sample locations at least emb3d_mindist apart
                sample_indices, sample_locs, sample_valids = utils.misc.get_safe_samples(
                    valid[b], (Z, Y, X), self.num_samples, mode='3d', tol=hyp.emb3d_mindist)
                emb0_s_ = emb0[b, sample_indices]
                emb1_s_ = emb1[b, sample_indices]
                # these are N x D
                emb0_all.append(emb0_s_)
                emb1_all.append(emb1_s_)
                valid_all.append(sample_valids)
            if do_vis and (summ_writer is not None):
                sample_occ = utils.vox.voxelize_xyz(torch.unsqueeze(sample_locs, dim=0), Z, Y, X, already_mem=True)
                summ_writer.summ_occ('emb3d/samples_%s/sample_occ' % mod, sample_occ, reduce_axes=[2,3])
                summ_writer.summ_occ('emb3d/samples_%s/valid' % mod, torch.reshape(valid, [B, 1, Z, Y, X]), reduce_axes=[2,3])
            emb0_all = torch.cat(emb0_all, axis=0)
            emb1_all = torch.cat(emb1_all, axis=0)
            valid_all = torch.cat(valid_all, axis=0)
            return emb0_all, emb1_all, valid_all
    def compute_ce_loss(self, B, C, Z, Y, X, emb_e_vec_all, emb_g_vec_all, valid_vec_all, mod='', do_vis=False, summ_writer=None):
        """InfoNCE-style CE loss: positives are matched (e, g) samples;
        negatives come from a rolling pool refreshed with fresh g samples.
        """
        emb_e_vec, emb_g_vec, valid_vec = self.sample_embs(emb_e_vec_all,
                                                           emb_g_vec_all,
                                                           valid_vec_all,
                                                           B, Z, Y, X,
                                                           mod=mod,
                                                           do_vis=do_vis,
                                                           summ_writer=summ_writer)
        # a second, independent draw provides fresh negatives for the pool
        _, emb_n_vec, _ = self.sample_embs(emb_e_vec_all,
                                           emb_g_vec_all,
                                           valid_vec_all,
                                           B, Z, Y, X,
                                           mod=mod,
                                           do_vis=do_vis,
                                           summ_writer=summ_writer)
        emb_e_vec = emb_e_vec.view(B*self.num_samples, C)
        emb_g_vec = emb_g_vec.view(B*self.num_samples, C)
        emb_n_vec = emb_n_vec.view(B*self.num_samples, C)
        self.neg_pool.update(emb_n_vec.cpu())
        # print('neg_pool len:', len(self.neg_pool))
        emb_n = self.neg_pool.fetch().cuda()
        # print('emb_n', emb_n.shape)
        N2, C2 = list(emb_n.shape)
        assert (C2 == C)
        # l_negs = torch.mm(q.view(N, C), negs.view(C, N2)) # this is N x N2
        emb_q = emb_e_vec.clone()
        emb_k = emb_g_vec.clone()
        # print('emb_q', emb_q.shape)
        # print('emb_k', emb_k.shape)
        N = emb_q.shape[0]
        # positive logits: dot product of each query with its matched key
        l_pos = torch.bmm(emb_q.view(N,1,-1), emb_k.view(N,-1,1))
        # print('l_pos', l_pos.shape)
        # negative logits: queries against the whole negative pool
        l_neg = torch.mm(emb_q, emb_n.T)
        # print('l_neg', l_neg.shape)
        l_pos = l_pos.view(N, 1)
        # print('l_pos', l_pos.shape)
        logits = torch.cat([l_pos, l_neg], dim=1)
        # the positive is always column 0
        labels = torch.zeros(N, dtype=torch.long).cuda()
        temp = 0.07
        emb_loss = self.ce(logits/temp, labels)
        # print('emb_loss', emb_loss.detach().cpu().numpy())
        return emb_loss
    def forward(self, emb_e, emb_g, vis_e, vis_g, summ_writer=None):
        """Return total loss (CE + masked L2) between the two volumes."""
        total_loss = torch.tensor(0.0).cuda()
        if torch.isnan(emb_e).any() or torch.isnan(emb_g).any():
            assert(False)
        B, C, D, H, W = list(emb_e.shape)
        # put channels on the end
        emb_e_vec = emb_e.permute(0,2,3,4,1).reshape(B, D*H*W, C)
        emb_g_vec = emb_g.permute(0,2,3,4,1).reshape(B, D*H*W, C)
        vis_e_vec = vis_e.permute(0,2,3,4,1).reshape(B, D*H*W, 1)
        vis_g_vec = vis_g.permute(0,2,3,4,1).reshape(B, D*H*W, 1)
        # ensure they are both nonzero, else we probably masked or warped something
        valid_vec_e = 1.0 - (emb_e_vec==0).all(dim=2, keepdim=True).float()
        valid_vec_g = 1.0 - (emb_g_vec==0).all(dim=2, keepdim=True).float()
        valid_vec = valid_vec_e * valid_vec_g
        vis_e_vec *= valid_vec
        vis_g_vec *= valid_vec
        # valid_g = 1.0 - (emb_g==0).all(dim=1, keepdim=True).float()
        assert(self.num_samples < (B*D*H*W))
        # we will take num_samples from each one
        ce_loss = self.compute_ce_loss(B, C, D, H, W, emb_e_vec, emb_g_vec.detach(), vis_g_vec, 'g', False, summ_writer)
        total_loss = utils.misc.add_loss('emb3d/emb_ce_loss', total_loss, ce_loss, hyp.emb3d_ce_coeff, summ_writer)
        # where g is valid, we use it as reference and pull up e
        l2_loss = utils.basic.reduce_masked_mean(utils.basic.sql2_on_axis(emb_e-emb_g.detach(), 1, keepdim=True), vis_g)
        total_loss = utils.misc.add_loss('emb3d/emb3d_l2_loss', total_loss, l2_loss, hyp.emb3d_l2_coeff, summ_writer)
        # NOTE(review): dim=3 here reduces one spatial axis for the summary
        # image -- presumably intentional for the bird's-eye vis; confirm
        l2_loss_im = torch.mean(utils.basic.sql2_on_axis(emb_e-emb_g, 1, keepdim=True), dim=3)
        if summ_writer is not None:
            summ_writer.summ_oned('emb3d/emb3d_l2_loss', l2_loss_im)
            summ_writer.summ_feats('emb3d/embs_3d', [emb_e, emb_g], pca=True)
        return total_loss
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,412
|
aharley/neural_3d_mapping
|
refs/heads/master
|
/exp_carla_det.py
|
from exp_base import *
############## choose an experiment ##############
current = 'det_builder'
current = 'det_trainer' # the later assignment wins; the line above is a quick toggle
# `mod` is a short run tag; the reassignments below double as a run changelog
mod = '"det00"' # go
mod = '"det01"' # rescore with inbound
mod = '"det02"' # show scores
mod = '"det03"' # show bev too
mod = '"det04"' # narrower bounds, to see
mod = '"det05"' # print scorelists
mod = '"det06"' # rescore actually
mod = '"det07"' # print score no matter what
mod = '"det08"' # float2str
mod = '"det09"' # run feat3d
mod = '"det10"' # really run feat3d
mod = '"det11"' # get axboxlist
mod = '"det12"' # solid centorid
mod = '"det13"' # update lrtlist util
mod = '"det14"' # run detnet
mod = '"det15"' #
mod = '"det16"' # train a whiel
mod = '"det17"' # bugfix
mod = '"det18"' # return early if score < B/2
mod = '"det19"' # new utils
mod = '"det20"' # B2
mod = '"det21"' # B4
mod = '"det22"' # clean up
mod = '"det23"' # rand centroid
mod = '"det24"' # padding 0
mod = '"det25"' # avoid warping to R0
mod = '"det26"' # scorelist *= inbound
mod = '"det27"' # use scorelist in the vis
mod = '"det28"' # only draw nonzero boxes
mod = '"det29"' # cleaned up
mod = '"det30"' # do not draw 0,1 scores
mod = '"det31"' # cleaned up
mod = '"det32"' # evaluate against axlrtlist
mod = '"det33"' # only show a fixed number of digits
mod = '"det34"' # fix that
mod = '"det35"' # maxlen=3
mod = '"det36"' # log500
############## define experiments ##############
exps['det_builder'] = [
    'carla_det', # mode
    'carla_multiview_10_data', # dataset
    'seqlen1',
    '8-4-8_bounds',
    # '16-8-16_bounds',
    '3_iters',
    # '5k_iters',
    # 'lr3',
    'train_feat3d',
    'train_det',
    'B1',
    'no_shuf',
    'no_backprop',
    # 'log50',
    'log1',
]
exps['det_trainer'] = [
    'carla_det', # mode
    # 'carla_multiview_10_data', # dataset
    'carla_multiview_train_data', # dataset
    'seqlen1',
    # 'carla_multiview_train_val_data', # dataset
    '16-8-16_bounds',
    # 'carla_16-8-16_bounds_train',
    # 'carla_16-8-16_bounds_val',
    '200k_iters',
    'lr3',
    'B4',
    'train_feat3d',
    'train_det',
    'log500',
]
############## group configs ##############
groups['carla_det'] = ['do_carla_det = True']
############## datasets ##############
# DHW for mem stuff
SIZE = 32
Z = int(SIZE*4)
Y = int(SIZE*2)
X = int(SIZE*4)
K = 8 # how many proposals to consider
# H and W for proj stuff
# (H and W come from `from exp_base import *`)
PH = int(H/2.0)
PW = int(W/2.0)
# S = 1
# groups['carla_multiview_10_data'] = [
# 'dataset_name = "carla"',
# 'H = %d' % H,
# 'W = %d' % W,
# 'trainset = "mags7i3ten"',
# 'trainset_format = "multiview"',
# 'trainset_seqlen = %d' % S,
# 'dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"',
# 'dataset_filetype = "npz"'
# ]
# groups['carla_multiview_train_val_data'] = [
# 'dataset_name = "carla"',
# 'H = %d' % H,
# 'W = %d' % W,
# 'trainset = "mags7i3t"',
# 'trainset_format = "multiview"',
# 'trainset_seqlen = %d' % S,
# 'valset = "mags7i3v"',
# 'valset_format = "multiview"',
# 'valset_seqlen = %d' % S,
# 'dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"',
# 'dataset_filetype = "npz"'
# ]
############## verify and execute ##############
def _verify_(s):
varname, eq, val = s.split(' ')
assert varname in globals()
assert eq == '='
assert type(s) is type('')
# Expand the selected experiment (`current`, set earlier in this file) into
# module globals: every group is a list of "name = value" strings, each
# verified by _verify_ and then exec'd. exec() here only runs trusted,
# in-repo configuration strings.
print(current)
assert current in exps
for group in exps[current]:
    print(" " + group)
    assert group in groups
    for s in groups[group]:
        print(" " + s)
        _verify_(s)
        exec(s)
s = "mod = " + mod
_verify_(s)
exec(s)
|
{"/model_carla_det.py": ["/hyperparams.py", "/nets/detnet.py"], "/model_carla_ego.py": ["/hyperparams.py", "/nets/egonet.py"], "/exp_carla_static.py": ["/exp_base.py"], "/model_carla_static.py": ["/hyperparams.py", "/nets/emb2dnet.py", "/nets/emb3dnet.py", "/nets/viewnet.py"], "/nets/detnet.py": ["/hyperparams.py", "/archs/encoder3d.py"], "/exp_carla_ego.py": ["/exp_base.py"], "/archs/encoder3d.py": ["/archs/pixelshuffle3d.py"], "/backend/saverloader.py": ["/hyperparams.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py", "/hyperparams.py"], "/nets/flownet.py": ["/hyperparams.py"], "/nets/emb2dnet.py": ["/archs/encoder2d.py", "/hyperparams.py"], "/nets/egonet.py": ["/hyperparams.py"], "/nets/viewnet.py": ["/hyperparams.py"], "/exp_base.py": ["/pretrained_nets_carla.py"], "/nets/emb3dnet.py": ["/hyperparams.py"], "/exp_carla_det.py": ["/exp_base.py"]}
|
2,503
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/lane.py
|
from methods import *
from slidingwindow import *
l = Lane()
def frame(img):
    """Detect the lane in one RGB frame and return the frame with the
    bird's-eye detection warped back and blended on top."""
    detected = l.sliding_window(img)
    overlay = fast_warp_lane(detected)
    return cv2.addWeighted(img, 1.0, overlay, 0.5, 0)
def frame_convolution(img):
    """Run the threshold + convolution lane pipeline on one RGB frame and
    return the frame with the detected lane blended on top.
    """
    # apply_thresholds() returns (combined, stage_1, stage_2) per methods.py;
    # the original assigned the whole tuple to `res`, which breaks dstack.
    res, _, _ = apply_thresholds(img)
    res_rgb = np.dstack((res*255, res*255, res*255))
    lane = fast_unwarp_lane(res_rgb)
    unwarp = convolution(lane[:,:,0])
    warp = fast_warp_lane(unwarp)
    out = cv2.addWeighted(img, 1.0, warp, 0.5, 0)
    # Fix: the original fell off the end and returned None, which breaks
    # moviepy's fl_image() consumer downstream.
    return out
if __name__ == "__main__":
    # render the convolution-based lane overlay for the chosen input clip
    inp = "project_small"
    #inp = "project_video"
    process_video(infile=inp + ".mp4",
                  outfile=inp + "_final.mp4",
                  method=frame_convolution)
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,504
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/run_1b_undistort.py
|
from methods import *
import cv2
#load calibration
mtx, dist = load_calibration()
test_image = cv2.imread('test_images/test4.jpg')
# cv2.imread returns BGR; reversing the channel axis converts to RGB
test_image = test_image[..., ::-1]
undist = cv2.undistort(test_image, mtx, dist)
# save a side-by-side of the raw and undistorted test image
plot_images_save('output_images/1b_undistort.png', [ [test_image, undist] ])
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,505
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/run_1_undistort.py
|
from methods import *
import cv2
#load calibration
mtx, dist = load_calibration()
# use a chessboard image so the distortion correction is visually obvious
test_image = cv2.imread('camera_cal/calibration1.jpg')
undist = cv2.undistort(test_image, mtx, dist)
# save a side-by-side of the raw and undistorted calibration image
plot_images_save('output_images/1_undistort.png', [ [test_image, undist] ])
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,506
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/run_6_image.py
|
from methods import *
from methods_sliding_window import *
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
import glob
import cv2
import matplotlib.image as mpimg
lane = Lane()
def process(img):
    """Detect the lane on one RGB frame, blend the projection over the
    frame, annotate curvature, and return the result."""
    overlay = fast_warp_lane(lane.sliding_window(img))
    result = cv2.addWeighted(img, 1.0, overlay, 0.5, 0)
    lane.draw_curvature(result)
    return result
if __name__ == "__main__":
    """
    #inp = "project_small"
    inp = "project_video"
    process_video(infile=inp + ".mp4",
                  outfile=inp + "_threshold.mp4",
                  method=thresh)
    """
    # run the full pipeline on a single test image and save input/output
    files = glob.glob("test_images/test2.jpg")
    print(files)
    images = []
    for ix, fname in enumerate(files):
        img = mpimg.imread(fname)
        t = process(img)
        images.append([img, t])
    plot_images_save("output_images/6_lane.png", images)
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,507
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/run_3_warping.py
|
from methods import *
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
import glob
import cv2
import matplotlib.image as mpimg
def thresh(img, warp=False):
    """Return an RGB visualization of the thresholded binary image,
    optionally perspective-warped to the top-down view.

    Args:
        img: RGB input frame.
        warp: when True, also apply the bird's-eye perspective warp.
    """
    # apply_thresholds() returns (combined, stage_1, stage_2) per methods.py;
    # the original stacked the whole tuple, which is not a valid image.
    res = apply_thresholds(img)[0]
    binary = np.dstack((res*255, res*255, res*255))
    binary_warped = binary
    if warp:
        binary_warped = fast_unwarp_lane(binary)
    return binary_warped
if __name__ == "__main__":
    """
    #inp = "project_small"
    inp = "project_video"
    process_video(infile=inp + ".mp4",
                  outfile=inp + "_threshold.mp4",
                  method=thresh)
    """
    # threshold a single test image (no warp) and save input/output pair
    files = glob.glob("test_images/test2.jpg")
    print(files)
    images = []
    for ix, fname in enumerate(files):
        img = mpimg.imread(fname)
        t = thresh(img, False)
        images.append([img, t])
    plot_images_save("output_images/2_threshold.png", images)
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,508
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/calibrate.py
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
# chessboard interior corner counts (columns, rows)
nx = 9
ny = 6
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(nx,ny,0)
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        #draw the chessboard corners and plot to show
        cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
        plt.imshow(img)
        # NOTE(review): plt.show() blocks once per detected board; fine for
        # an interactive run, but confirm before automating this script.
        plt.show()
def save_calibration(objpoints, imgpoints):
    """Calibrate the camera from collected point pairs and persist the
    (camera matrix, distortion coefficients) pair to calibration.pkl.

    Relies on the module-level `img` left over from the collection loop
    above to supply the image size.

    NOTE(review): cv2.calibrateCamera expects imageSize as (width, height),
    while img.shape[0:2] is (height, width) -- confirm this is intentional.
    """
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[0:2], None, None)
    calib = [mtx, dist]
    # Fix: use a context manager so the pickle file is flushed and closed
    # deterministically (the original left the handle open).
    with open("calibration.pkl", "wb") as f:
        pickle.dump(calib, f)
# save the calibration to load in successive runs, as this calibration is slow.
# (methods.load_calibration() reads the same calibration.pkl at import time)
save_calibration(objpoints, imgpoints)
print("Calibration Saved")
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,509
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/methods.py
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from moviepy.editor import VideoFileClip
from tqdm import tqdm
def load_calibration():
    """Load the camera calibration saved by calibrate.py.

    Returns:
        tuple: (camera matrix, distortion coefficients) as pickled into
        calibration.pkl.
    """
    # Fix: use a context manager so the file handle is always closed
    # (the original open() was never closed).
    with open("calibration.pkl", "rb") as f:
        calib = pickle.load(f)
    print("Calibration Loaded")
    return calib[0], calib[1]
#load calibration
# module-level calibration state shared by every helper below
mtx, dist = load_calibration()
img_size = (1280, 720)  # (width, height) as consumed by cv2.warpPerspective
def load_transforms():
    """Build the forward (M) and inverse (Minv) perspective transforms
    mapping the road trapezoid onto a top-down rectangle.

    Returns:
        tuple: (M, Minv) as produced by cv2.getPerspectiveTransform.
    """
    # source trapezoid in camera space; the +/-10 widens the far edge
    road_quad = np.float32([
        [585.0-10, 460.0],
        [0.0, 720.0],
        [1280.0, 720.0],
        [695.0+10, 460.0],
    ])
    # destination rectangle in bird's-eye space
    birdseye_quad = np.float32([
        [320, 0],
        [320, 720],
        [960, 720],
        [960, 0],
    ])
    forward = cv2.getPerspectiveTransform(road_quad, birdseye_quad)
    inverse = cv2.getPerspectiveTransform(birdseye_quad, road_quad)
    return forward, inverse
# precompute the perspective transforms once at import time
M, Minv = load_transforms()
def undistort(img):
    """Return `img` corrected with the module-level camera calibration."""
    return cv2.undistort(img, mtx, dist)
def fast_unwarp_lane(img):
    """Undistort `img` and warp it into the top-down (bird's-eye) view."""
    # module-level mtx/dist/M/img_size are read-only here, so no `global`
    corrected = cv2.undistort(img, mtx, dist)
    return cv2.warpPerspective(corrected, M, img_size, flags=cv2.INTER_LINEAR)
def fast_warp_lane(lane):
    """Project a bird's-eye lane image back onto the camera perspective."""
    return cv2.warpPerspective(lane, Minv, img_size, flags=cv2.INTER_LINEAR)
def process_video(infile, outfile, method):
    """Apply `method` to every frame of `infile` and write the result
    to `outfile`.

    `method` must accept an RGB frame and return an RGB frame.
    """
    source = VideoFileClip(infile)
    rendered = source.fl_image(method)  # fl_image feeds color frames
    rendered.write_videofile(outfile, audio=False)
def plot_images(images):
    """Display a grid of images with matplotlib.

    Args:
        images: list of rows, each row a list of images (2-D grid).
    """
    m = len(images)
    n = len(images[0])
    fig, axes = plt.subplots(m, n, figsize=(10*n, 10*m))
    if m == 1:
        # Fix: subplots() returns a 1-D axes array for a single row, so the
        # 2-D indexing below would fail -- same guard as plot_images_save.
        axes = [axes]
    fig.tight_layout()
    for ix in range(m):
        for iy in range(n):
            axes[ix][iy].imshow(images[ix][iy], cmap='gray')
            axes[ix][iy].axis('off')
    plt.show()
def plot_images_save(fname, images):
    """Render a 2-D grid of images and save the figure to `fname`.

    Args:
        fname: output image path for the saved figure.
        images: list of rows, each row a list of images.
    """
    print(len(images))
    rows = len(images)
    cols = len(images[0])
    fig, axes = plt.subplots(rows, cols, figsize=(10*cols, 10*rows))
    if rows == 1:
        # subplots() collapses a single row to a 1-D array; wrap it so the
        # nested indexing below stays uniform
        axes = [axes]
    fig.tight_layout()
    for r in range(rows):
        for c in range(cols):
            panel = axes[r][c]
            panel.imshow(images[r][c], cmap='gray')
            panel.axis('off')
    fig.savefig(fname)
def abs_sobel_thresh(img_gray, orient='x', ksize=3, thresh=(20,100)):
    """Binary mask of pixels whose scaled |Sobel| response along one axis
    falls inside `thresh` (inclusive).

    Args:
        img_gray: single-channel input image.
        orient: 'x' for horizontal gradient, anything else for vertical.
        ksize: Sobel kernel size.
        thresh: (low, high) bounds on the 0-255 scaled response.
    """
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    gradient = cv2.Sobel(img_gray, cv2.CV_64F, dx, dy, ksize=ksize)
    magnitude = np.absolute(gradient)
    scaled = np.uint8(255*magnitude/np.max(magnitude))
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
def mag_thresh(gray, ksize=9, thresh=(20,80)):
    """Binary mask of pixels whose scaled gradient magnitude lies inside
    `thresh` (inclusive)."""
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)
    magnitude = np.sqrt(gx ** 2 + gy ** 2)
    scaled = (magnitude*255/np.max(magnitude)).astype(np.uint8)
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
def dir_thresh(gray, ksize=15, thresh=(0.0, np.pi/2)):
    """Binary mask of pixels whose gradient direction (radians, via
    arctan2 of absolute Sobel responses) lies inside `thresh`."""
    gx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize))
    gy = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize))
    direction = np.arctan2(gy, gx)
    mask = np.zeros(direction.shape, dtype=np.uint8)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return mask
def hsv_select(image_hsv):
    """Binary mask selecting yellow and white lane pixels from an HSV image.

    All three channels are histogram-equalized first, so the thresholds
    below apply to normalized distributions. (The equalized hue channel is
    computed but, as in the original, only used for its shape.)
    """
    hue = cv2.equalizeHist(image_hsv[:,:,0])
    sat = cv2.equalizeHist(image_hsv[:,:,1])
    val = cv2.equalizeHist(image_hsv[:,:,2])
    mask = np.zeros_like(hue).astype(np.uint8)
    mask[(sat > 200) & (val > 150) ] = 1 #yellow only
    mask[(sat<80) & (val > 240)] = 1 #white only
    return mask
def hsv_debug(img):
    """Return the histogram-equalized H, S, V channels of an RGB image,
    for use in the debug panels."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    return tuple(cv2.equalizeHist(hsv[:, :, channel]) for channel in range(3))
def apply_stage_1(img_gray):
    """Gradient-based lane mask: x-sobel OR (magnitude AND direction)."""
    x_mask = abs_sobel_thresh(img_gray, orient='x', ksize=3, thresh=(20,200))
    # y response is computed (as in the original) but not used in the blend
    y_mask = abs_sobel_thresh(img_gray, orient='y', ksize=3, thresh=(20,200))
    mag_mask = mag_thresh(img_gray, ksize=9, thresh=(20,100))
    ang_mask = dir_thresh(img_gray, ksize=9, thresh=(0.7, 1.3))
    combined = np.zeros_like(x_mask)
    # direction alone proved unreliable; it only gates the magnitude mask
    combined[(x_mask == 1) | ((mag_mask == 1) & (ang_mask == 1))] = 1
    return combined
def apply_thresholds(img):
    """Undistort an RGB frame and compute the binary lane masks.

    Returns:
        tuple: (combined mask, gradient-stage mask, HSV color-stage mask).
        The combined mask is the elementwise sum of the two stages.
    """
    img = cv2.undistort(img, mtx, dist, None, mtx)
    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # equalize the V channel (index 2) and use it as the grayscale input
    # for the sobel stage (the original comment called it the S channel)
    img_hsv_s = cv2.equalizeHist(img_hsv[:,:,2])
    stage_1 = apply_stage_1(img_hsv_s)
    stage_2 = hsv_select(img_hsv)
    # hsv is generally better overall; x/y sobel helps on certain frames
    res = stage_1 + stage_2
    # (removed: a dead `binary_out` was computed here but never returned)
    return res, stage_1, stage_2
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,510
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/methods_sliding_window.py
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from moviepy.editor import VideoFileClip
from tqdm import tqdm
from methods import *
class Line:
    # Per-side (left or right) lane-line state carried between frames.
    def __init__(self):
        # pixel indices (into the frame's nonzero arrays) assigned to this line
        self.lane_inds = []
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        #polynomial coefficients for the most recent fit
        self.current_fit = np.array([False])
        # history of recent fits, used for temporal smoothing
        self.previous_fit = []
        #radius of curvature of the line in some units
        self.radius_of_curvature = 0
        #error between current_fit and previous_fit
        self.curr_err = 0.0
class Lane:
    """Frame-to-frame lane tracker.

    Owns the left/right Line states, runs the sliding-window search on each
    frame, smooths the polynomial fits over time, and assembles a 1920x1080
    debug mosaic (main view plus intermediate pipeline panels).
    """
    def __init__(self):
        self.left = Line()
        self.right = Line()
        # 1920x1080 canvas: main view top-left, diagnostic panels around it
        self.debug_image = np.zeros((1080, 1920, 3), dtype=np.uint8)
    def binary_warped(self, img):
        """Threshold `img`, warp it to the bird's-eye view, and return the
        single-channel binary warped image; also fills the debug panels."""
        res, st1, st2 = apply_thresholds(img)
        res_rgb = np.dstack((res, res, res))*255
        st1_rgb = np.dstack((st1, st1, st1))*255
        st2_rgb = np.dstack((st2, st2, st2))*255
        lane = fast_unwarp_lane(res_rgb)
        st1_uw = fast_unwarp_lane(st1_rgb)
        st2_uw = fast_unwarp_lane(st2_rgb)
        binary_warped = lane[:,:,2]
        #following only used for debugging pipeline
        self.debug_image[840:1080, 0:320] = cv2.resize(st1_uw, (320, 240), interpolation=cv2.INTER_AREA)
        self.debug_image[840:1080, 320:640] = cv2.resize(st2_uw, (320, 240), interpolation=cv2.INTER_AREA)
        self.debug_image[840:1080, 640:960] = cv2.resize(lane, (320, 240), interpolation=cv2.INTER_AREA)
        cv2.rectangle(self.debug_image,(0,840),(320,1080),(0,255,255), 2)
        cv2.rectangle(self.debug_image,(320,840),(640,1080),(0,255,255), 2)
        cv2.rectangle(self.debug_image,(640,840),(960,1080),(0,255,255), 2)
        a,b,c = hsv_debug(img)
        a = fast_unwarp_lane(np.dstack((a,a,a))*255)
        b = fast_unwarp_lane(np.dstack((b,b,b))*255)
        c = fast_unwarp_lane(np.dstack((c,c,c))*255)
        self.debug_image[0:240, 1600:1920] = cv2.resize(a, (320, 240), interpolation=cv2.INTER_AREA)
        self.debug_image[240:480, 1600:1920] = cv2.resize(b, (320, 240), interpolation=cv2.INTER_AREA)
        self.debug_image[480:720, 1600:1920] = cv2.resize(c, (320, 240), interpolation=cv2.INTER_AREA)
        return binary_warped
    def sliding_window(self, img):
        """Run one full detection step on frame `img` and return the
        projected (bird's-eye) lane image."""
        binary_warped = self.binary_warped(img)
        # NOTE(review): current_fit starts as np.array([False]) (size 1), so
        # this condition is true on every frame and sliding_next() appears
        # unreachable; the condition may be inverted -- confirm intent.
        if self.left.current_fit.size > 0:
            out = self.sliding_first(binary_warped)
        else:
            out = self.sliding_next(binary_warped)
        self.draw_search_window_area(binary_warped, d1=960, d2=1280) #plot before dropping fit
        self.process_fits()
        out = self.draw_projection(binary_warped)
        self.draw_search_window_area(binary_warped)
        return out
    def sliding_first(self, binary_warped):
        """Full sliding-window search from scratch; fits both lines and
        returns a visualization image with windows and pixels colored."""
        # Take a histogram of the bottom half of the image
        # (fix: integer floor division -- float slice indices raise
        # TypeError on Python 3, and np.int was removed from NumPy)
        histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
        # Create an output image to draw on and visualize the result
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
        # Find the peak of the left and right halves of the histogram
        # These will be the starting point for the left and right lines
        midpoint = int(histogram.shape[0]//2)
        leftx_base = np.argmax(histogram[:midpoint])
        rightx_base = np.argmax(histogram[midpoint:]) + midpoint
        # Choose the number of sliding windows
        nwindows = 9
        # Set height of windows
        window_height = int(binary_warped.shape[0]//nwindows)
        # Identify the x and y positions of all nonzero pixels in the image
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Current positions to be updated for each window
        leftx_current = leftx_base
        rightx_current = rightx_base
        # Set the width of the windows +/- margin
        margin = 100
        # Set minimum number of pixels found to recenter window
        minpix = 50
        # Create empty lists to receive left and right lane pixel indices
        self.left.lane_inds = []
        self.right.lane_inds = []
        # Step through the windows one by one
        for window in range(nwindows):
            # Identify window boundaries in x and y (and right and left)
            win_y_low = binary_warped.shape[0] - (window+1)*window_height
            win_y_high = binary_warped.shape[0] - window*window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            # Draw the windows on the visualization image
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
            # Identify the nonzero pixels in x and y within the window
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
            # Append these indices to the lists
            self.left.lane_inds.append(good_left_inds)
            self.right.lane_inds.append(good_right_inds)
            # If you found > minpix pixels, recenter next window on their mean position
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
        # Concatenate the arrays of indices
        self.left.lane_inds = np.concatenate(self.left.lane_inds)
        self.right.lane_inds = np.concatenate(self.right.lane_inds)
        # Extract left and right line pixel positions
        leftx = nonzerox[self.left.lane_inds]
        lefty = nonzeroy[self.left.lane_inds]
        rightx = nonzerox[self.right.lane_inds]
        righty = nonzeroy[self.right.lane_inds]
        # Fit a second order polynomial to each
        self.left.current_fit = np.polyfit(lefty, leftx, 2)
        self.right.current_fit = np.polyfit(righty, rightx, 2)
        # Generate x and y values for plotting
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
        left_fitx = self.left.current_fit[0]*ploty**2 + self.left.current_fit[1]*ploty + self.left.current_fit[2]
        right_fitx = self.right.current_fit[0]*ploty**2 + self.right.current_fit[1]*ploty + self.right.current_fit[2]
        out_img[nonzeroy[self.left.lane_inds], nonzerox[self.left.lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[self.right.lane_inds], nonzerox[self.right.lane_inds]] = [0, 0, 255]
        return out_img
    def sliding_next(self,binary_warped):
        """Refine the previous frame's fits by searching within a margin
        around them; returns the input binary image unchanged."""
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        margin = 100
        self.left.lane_inds = ((nonzerox > (self.left.current_fit[0]*(nonzeroy**2) + self.left.current_fit[1]*nonzeroy + self.left.current_fit[2] - margin)) & (nonzerox < (self.left.current_fit[0]*(nonzeroy**2) + self.left.current_fit[1]*nonzeroy + self.left.current_fit[2] + margin)))
        self.right.lane_inds = ((nonzerox > (self.right.current_fit[0]*(nonzeroy**2) + self.right.current_fit[1]*nonzeroy + self.right.current_fit[2] - margin)) & (nonzerox < (self.right.current_fit[0]*(nonzeroy**2) + self.right.current_fit[1]*nonzeroy + self.right.current_fit[2] + margin)))
        # Again, extract left and right line pixel positions
        leftx = nonzerox[self.left.lane_inds]
        lefty = nonzeroy[self.left.lane_inds]
        rightx = nonzerox[self.right.lane_inds]
        righty = nonzeroy[self.right.lane_inds]
        # Fit a second order polynomial to each
        self.left.current_fit = np.polyfit(lefty, leftx, 2)
        self.right.current_fit = np.polyfit(righty, rightx, 2)
        return binary_warped
    def draw_search_window_area(self, binary_warped, d1=1280, d2=1600):
        """Render the +/-margin search corridor around both fits into the
        debug panel at columns [d1:d2] and return the rendered image."""
        # Create an image to draw on and an image to show the selection window
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
        window_img = np.zeros_like(out_img)
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Color in left and right line pixels
        out_img[nonzeroy[self.left.lane_inds], nonzerox[self.left.lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[self.right.lane_inds], nonzerox[self.right.lane_inds]] = [0, 0, 255]
        # Generate x and y values for plotting
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
        left_fitx = self.left.current_fit[0]*ploty**2 + self.left.current_fit[1]*ploty + self.left.current_fit[2]
        right_fitx = self.right.current_fit[0]*ploty**2 + self.right.current_fit[1]*ploty + self.right.current_fit[2]
        # Generate a polygon to illustrate the search window area
        # And recast the x and y points into usable format for cv2.fillPoly()
        margin = 100
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        # Draw the lane onto the warped blank image
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
        result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        self.debug_image[840:1080, d1:d2] = cv2.resize(result, (320, 240), interpolation=cv2.INTER_AREA)
        cv2.rectangle(self.debug_image,(960,840),(1280,1080),(0,255,255), 2)
        return result
    def process_fits(self):
        """Temporal smoothing: reject the current fit when it deviates too
        far from the previous one, then average over the last few fits."""
        last_n = 5
        #measure the error between fits and store into curr_err.
        # NOTE(review): the right line's error is also computed under the
        # left line's history check -- confirm this coupling is intended.
        if self.left is not None and self.left.current_fit is not None and len(self.left.previous_fit)>0:
            err_p = np.mean((self.left.current_fit - self.left.previous_fit[-1])**2) #/np.sum(right_fit_prev[0]**2)
            err_p = np.sqrt(err_p)
            self.left.curr_err = err_p
            err_p = np.mean((self.right.current_fit - self.right.previous_fit[-1])**2) #/np.sum(right_fit_prev[0]**2)
            err_p = np.sqrt(err_p)
            self.right.curr_err = err_p
        else:
            self.left.curr_err = 0.0
            self.right.curr_err = 0.0
        #if error is too high, drop the current_fit and use previous_fit
        if self.left.curr_err > 50.0:
            self.left.current_fit = self.left.best_fit
        if self.right.curr_err > 50.0:
            self.right.current_fit = self.right.best_fit
        #average the fit over last_n iterations
        self.left.previous_fit.append(self.left.current_fit)
        if len(self.left.previous_fit) > last_n:
            self.left.previous_fit = self.left.previous_fit[1:]
        self.left.best_fit = np.average(self.left.previous_fit, axis=0)
        self.right.previous_fit.append(self.right.current_fit)
        if len(self.right.previous_fit) > last_n:
            self.right.previous_fit = self.right.previous_fit[1:]
        self.right.best_fit = np.average(self.right.previous_fit, axis=0)
        #assign the best_fit / average to current_fit for next steps
        self.left.current_fit = self.left.best_fit
        self.right.current_fit = self.right.best_fit
    def draw_curvature(self, img):
        """Annotate `img` with curvature radii (in meters) and lane
        deviation, and fill the debug error panel."""
        y_eval = np.max(img.shape[0]-1)
        left_curverad = ((1 + (2*self.left.current_fit[0]*y_eval + self.left.current_fit[1])**2)**1.5) / np.absolute(2*self.left.current_fit[0])
        right_curverad = ((1 + (2*self.right.current_fit[0]*y_eval + self.right.current_fit[1])**2)**1.5) / np.absolute(2*self.right.current_fit[0])
        ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
        leftx = self.left.current_fit[0]*ploty**2 + self.left.current_fit[1]*ploty + self.left.current_fit[2]
        rightx = self.right.current_fit[0]*ploty**2 + self.right.current_fit[1]*ploty + self.right.current_fit[2]
        # Define conversions in x and y from pixels space to meters
        ym_per_pix = 30/720 # meters per pixel in y dimension
        xm_per_pix = 3.7/700 # meters per pixel in x dimension
        # Fit new polynomials to x,y in world space
        left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
        right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
        # Calculate the new radii of curvature (now in meters)
        self.left.radius_of_curvature = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
        self.right.radius_of_curvature = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
        cv2.putText(img, "Radius Left:{0:.2f}m".format(self.left.radius_of_curvature), (10,50), cv2.FONT_HERSHEY_PLAIN, 2, 255)
        cv2.putText(img, "Radius Right:{0:.2f}m".format(self.right.radius_of_curvature), (10,100), cv2.FONT_HERSHEY_PLAIN, 2, 255)
        self.draw_lane_deviation(img)
        str_err = 'Error: Left = ' + str(np.round(self.left.curr_err,2)) + ', Right = ' + str(np.round(self.right.curr_err,2))
        font = cv2.FONT_HERSHEY_PLAIN
        middlepanel = np.zeros((120, 1280, 3), dtype=np.uint8)
        cv2.putText(middlepanel, str_err, (30, 60), font, 2, (255,0,0), 2)
        self.debug_image[720:840, 0:1280] = middlepanel
        return img
    def draw_projection(self, binary_warped):
        """Fill the area between the two fitted lines and return it as a
        color (bird's-eye) image; also mirrors it into the debug panel."""
        # Create an image to draw the lines on
        warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
        # Recast the x and y points into usable format for cv2.fillPoly()
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
        left_fitx = self.left.current_fit[0]*ploty**2 + self.left.current_fit[1]*ploty + self.left.current_fit[2]
        right_fitx = self.right.current_fit[0]*ploty**2 + self.right.current_fit[1]*ploty + self.right.current_fit[2]
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))
        cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
        self.debug_image[480:720, 1280:1600] = cv2.resize(color_warp, (320, 240), interpolation=cv2.INTER_AREA)
        return color_warp
    def draw_lane_deviation(self, img):
        """Annotate `img` with the car's lateral offset from lane center."""
        ## Compute intercepts
        img_size = img.shape[0:2]
        # NOTE(review): this squares the leading coefficient rather than the
        # y value (fit[0]**2 vs y**2) -- looks like a bug, confirm before use.
        left_bot = img_size[0] * self.left.current_fit[0]**2 + img_size[0]*self.left.current_fit[1] + self.left.current_fit[2]
        right_bot = img_size[0] * self.right.current_fit[0]**2 + img_size[0]*self.right.current_fit[1] + self.right.current_fit[2]
        ## Compute center location
        val_center = (left_bot+right_bot)/2.0
        ## Compute lane offset
        dist_offset = val_center - img_size[1]/2
        # 2.81362 presumably converts pixels to centimeters -- verify source
        dist_offset = np.round(dist_offset/2.81362,2)
        str_offset = 'Lane deviation: ' + str(dist_offset) + ' cm.'
        cv2.putText(img, str_offset, (10,150), cv2.FONT_HERSHEY_PLAIN, 2, 255)
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,511
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/run_7_video.py
|
from methods import *
from methods_sliding_window import *
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
import glob
import cv2
import matplotlib.image as mpimg
lane = Lane()
def process(img):
    """Full per-frame pipeline for the output video: detect the lane,
    blend it over the undistorted frame, annotate curvature, and return
    the assembled 1920x1080 debug mosaic.
    """
    undist = undistort(img)
    out = lane.sliding_window(img)
    out = fast_warp_lane(out)
    out = cv2.addWeighted(undist, 1.0, out, 0.5, 0)
    lane.draw_curvature(out)
    #lane.draw_search_window_area(out)
    lane.debug_image[0:720, 0:1280] = out
    cv2.rectangle(lane.debug_image,(0,0),(1280,1080),(0,255,255), 2)
    # Fix: the original had a second, unreachable `return out` after this.
    return lane.debug_image
if __name__ == "__main__":
    #inp = "project_small"
    #inp = "project_video"
    inp = "challenge_video"
    # writes <inp>_final.mp4 with the full debug mosaic on every frame
    process_video(infile=inp + ".mp4",
                  outfile=inp + "_final.mp4",
                  method=process)
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,512
|
darcwader/sdcn_p4_advanced_lane_finding
|
refs/heads/master
|
/curvature.py
|
from methods import *
from methods_sliding_window import *
l = Lane()
def frame(img):
    """Per-frame pipeline: detect the lane, warp it back onto the frame,
    blend, annotate curvature, and return the annotated frame."""
    detected = l.sliding_window(img)
    overlay = fast_warp_lane(detected)
    blended = cv2.addWeighted(img, 1.0, overlay, 0.5, 0)
    l.draw_curvature(blended)
    return blended
if __name__ == "__main__":
    inp = "project_small"
    #inp = "project_video"
    # render the curvature-annotated video next to the input clip
    process_video(infile=inp + ".mp4",
                  outfile=inp + "_curvature.mp4",
                  method=frame)
|
{"/lane.py": ["/methods.py"], "/run_1b_undistort.py": ["/methods.py"], "/run_1_undistort.py": ["/methods.py"], "/run_6_image.py": ["/methods.py", "/methods_sliding_window.py"], "/run_3_warping.py": ["/methods.py"], "/methods_sliding_window.py": ["/methods.py"], "/run_7_video.py": ["/methods.py", "/methods_sliding_window.py"], "/curvature.py": ["/methods.py", "/methods_sliding_window.py"]}
|
2,525
|
Juice178/song_visualization
|
refs/heads/master
|
/flaskr/plugin/Exceptions.py
|
class ToJsonException(Exception):
    """Signals a failure while converting fetched data to JSON."""
|
{"/app.py": ["/flaskr/__init__.py"], "/flaskr/visualize.py": ["/flaskr/api/spotify.py", "/flaskr/plugin/Exceptions.py"]}
|
2,526
|
Juice178/song_visualization
|
refs/heads/master
|
/app.py
|
from flaskr import create_app
import os
# Select the config environment; defaults to production when unset.
env = os.environ.get('env', 'prod')
app = create_app(env)

if __name__ == "__main__":
    # threaded=True lets the dev server serve concurrent requests.
    app.run(threaded=True)
|
{"/app.py": ["/flaskr/__init__.py"], "/flaskr/visualize.py": ["/flaskr/api/spotify.py", "/flaskr/plugin/Exceptions.py"]}
|
2,527
|
Juice178/song_visualization
|
refs/heads/master
|
/flaskr/__init__.py
|
import os
from flask import Flask
import logging
def create_app(env="stg"):
    """Application factory: build and configure the Flask app for *env*.

    env: "stg" loads stg.cfg; any other value loads prod.cfg from the
    instance folder.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )
    app.logger.setLevel(logging.ERROR)

    # Environment-specific settings override the defaults above.
    config_name = "stg.cfg" if env == 'stg' else "prod.cfg"
    app.config.from_pyfile(config_name, silent=False)

    # Make sure the instance folder exists; ignore "already exists".
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # Imported here to avoid a circular import at module load time.
    from . import visualize
    app.register_blueprint(visualize.bp)
    app.add_url_rule('/', endpoint="index")
    return app
|
{"/app.py": ["/flaskr/__init__.py"], "/flaskr/visualize.py": ["/flaskr/api/spotify.py", "/flaskr/plugin/Exceptions.py"]}
|
2,528
|
Juice178/song_visualization
|
refs/heads/master
|
/flaskr/visualize.py
|
from flask import (
Blueprint, flash,render_template, request
)
from spotipy import SpotifyException
from spotipy.oauth2 import SpotifyClientCredentials, SpotifyOauthError
from flaskr.api.spotify import Spotipy
from flaskr.plugin.Exceptions import ToJsonException
import pandas as pd
import logging
bp = Blueprint('viz', __name__)
@bp.route('/', methods=("GET", "POST"))
def get_data():
    """
    Works differently depending on a HTTP method
    IF GET : Return html file for username, password, artist id inputs
    IF POST : Get data and convert it to a json file, which then is passed to html file
    """
    # NOTE(review): this flag is recomputed on every request, so it is always
    # True on entry; true "first login" tracking would need session state.
    is_first_time_login = True
    if request.method == 'POST':
        error = None
        sp_client = None
        top_tracks = None
        # Instantiate object only when logged in for the first time
        if is_first_time_login:
            client_id = request.form['client_id']
            client_secret = request.form['client_secret']
            # get an client throught which an user get spotify music data
            sp_client = Spotipy(client_id, client_secret)
        artist_id = request.form['artist_id']
        try:
            # get top 10 songs of an artist
            top_tracks = sp_client.get_artist_top_tracks(artist_id)
        except SpotifyOauthError:
            error = 'Either client id or client secret you entered is wrong.'
        except SpotifyException as e:
            error = e
            is_first_time_login = False
        except Exception as e:
            error = e
        else:
            is_first_time_login = False

        if error:
            flash(error)
        else:
            # Fix: artist_name was previously unbound on the exception path
            # below, raising NameError inside render_template.
            artist_name = ''
            try:
                json_file, artist_name = toJson(sp_client, top_tracks)
            except ToJsonException as e:
                json_file = None
                logging.error(f"Failed to convert data to JSON \n{e}")
            # Fix: the render used to sit in a `finally` clause, which
            # silently swallowed any exception raised in the try above.
            return render_template('visualize/index.html', data=json_file, is_first_time_login=is_first_time_login, artist_name=artist_name)
    return render_template('visualize/index.html', data=None, is_first_time_login=is_first_time_login, artist_name='')
def toJson(sp_client, top_tracks):
    """
    Convert dictionary to json file

    Params
    ______
    sp_client: An instance to get music data
    top_tracks (dict): Information about an artist and his/her songs

    Returns
    _______
    json_file (json): Data with 15 columns
    artist_name (str): Artist name corresponding to artist id an user entered

    Raises
    ______
    ToJsonException: if any step of the conversion fails
    """
    # features about a song
    feature_names = [
        'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
        'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'duration_ms'
    ]
    rows = []
    try:
        for track in top_tracks['tracks']:
            row = [
                track['artists'][0]['name'],
                track['name'],
                track['popularity'],
            ]
            feature_values = sp_client.get_audio_features(track['id'])[0]
            # Fix: select features explicitly in feature_names order. The old
            # dict-iteration filter depended on the API response's key order,
            # which could mislabel columns if it differed from feature_names.
            row.extend(feature_values[name] for name in feature_names)
            rows.append(row)
        columns = ['artist_name', 'song_name', 'popularity'] + feature_names
        df = pd.DataFrame(rows, columns=columns)
        json_file = df.to_json(orient='records')
        # Fix: moved inside the try so an empty track list raises
        # ToJsonException instead of an unhandled KeyError/IndexError.
        artist_name = df['artist_name'][0]
    except Exception as e:
        # Chain the original exception to ease debugging.
        raise ToJsonException(e) from e
    return json_file, artist_name
|
{"/app.py": ["/flaskr/__init__.py"], "/flaskr/visualize.py": ["/flaskr/api/spotify.py", "/flaskr/plugin/Exceptions.py"]}
|
2,529
|
Juice178/song_visualization
|
refs/heads/master
|
/flaskr/api/spotify.py
|
"""A wrapper class for spotipy
"""
from spotipy import Spotify, SpotifyException
from spotipy.oauth2 import SpotifyClientCredentials, SpotifyOauthError
class Spotipy(object):
    """Thin convenience wrapper around a spotipy.Spotify client."""

    def __init__(self, client_id, client_secret):
        self._sp = self.get_client(client_id, client_secret)

    def get_client(self, client_id, client_secret):
        """Build an app-authorized Spotify client from API credentials."""
        credentials = SpotifyClientCredentials(client_id, client_secret)
        return Spotify(client_credentials_manager=credentials)

    def get_artist_top_tracks(self, artist_id):
        """Return the top-tracks payload for the given artist id."""
        return self._sp.artist_top_tracks(f'spotify:artist:{artist_id}')

    def get_audio_features(self, song_id):
        """Return the audio-features payload for the given track id."""
        return self._sp.audio_features(song_id)
|
{"/app.py": ["/flaskr/__init__.py"], "/flaskr/visualize.py": ["/flaskr/api/spotify.py", "/flaskr/plugin/Exceptions.py"]}
|
2,536
|
blackout314/myrecon.py
|
refs/heads/master
|
/modules/resolve.py
|
# I don't believe in license.
# You can do whatever you want with this program.
import re
import sys
import subprocess
from colored import fg, bg, attr
from functools import partial
from multiprocessing.dummy import Pool
class Resolve( object ):
    """Resolve hostnames to IPs with the `host` command, in parallel.

    After run(): `ips` holds the unique resolved IPs, `dead_host` the hosts
    that produced no address, and `full_output` the raw `host` output.
    """

    def __init__( self ):
        # Fix: these were mutable class attributes, so every Resolve
        # instance shared (and accumulated into) the same lists.
        self.ips = []
        self.n_ips = 0
        self.dead_host = []
        self.n_dead = 0
        self.full_output = ''

    def run( self, t_hosts ):
        """Resolve every host in t_hosts using a 10-worker thread pool."""
        sys.stdout.write( '[+] running mod: resolve...\n' )
        t_multiproc = {
            'n_current': 0,
            'n_total': len(t_hosts)
        }
        pool = Pool( 10 )
        pool.map( partial(self.resolve,t_multiproc), t_hosts )
        pool.close()
        pool.join()
        self.n_ips = len(self.ips)
        self.n_dead = len(self.dead_host)

    def resolve( self, t_multiproc, host ):
        """Resolve one host; record IPs found, or mark the host dead."""
        sys.stdout.write( 'progress: %d/%d\r' % (t_multiproc['n_current'],t_multiproc['n_total']) )
        t_multiproc['n_current'] = t_multiproc['n_current'] + 1
        try:
            # NOTE(review): shell=True with a concatenated hostname; hosts
            # come from subdomain enumeration — confirm they are sanitized.
            cmd = 'host ' + host
            output = subprocess.check_output( cmd, stderr=subprocess.STDOUT, shell=True ).decode('utf-8')
        except Exception:
            # `host` exits non-zero on NXDOMAIN etc.; treat as unresolvable.
            return
        self.full_output = self.full_output + output + "\n"
        # Raw string avoids invalid-escape warnings for \. in the pattern.
        matches = re.findall( r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', output )
        if matches:
            for ip in matches:
                if not ip in self.ips:
                    self.ips.append( ip )
        else:
            # Fix: was `sef.dead_host`, a NameError typo.
            if host not in self.dead_host:
                self.dead_host.append( host )
|
{"/myrecon.py": ["/modules/app.py", "/modules/subdomains.py", "/modules/resolve.py"]}
|
2,537
|
blackout314/myrecon.py
|
refs/heads/master
|
/myrecon.py
|
#!/usr/bin/python3.5
# I don't believe in license.
# You can do whatever you want with this program.
# Modules that can be requested via -m; 'all' selects every one of them.
t_available_mods = ['resolve', 'screenshot', 'quickhits', 'crlf', 'openredirect']
#
# init app: parse CLI args and set up output paths / domains / mods
#
from modules import functions as func
from modules.app import App
app = App()
func.parseargs( app, t_available_mods )
#
# ###
#
#
# MOD: subdomains — always runs; every later stage needs the host list
#
from modules.subdomains import Subdomains
mod = Subdomains()
mod.run( app.domains )
if not mod.n_hosts:
    # Nothing to work with; stop the whole run.
    exit()
app.setHosts( mod.hosts )
#
# ###
#
#
# MOD: resolve — turn hosts into IPs; drop hosts that do not resolve
#
if 'resolve' in app.mods:
    from modules.resolve import Resolve
    mod = Resolve()
    mod.run( app.hosts )
    app.setIps( mod.ips, mod.full_output )
    if mod.n_dead:
        app.setDeadHosts( mod.dead_host )
#
# ###
#
#
# create urls used by other tools (http/https per host and per ip)
#
app.createUrls()
#
# ###
#
#
# optional modules — each launches an external tool in the background
#
if 'screenshot' in app.mods:
    from modules import screenshot
    screenshot.run( app )
if 'quickhits' in app.mods:
    from modules import quickhits
    quickhits.run( app )
if 'crlf' in app.mods:
    from modules import crlf
    crlf.run( app )
if 'openredirect' in app.mods:
    from modules import openredirect
    openredirect.run( app )
#
# ###
#
# app.wait()
# next
# cors
# google dorks
# new subdomains
# endpoints
# gf mykeys
# gf noisy
# gf takeovers
# final report
|
{"/myrecon.py": ["/modules/app.py", "/modules/subdomains.py", "/modules/resolve.py"]}
|
2,538
|
blackout314/myrecon.py
|
refs/heads/master
|
/modules/functions.py
|
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import argparse
import tldextract
def parseargs( app, t_available_mods ):
    """Parse CLI arguments and configure `app` (output dir, domains, mods).

    Exits via parser.error() when no valid domain is supplied or an
    unknown mod is requested.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument( "-d","--domain",help="domain, single, multiples or files", action="append" )
    parser.add_argument( "-o","--output",help="output dir" )
    parser.add_argument( "-m","--mod",help="mods to run, can be: resolve, screenshots, quickhits, crlf, openredirect. Default: resolve,screenshots,quickhits" )
    # Fix: parse_args() was previously called twice; once is enough.
    args = parser.parse_args()
    if args.output:
        # Fix: dropped the unused `output_dir` local; create the dir only
        # when it does not already exist.
        if not os.path.isdir(args.output):
            try:
                os.makedirs( args.output )
            except Exception as e:
                # Fix: the old message used fg()/attr() from `colored`,
                # which this module never imports (NameError on this path).
                sys.stdout.write( "[-] error occurred: %s\n" % e )
                exit()
        app.setOutputDirectory( args.output )
    else:
        app.setOutputDirectory( os.getcwd() )
    if args.domain:
        t_domains = []
        for d in args.domain:
            if os.path.isfile(d):
                # A file argument contributes one domain per line.
                sys.stdout.write( '[+] loading file: %s\n' % d )
                for l in open(d,'r'):
                    l = l.strip()
                    if isDomain(l) and l not in t_domains:
                        t_domains.append( l )
            else:
                if isDomain( d ) and d not in t_domains:
                    t_domains.append( d )
        if not len(t_domains):
            parser.error( 'domain missing' )
    else:
        parser.error( 'domain missing' )
    if args.mod:
        t_mods = []
        for m in args.mod.split(','):
            if not m in t_available_mods and m != 'all':
                parser.error( ("mod '%s' doesn't exist" % m) )
            else:
                if m == 'all':
                    t_mods = t_available_mods
                    break
                else:
                    t_mods.append( m )
        if not len(t_mods):
            parser.error( 'mod missing' )
    else:
        # No -m given: run every available mod.
        t_mods = t_available_mods
    app.setDomains( t_domains )
    app.setMods( t_mods )
def isDomain( value ):
    """Return True when `value` is a bare registrable domain (no subdomain).

    Fix: renamed the parameter from `str`, which shadowed the builtin.
    Existing callers pass it positionally, so the rename is compatible.
    """
    t_parse = tldextract.extract( value )
    return t_parse.subdomain == '' and t_parse.domain != '' and t_parse.suffix != ''
|
{"/myrecon.py": ["/modules/app.py", "/modules/subdomains.py", "/modules/resolve.py"]}
|
2,539
|
blackout314/myrecon.py
|
refs/heads/master
|
/modules/subdomains.py
|
# I don't believe in license.
# You can do whatever you want with this program.
import re
import sys
import subprocess
from colored import fg, bg, attr
from functools import partial
from multiprocessing.dummy import Pool
class Subdomains( object ):
    """Enumerate subdomains of target domains via the `findomain` tool."""

    def __init__( self ):
        # Fix: `hosts` was a mutable class attribute shared by instances.
        self.hosts = []
        self.n_hosts = 0

    def run( self, t_domains ):
        """Enumerate every domain in t_domains with a 3-worker thread pool."""
        sys.stdout.write( '[+] looking for subdomains...\n' )
        t_multiproc = {
            'n_current': 0,
            'n_total': len(t_domains)
        }
        pool = Pool( 3 )
        pool.map( partial(self.find,t_multiproc), t_domains )
        pool.close()
        pool.join()
        self.n_hosts = len(self.hosts)

    def find( self, t_multiproc, domain ):
        """Run findomain for one domain and collect matching hostnames."""
        sys.stdout.write( 'progress: %d/%d\r' % (t_multiproc['n_current'],t_multiproc['n_total']) )
        t_multiproc['n_current'] = t_multiproc['n_current'] + 1
        try:
            cmd = 'findomain -t ' + domain
            output = subprocess.check_output( cmd, stderr=subprocess.STDOUT, shell=True ).decode('utf-8')
        except Exception as e:
            sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
            return
        # Raw string for the regex. `domain` is interpolated verbatim, so
        # dots in it match any character — acceptable for this matcher.
        matches = re.findall( r'([a-zA-Z0-9\._-]+\.'+domain+')', output)
        if matches:
            for sub in matches:
                sub = sub.strip('._- ')
                if sub not in self.hosts:
                    self.hosts.append( sub )
|
{"/myrecon.py": ["/modules/app.py", "/modules/subdomains.py", "/modules/resolve.py"]}
|
2,540
|
blackout314/myrecon.py
|
refs/heads/master
|
/modules/app.py
|
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import time
from colored import fg, bg, attr
class App( object ):
    """Holds run state (domains, hosts, ips, urls) and writes result files."""

    def __init__( self ):
        # Fix: these were mutable class attributes, shared (and mutated)
        # across all App instances; now each instance owns its state.
        self.mods = []
        self.d_output = ''
        self.f_domains = ''
        self.f_hosts = ''
        self.f_tmphosts = ''
        self.f_dead = ''
        self.f_ips = ''
        self.f_urls = ''
        self.domains = []
        self.n_domains = 0
        self.hosts = []
        self.n_hosts = 0
        self.ips = []
        self.n_ips = 0
        self.dead = []
        self.n_dead = 0
        self.urls = []
        self.n_urls = 0

    def wait( self ):
        """Spin a console indicator (used to keep the process alive)."""
        # Fix: removed the unused local `i`.
        t_chars = ['|','/','-','\\','|','/','-']
        l = len(t_chars)
        sys.stdout.write( "\n\n" )
        for n in range(100000):
            time.sleep( 0.5 )
            sys.stdout.write( ' %s\r' % t_chars[n%l] )

    def setMods( self, t_mods ):
        self.mods = t_mods

    def setOutputDirectory( self, cd_output ):
        self.d_output = cd_output
        sys.stdout.write( '[+] output directory is: %s\n' % self.d_output )
        self.initFilePath()

    def initFilePath( self ):
        """Derive all output file paths from the output directory."""
        self.f_domains = self.d_output + '/domains'
        self.f_hosts = self.d_output + '/hosts'
        self.f_tmphosts = self.d_output + '/tmp_hosts'
        self.f_dead = self.d_output + '/dead'
        self.f_ips = self.d_output + '/ips'
        self.f_urls = self.d_output + '/urls'

    def setDomains( self, t_domains ):
        """Record the target domains and save them to the domains file."""
        self.domains = t_domains
        self.n_domains = len(t_domains)
        sys.stdout.write( '%s[+] %d domains found.%s\n' % (fg('green'),self.n_domains,attr(0)) )
        if self.n_domains:
            # Fix: `with` guarantees the handle is closed even on errors.
            with open( self.f_domains, 'w' ) as fp:
                fp.write( "\n".join(self.domains) )
            sys.stdout.write( '[+] saved in %s\n' % self.f_domains )

    def setHosts( self, t_hosts ):
        """Record the discovered hosts and save them to the hosts file."""
        self.hosts = t_hosts
        self.n_hosts = len(t_hosts)
        sys.stdout.write( '%s[+] %d hosts found.%s\n' % (fg('green'),self.n_hosts,attr(0)) )
        if self.n_hosts:
            with open( self.f_hosts, 'w' ) as fp:
                fp.write( "\n".join(self.hosts) )
            sys.stdout.write( '[+] saved in %s\n' % self.f_hosts )

    def setIps( self, t_ips, full_output ):
        """Record resolved ips; also keep the raw resolver output."""
        self.ips = t_ips
        self.n_ips = len(t_ips)
        sys.stdout.write( '%s[+] %d ips found.%s\n' % (fg('green'),self.n_ips,attr(0)) )
        if self.n_ips:
            with open( self.f_ips, 'w' ) as fp:
                fp.write( "\n".join(t_ips) )
            sys.stdout.write( '[+] saved in %s\n' % self.f_ips )
            with open( self.f_tmphosts, 'w' ) as fp:
                fp.write( full_output )

    def setDeadHosts( self, t_dead ):
        """Drop hosts that failed to resolve from the host list."""
        sys.stdout.write( '[+] %d dead hosts found, cleaning...\n' % len(t_dead) )
        for host in t_dead:
            self.hosts.remove( host )

    def createUrls( self ):
        """Build http/https urls for every host and ip, and save them."""
        sys.stdout.write( '[+] creating urls...\n' )
        for host in self.hosts:
            self.urls.append( 'http://'+host )
            self.urls.append( 'https://'+host )
        for ip in self.ips:
            self.urls.append( 'http://'+ip )
            self.urls.append( 'https://'+ip )
        self.n_urls = len( self.urls )
        sys.stdout.write( '%s[+] %d urls created.%s\n' % (fg('green'),self.n_urls,attr(0)) )
        if self.urls:
            with open( self.f_urls, 'w' ) as fp:
                fp.write( "\n".join(self.urls) )
            sys.stdout.write( '[+] saved in %s\n' % self.f_urls )
|
{"/myrecon.py": ["/modules/app.py", "/modules/subdomains.py", "/modules/resolve.py"]}
|
2,541
|
blackout314/myrecon.py
|
refs/heads/master
|
/modules/openredirect.py
|
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import subprocess
from colored import fg, bg, attr
def run( app ):
    """Launch the open-redirect scanner over the hosts file, detached."""
    sys.stdout.write( '[+] running mod: openredirect\n' )
    # Backgrounded with output silenced; os.system returns immediately.
    command = 'open-redirect.py -o ' + app.f_hosts + ' 2>&1 >/dev/null &'
    os.system( command )
|
{"/myrecon.py": ["/modules/app.py", "/modules/subdomains.py", "/modules/resolve.py"]}
|
2,542
|
blackout314/myrecon.py
|
refs/heads/master
|
/modules/screenshot.py
|
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import subprocess
from colored import fg, bg, attr
def run( app ):
    """Kick off EyeWitness screenshots for all collected urls, detached."""
    sys.stdout.write( '[+] running mod: screenshots\n' )
    # Backgrounded with output silenced; os.system returns immediately.
    command = 'EyeWitness --headless -f "' + app.f_urls + '" --user-agent "Mozilla/5.0 (X11; Linux i586; rv:63.0) Gecko/20100101 Firefox/63.0" --no-prompt --threads 10 -d ' + app.d_output + '/eye 2>&1 >/dev/null &'
    os.system( command )
|
{"/myrecon.py": ["/modules/app.py", "/modules/subdomains.py", "/modules/resolve.py"]}
|
2,545
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/expenses_app/group/group.py
|
from flask import render_template, redirect, url_for, flash, Blueprint
from flask_login import login_required, current_user
from expenses_app.group.forms import CreateGroup, AddUserToGroup, RemoveUserFromGroup, AddAccountToGroup, RemoveAccountFromGroup
from expenses_app.models import db, User, Group, Account
grp_bp = Blueprint(
'grp_bp', __name__,
template_folder='templates',
static_folder='static'
)
@grp_bp.route("/", methods=["GET", "POST"])
@login_required
def index():
    """Landing page: show the create-group form and handle submissions."""
    form = CreateGroup()
    if form.validate_on_submit():
        new_group_name = form.name.data
        # Group names are globally unique; reject duplicates with a flash.
        if Group.query.filter(Group.name == new_group_name).first():
            flash(f"{new_group_name} has already been taken! Try another name.")
        else:
            current_user.create_group(new_group_name)
            db.session.commit()
    return render_template("index.html", form=form)
@grp_bp.route("/groups/<group_name>/summary", methods=["GET", "POST"])
@login_required
def group_summary(group_name):
    """Show a group's summary page to its members; others go to the index."""
    group = group_from_group_name(group_name)
    if not (group and group.has_user(current_user)):
        return redirect(url_for("grp_bp.index"))
    return render_template("group_summary.html", group=group)
@grp_bp.route("/groups/<group_name>/access", methods=["GET", "POST"])
@login_required
def group_access(group_name):
    """Render the membership-management page for a group's members."""
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        return render_template(
            "group_access.html",
            group=group,
            add_form=AddUserToGroup(),
            remove_form=RemoveUserFromGroup.from_group(group, current_user),
        )
    # NOTE(review): sibling views redirect to grp_bp.index here; this one
    # renders index.html directly (and without the `form` the template may
    # expect) — confirm which behavior is intended.
    return render_template("index.html", group=group)
@grp_bp.route("/groups/<group_name>/remove_user", methods=["POST"])
@login_required
def remove_user_from_group(group_name):
    """Remove a member from a group (members only)."""
    group = group_from_group_name(group_name)
    # Fix: previously any authenticated user could remove members from any
    # group; an unknown group name also crashed on `None.has_user`.
    if not (group and group.has_user(current_user)):
        return redirect(url_for("grp_bp.index"))
    remove_form = RemoveUserFromGroup.from_group(group, current_user)
    if remove_form.validate_on_submit():
        old_user = User.query.get(remove_form.username.data)
        # Fix: guard against a stale or forged user id.
        if old_user and group.has_user(old_user):
            group.remove_user(old_user)
            db.session.commit()
    return redirect(url_for("grp_bp.group_access", group_name=group_name))
@grp_bp.route("/groups/<group_name>/add_user", methods=["POST"])
@login_required
def add_user_to_group(group_name):
    """Add a user (by username) to a group (members only)."""
    group = group_from_group_name(group_name)
    # Fix: require the current user to be a member before mutating the group.
    if not (group and group.has_user(current_user)):
        return redirect(url_for("grp_bp.index"))
    add_form = AddUserToGroup()
    if add_form.validate_on_submit():
        user_name = add_form.username.data
        new_user = User.query.filter_by(username=user_name).first()
        if not new_user:
            flash(f"{user_name} is not a valid username!")
        elif group.has_user(new_user):
            # Fix: adding an existing member duplicated the membership row.
            flash(f"{user_name} is already a member of this group!")
        else:
            group.add_user(new_user)
            db.session.commit()
    return redirect(url_for("grp_bp.group_access", group_name=group_name))
@grp_bp.route("/groups/<group_name>/accounts")
@login_required
def group_accounts(group_name):
    """Render the account-management page for a group's members."""
    group = group_from_group_name(group_name)
    if not (group and group.has_user(current_user)):
        return redirect(url_for("grp_bp.index"))
    return render_template(
        "group_accounts.html",
        group=group,
        add_form=AddAccountToGroup.from_group(group),
        remove_form=RemoveAccountFromGroup.from_group(group),
    )
@grp_bp.route("/groups/<group_name>/add_account", methods=["POST"])
@login_required
def add_account_to_group(group_name):
    """Create (or revive) an account in a group (members only)."""
    group = group_from_group_name(group_name)
    # Fix: previously any authenticated user could add accounts to any group.
    if not (group and group.has_user(current_user)):
        return redirect(url_for("grp_bp.index"))
    add_form = AddAccountToGroup.from_group(group)
    if add_form.validate_on_submit():
        name = add_form.name.data
        name_exists = Account.query.filter(Account.name == name, Account.group_id == group.id).first()
        if name_exists and name_exists.status == "live":
            flash("The account name already exists in this group!")
        elif name_exists:
            # Fix: reviving a removed account previously re-set its status
            # to "removed" (a no-op); restore it to "live" instead.
            name_exists.status = "live"
            db.session.commit()
        else:
            user_id = add_form.user.data
            # -1 encodes "no user" in the select field.
            user = User.query.get(user_id) if user_id > -1 else None
            has_balance = add_form.has_balance.data
            balance = add_form.starting_balance.data if has_balance else None
            Account.create_account(group, name, user, balance)
            db.session.commit()
    return redirect(url_for("grp_bp.group_accounts", group_name=group_name))
@grp_bp.route("/groups/<group_name>/remove_account", methods=["POST"])
@login_required
def remove_account_from_group(group_name):
    """Soft-delete an account from a group (members only)."""
    group = group_from_group_name(group_name)
    # Fix: membership check was missing; also guard a None/foreign account.
    if not (group and group.has_user(current_user)):
        return redirect(url_for("grp_bp.index"))
    remove_form = RemoveAccountFromGroup.from_group(group)
    if remove_form.validate_on_submit():
        old_account = Account.query.get(remove_form.name.data)
        if old_account and old_account.group_id == group.id:
            old_account.status = "removed"
            db.session.commit()
    return redirect(url_for("grp_bp.group_accounts", group_name=group_name))
def group_from_group_name(group_name):
    """Look up a Group by its unique name; returns None when absent."""
    return Group.query.filter_by(name=group_name).first()
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,546
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/expenses_app/auth/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import InputRequired, Email, EqualTo
class LogInForm(FlaskForm):
    """Login form: email + password. Field definition order drives rendering."""
    email = StringField("Email", [
        InputRequired(message="You must provide an email address to continue"),
        Email(message="Email entered is not a valid email address")])
    password = PasswordField("Password", [InputRequired(message="You must provide a password to continue")])
    submit = SubmitField("Submit")
class Register(FlaskForm):
    """Registration form: email, username, password + confirmation."""
    email = StringField("Email", [
        InputRequired(message="You must provide an email address to continue"),
        Email(message="Email entered is not a valid email address")])
    username = StringField(
        # Fix: user-facing message typo "profice" -> "provide".
        "Username", [InputRequired(message="You must provide a username to continue")]
    )
    password = PasswordField("Password", [InputRequired(message="You must provide a password to continue")])
    confirm = PasswordField("Confirm", [
        InputRequired(message="You must provide a password to continue"),
        EqualTo("password", message="Password and confirmation must be the same!")
    ])
    submit = SubmitField("Submit")
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,547
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/expenses_app/group/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, FloatField, BooleanField
from wtforms.validators import InputRequired
class CreateGroup(FlaskForm):
    """Form for creating a new expense group by name."""
    name = StringField("Name", [InputRequired(message="You must provide a name for the group!")])
    create = SubmitField("Create")
class AddUserToGroup(FlaskForm):
    """Form for adding an existing user to a group by username."""
    username = StringField("Username", [InputRequired(message="You must provide a name for the user!")])
    add = SubmitField("Add")
class RemoveUserFromGroup(FlaskForm):
    """Form for removing a member from a group via a select of user ids."""
    # The select's values are user ids (coerce=int), labelled by username.
    username = SelectField(
        "Username", coerce=int, validators=[InputRequired(message="You must provide a user to remove!")])
    remove = SubmitField("Remove")

    @classmethod
    def from_group(cls, group, current_user):
        """Build the form; choices are every member except current_user."""
        form = cls()
        form.username.choices = [
            (member.id, member.username) for member in group.members if member != current_user]
        return form
class AddAccountToGroup(FlaskForm):
    """Form for adding an account; may be linked to a user as an "avatar"."""
    name = StringField("Name", [InputRequired(message="You must provide a name for the account!")])
    # -1 encodes "no user" (a plain, non-avatar account).
    user = SelectField("User", coerce=int, default=-1)
    starting_balance = FloatField("Starting Balance")
    has_balance = BooleanField("Has Balance?", default=False)
    add = SubmitField("Add")

    @classmethod
    def from_group(cls, group):
        """Build the form; users who already have a live avatar are excluded."""
        add_form = cls()
        users_with_avatars = set(account.avatar_for for account in group.accounts if account.is_avatar and account.status == "live")
        add_form.user.choices = [
            (user.id, user.username) for user in group.members if user not in users_with_avatars
        ]
        add_form.user.choices.append((-1, "None"))
        return add_form
class RemoveAccountFromGroup(FlaskForm):
    """Form for soft-removing a non-avatar account from a group."""
    # Despite the label, the select's values are account ids (coerce=int).
    name = SelectField(
        "Name", coerce=int, validators=[InputRequired(message="You must provide an account to be removed!")])
    remove = SubmitField("Remove")

    @classmethod
    def from_group(cls, group):
        """Build the form; choices are the group's live, non-avatar accounts."""
        remove_form = cls()
        remove_form.name.choices = [
            (account.id, account.name) for account in group.accounts
            if account.status == "live" and not account.is_avatar
        ]
        return remove_form
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,548
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/expenses_app/commands.py
|
import click
from flask import current_app as app
from expenses_app.models import db, AuthorisedEmail, User
@app.cli.command("reset-db")
def reset_db():
    """Used to reset the db for the app"""
    # Destructive: drops every table, then recreates the full schema.
    click.echo("Resetting db...")
    db.drop_all()
    db.create_all()
    click.echo("Done")
@app.cli.command("create-auth-emails")
@click.argument("emails", nargs=-1)
def create_authorised_emails(emails):
    """Adds emails to the Authorised emails db"""
    # NOTE(review): AuthorisedEmail.email is unique, so passing an email
    # that already exists will make the commit fail — confirm intent.
    click.echo("Emails added to authorised_email:")
    if emails:
        for email in emails:
            click.echo(f"\t'{email}'")
            new_email = AuthorisedEmail()
            new_email.email = email
            db.session.add(new_email)
        db.session.commit()
@app.cli.command("create-user")
@click.argument("username")
@click.argument("password")
@click.argument("email")
def create_user(username, password, email):
    """Adds a user to the db"""
    click.echo(f"Creating user with email='{email}', username='{username}'")
    auth_email = AuthorisedEmail.query.filter_by(email=email).first()
    if auth_email:
        # NOTE(review): User.create_user returns None when the email is
        # already registered; the commit still runs in that case — confirm.
        User.create_user(auth_email, password, username)
        db.session.commit()
    else:
        raise ValueError(f"{email} is not an authorised email address!")
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,549
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/expenses_app/models.py
|
import datetime as dt
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
db = SQLAlchemy()
class AuthorisedEmail(db.Model):
    """Whitelisted email address; at most one User may register against it."""
    email_id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True)
    user = db.relationship("User", uselist=False, back_populates="email")
    # Flipped to True once a User successfully registers with this email.
    is_registered = db.Column(db.Boolean, nullable=False, default=False)

    def register_user(self, user):
        """Attach `user` to this email; returns False if already registered."""
        if self.is_registered:
            return False
        self.user = user
        self.is_registered = True
        return True

    def __repr__(self):
        return f"<AuthEmail {self.email}>"
# Association table for the many-to-many User <-> Group membership.
group_membership_table = db.Table(
    "group_membership", db.metadata,
    db.Column("user_id", db.Integer, db.ForeignKey("user.id")),
    db.Column("group_id", db.Integer, db.ForeignKey("group.id"))
)
class User(UserMixin, db.Model):
    """Application user; stores a password hash, never the raw password."""
    id = db.Column(db.Integer, primary_key=True)
    email_id = db.Column(
        db.Integer,
        db.ForeignKey("authorised_email.email_id"),
        unique=True,
        index=True,
        nullable=False
    )
    email = db.relationship("AuthorisedEmail", back_populates="user")
    username = db.Column(db.String(50), unique=True, nullable=False)
    password_hash = db.Column(db.String(128), nullable=False)
    time_joined = db.Column(db.DateTime, default=dt.datetime.utcnow)
    owned_groups = db.relationship("Group", back_populates="owner")
    groups = db.relationship("Group", secondary=group_membership_table, back_populates="members")

    def set_password(self, password):
        """Hash `password` and store only the hash."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    @classmethod
    def create_user(cls, email, password, username):
        """Create a user bound to AuthorisedEmail `email`.

        Returns the new User, or None when the email is already registered.
        Does not commit; callers own the session.
        """
        new_user = cls()
        new_user.set_password(password)
        new_user.email = email
        new_user.username = username
        if email.register_user(new_user):
            return new_user
        else:
            return None

    def create_group(self, name):
        """Create a group owned by this user and join it as first member.

        NOTE(review): does not return the new group and does not commit;
        callers must commit the session.
        """
        new_group = Group()
        new_group.name = name
        new_group.owner = self
        new_group.members.append(self)
        self.groups.append(new_group)

    def __repr__(self):
        return f"<User {self.username}>"
class Group(db.Model):
    """Expense group: one owner, many members, many accounts."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True, index=True, nullable=False)
    owner_id = db.Column(db.Integer, db.ForeignKey("user.id"), index=True, nullable=False)
    owner = db.relationship("User", back_populates="owned_groups")
    members = db.relationship("User", secondary=group_membership_table, back_populates="groups")
    accounts = db.relationship("Account", back_populates="group")

    def has_user(self, user):
        """Return True when `user` is a member of this group."""
        return user in self.members

    def add_user(self, new_user):
        """Append `new_user` to the membership (no duplicate check here)."""
        self.members.append(new_user)

    def remove_user(self, old_user):
        """Drop `old_user` from the membership; raises ValueError if absent."""
        self.members.remove(old_user)

    def remove_account(self, old_account):
        """Detach `old_account` from this group's account list."""
        self.accounts.remove(old_account)

    def __repr__(self):
        return f"<Group {self.id}, {self.name}>"
class Account(db.Model):
    """An account within a group; optionally an "avatar" tied to one user.

    Balances are stored as integer cents to avoid float rounding.
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), index=True, nullable=False)
    group_id = db.Column(db.Integer, db.ForeignKey("group.id"), index=True, nullable=False)
    group = db.relationship("Group", uselist=False, back_populates="accounts")
    is_avatar = db.Column(db.Boolean, nullable=False, default=False)
    avatar_for_user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=True)
    avatar_for = db.relationship("User", uselist=False)
    has_balance = db.Column(db.Boolean, default=False, nullable=False)
    starting_balance_cents = db.Column(db.Integer, nullable=True)
    status = db.Column(db.Enum("live", "removed", name="account_status"), nullable=False, default="live")
    # Fix: a bare db.UniqueConstraint(...) statement in the class body is
    # discarded by SQLAlchemy; it must be declared via __table_args__.
    __table_args__ = (db.UniqueConstraint("name", "group_id", name="uix_group_name"),)

    @property
    def starting_balance(self):
        """Balance in currency units, or None when no balance is tracked."""
        # Fix: the guard previously read self.starting_balance, recursing
        # infinitely; it must inspect the stored cents column.
        if self.starting_balance_cents is None:
            return None
        return round(self.starting_balance_cents / 100, 2)

    @starting_balance.setter
    def starting_balance(self, new_balance):
        """Set the balance (currency units) and keep has_balance in sync."""
        if new_balance is not None:
            self.has_balance = True
            self.starting_balance_cents = round(new_balance * 100)
        else:
            self.has_balance = False
            self.starting_balance_cents = None

    @classmethod
    def create_account(cls, group, name, user, balance):
        """Alternate constructor; a non-None `user` marks an avatar account."""
        new_account = cls()
        new_account.group = group
        new_account.name = name
        new_account.is_avatar = user is not None
        new_account.avatar_for = user
        new_account.starting_balance = balance
        return new_account
class Transactions(db.Model):
    """A payment made by one account on behalf of another within a group."""
    id = db.Column(db.Integer, primary_key=True)
    group_id = db.Column(db.Integer, db.ForeignKey("group.id"), nullable=False)
    paid_by_id = db.Column(db.Integer, db.ForeignKey("account.id"), nullable=False)
    on_behalf_of_id = db.Column(db.Integer, db.ForeignKey("account.id"), nullable=False)
    description = db.Column(db.Text(200), nullable=True)
    store = db.Column(db.Text(100), nullable=True)
    # NOTE(review): stored as float, unlike Account's integer cents — confirm.
    amount = db.Column(db.Float, nullable=False)
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,550
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/config.py
|
from os import environ
from pathlib import Path
from dotenv import load_dotenv
BASE_DIRECTORY = Path(__file__).parent
load_dotenv(BASE_DIRECTORY / ".env")
class Config:
    """Flask configuration, populated from the project's .env file."""
    # NOTE(review): development/debug flags are enabled here; confirm this
    # config is never used as-is in production.
    FLASK_ENV = "development"
    TESTING = True
    DEBUG = True
    SECRET_KEY = environ.get("SECRET_KEY")
    STATIC_FOLDER = "static"
    TEMPLATES_FOLDER = "templates"
    # Database values
    SQLALCHEMY_DATABASE_URI = environ.get("SQLALCHEMY_DATABASE_URI")
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Echoes every SQL statement to the log.
    SQLALCHEMY_ECHO = True
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,551
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/expenses_app/auth/auth.py
|
from flask import url_for, flash, render_template, make_response
from flask import Blueprint
from flask_login import login_user, login_required, logout_user
from werkzeug.utils import redirect
from expenses_app import db, login_manager
from expenses_app.auth.forms import LogInForm, Register
from expenses_app.models import AuthorisedEmail, User
# Blueprint grouping the authentication routes (login / register / logout).
auth_bp = Blueprint(
    'auth_bp', __name__,
    template_folder='templates',
    static_folder='static'
)
@auth_bp.route("/login", methods=["GET", "POST"])
def login():
    """Render the login form; on a valid POST, authenticate and start a session."""
    form = LogInForm()
    if form.validate_on_submit():
        submitted_email = form.email.data
        submitted_password = form.password.data
        auth_email = AuthorisedEmail.query.filter(AuthorisedEmail.email == submitted_email).first()
        if auth_email and auth_email.user and auth_email.user.check_password(submitted_password):
            login_user(auth_email.user)
            return redirect(url_for("grp_bp.index"))
        # TODO: Limit number of retries
        flash("Invalid email or password!")
    return render_template("login.html", form=form)
@auth_bp.route("/register", methods=["GET", "POST"])
def register():
    """Register a user account for a pre-authorised email address."""
    form = Register()
    if form.validate_on_submit():
        auth_email = AuthorisedEmail.query.filter_by(email=form.email.data).first()
        username_taken = User.query.filter_by(username=form.username.data).first()
        if auth_email is None:
            flash("Email is not an authorised email! This is a private service.")
        elif auth_email.is_registered:
            flash("You are already registered! Try logging in instead!")
        elif username_taken:
            flash("That username already exists! Try another")
        else:
            user = User.create_user(auth_email, form.password.data, form.username.data)
            db.session.commit()
            if not user:
                # TODO: Handle these errors more nicely
                return make_response("Something went wrong with registration!", 500)
            login_user(user)
            return redirect(url_for("grp_bp.index"))
    return render_template("register.html", form=form)
@auth_bp.route("/logout")
@login_required
def logout():
    """End the current session and return the user to the login page."""
    logout_user()
    return redirect(url_for("auth_bp.login"))
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: fetch the session's User by primary key (None if no id)."""
    return None if user_id is None else User.query.get(user_id)
@login_manager.unauthorized_handler
def unauthorized():
    """Flask-Login hook: prompt anonymous visitors to log in first."""
    flash('You must be logged in to view that page.')
    return redirect(url_for('auth_bp.login'))
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,552
|
doctor-budoka/shared-expenses-site
|
refs/heads/master
|
/expenses_app/__init__.py
|
from flask import Flask
from expenses_app.models import db
from flask_login import LoginManager
# Shared LoginManager instance, bound to the app inside create_app().
login_manager = LoginManager()
def create_app():
    """Application factory: configure Flask, init extensions, register blueprints."""
    app = Flask(__name__, template_folder="templates")
    app.config.from_object("config.Config")
    db.init_app(app)
    login_manager.init_app(app)
    with app.app_context():
        # Blueprints are imported inside the context — presumably to avoid
        # circular imports with modules that import this package.
        from expenses_app.auth import auth
        app.register_blueprint(auth.auth_bp)
        from expenses_app.group import group
        app.register_blueprint(group.grp_bp)
        # Imported for its side effects (registering CLI commands) — verify.
        from expenses_app import commands
        db.create_all()
    return app
|
{"/expenses_app/group/group.py": ["/expenses_app/group/forms.py", "/expenses_app/models.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/auth/auth.py": ["/expenses_app/__init__.py", "/expenses_app/auth/forms.py", "/expenses_app/models.py"], "/expenses_app/__init__.py": ["/expenses_app/models.py"]}
|
2,622
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/Features.py
|
#!/usr/bin/env python
import os, pdb
import numpy as np
from scipy import interpolate, signal
from .node_graph import Graph
import matplotlib.pyplot as plt
import math
import uuid
# Superclass for Poles and tracks that stores positional and intensity information
class Feature:
    """Base class for poles and tracks: stores time/position/intensity samples.

    On construction the position samples are linearly resampled onto a
    uniform time grid (see ResampleData); intensity is stored as given and
    NOT resampled.
    """

    def __init__(self, time, position, intensity, strain='xxx', time_step=1):
        """
        time, position : equal-length sample arrays.
        intensity : intensity samples (kept as-is).
        strain : label of the strain this feature came from.
        time_step : acquisition step used to convert time to pixel units.
        """
        self.time = np.array(time)
        self.position = np.array(position)
        self.intensity = np.array(intensity)
        self.id = uuid.uuid1()
        self.strain = strain
        self.time_step = time_step
        self.pixel_time = self.time / self.time_step
        # Resample data onto a uniform grid.
        self.ResampleData()

    def ResampleData(self, sample_factor=3):
        """Linearly resample position onto a uniform time grid.

        The grid has roughly sample_factor points per unit time, never fewer
        than 2 points.
        """
        # Interpolation function for positions.
        ifunc_pos = interpolate.interp1d(self.time, self.position, kind='linear')
        # BUGFIX: np.linspace requires an integer sample count; the original
        # passed a float (sample_factor * time span), which raises TypeError
        # on modern numpy. Cast explicitly. The count is clamped to >= 2, so
        # the old single-point debug trap became unreachable and was removed.
        num = int(max(2, sample_factor * (self.time[-1] - self.time[0])))
        self.time = np.linspace(self.time[0], self.time[-1], num)
        self.position = ifunc_pos(self.time)
# Class for a Pole
class Pole(Feature):
    """A spindle pole: a Feature plus an extrapolating position function."""

    def __init__(self, time, position, intensity=[], time_step=1, strain='xxx'):
        Feature.__init__(self, time, position, intensity, strain=strain, time_step=time_step)
        # Interpolate/extrapolate the pole position to arbitrary times.
        self.ifunc = interpolate.interp1d(self.time, self.position, kind='linear', fill_value='extrapolate')

    def Print(self):
        """Dump this pole's data to stdout."""
        print('Pole :')
        print(f' ID : {self.id}')
        print(f' Time : {self.time}')
        print(f' Position : {self.position}')
        print(f' Intensity : {self.intensity}')
        print('--------------------------------- ')
# Class for a Track: additionally stores associated poles and track direction
class Track(Feature):
    """A kymograph track with its associated poles and direction.

    direction is one of 'Poleward', 'Antipoleward', 'Inactive', 'Ambiguous';
    line_type is 'Line' (single direction) or 'Curve' (may switch direction).

    Fixes relative to the original: leftover pdb.set_trace() debug traps
    removed (one fired for the default time_step=1); CalcVelocityMean and
    CalcSwitchingCount now unpack SplitTrack()'s (tracks, switches) tuple,
    read .direction instead of the nonexistent .direc, and use
    np.concatenate (np.concat does not exist); string comparisons use '=='
    instead of 'is'.
    """

    def __init__(self, time, position, intensity, poles, direction, line_type, time_step=1, strain='xxx'):
        """
        poles : list of Pole objects (1 = monopolar, 2 = bipolar spindle).
        """
        Feature.__init__(self, time, position, intensity, time_step=time_step, strain=strain)
        self.poles = poles
        self.direction = direction
        self.line_type = line_type
        self.polePosition = []
        # Cached analysis results, filled lazily by the Calc* methods / Analyze().
        self.data = {
            'pos_pole': np.zeros((2, np.size(self.position))),
            'pos_track_rel': [],
            'velocity': {'P': [], 'AP': [], 'I': []},
            'runlength': {'P': [], 'AP': [], 'I': []},
            'lifetime': {'P': [], 'AP': [], 'I': []},
            'lifetime_total': [],
            'velocity_mean': [],
            'switch_count': [],
            'switch_total': [],
        }
        self.bad = 0
        # Order poles with 1st pole being main pole (closest at start)
        # self.OrderPoles()
        # Calculate spindle length
        self.CalcSpindleLength()
        # Curves may switch direction, so their overall direction is ambiguous.
        if self.line_type == 'Curve' and self.direction != 'Ambiguous':
            self.direction = 'Ambiguous'

    def Analyze(self, ipole=0):
        """Split into unidirectional segments and fill self.data (relative to pole ipole)."""
        self.CalcPositionTrackRelativeToPole()
        # Split the track and save analysis
        tracks_mini, switches = self.SplitTrack(ipole=ipole)
        for track in tracks_mini:
            if track.direction == 'Poleward':
                label = 'P'
            elif track.direction == 'Antipoleward':
                label = 'AP'
            elif track.direction == 'Inactive':
                label = 'I'
            else:
                raise ValueError('line direction is neither poleward nor antipoleward nor inactive')
            # Velocity / run length / lifetime of the mini track
            self.data['velocity'][label] += [track.CalcVelocityLinear(ipole=ipole)]
            self.data['runlength'][label] += [track.CalcRunLength(ipole=ipole)]
            self.data['lifetime'][label] += [track.CalcLifetime()]
        # Combine data from the split tracks
        self.data['lifetime_total'] = self.CalcLifetime()
        self.data['velocity_mean'] = self.CalcVelocityMean(ipole=ipole)
        self.data['switch_count'] = switches

    def OrderPoles(self):
        """Reorder self.poles so the first pole is closest to the track start."""
        if len(self.poles) != 2:
            return
        pos = self.CalcPositionTrackRelativeToPole()
        if np.absolute(pos[1, 0]) < np.absolute(pos[0, 0]):
            self.poles = [self.poles[1], self.poles[0]]

    def CalcSpindleLength(self):
        """Distance between the two poles over this track's time span (bipolar only)."""
        if len(self.poles) != 2:
            return
        self.spindleLength = np.absolute(self.poles[0].ifunc(self.time) - self.poles[1].ifunc(self.time))

    def CheckViability(self):
        """Flag the track as bad unless its time points strictly increase."""
        self.bad = 0
        if np.any(np.diff(self.time) <= 0):
            self.bad = 1
        return self.bad

    def CheckLinearLifetime(self, min_lt=0.5):
        """Flag 'Line' tracks whose lifetime falls below min_lt."""
        self.bad = 0
        if self.line_type == 'Line' and self.CalcLifetime() < min_lt:
            self.bad = 1
        return self.bad

    def CalcPositionPoleCurrent(self):
        """Pole position(s) interpolated/extrapolated to this track's time points."""
        for idx, pole in enumerate(self.poles):
            self.data['pos_pole'][idx, :] = np.array(pole.ifunc(self.time))
        return self.data['pos_pole']

    def CalcPositionTrackRelativeToPole(self):
        """Track position relative to each pole; cached in self.data['pos_track_rel']."""
        pos_pole = self.CalcPositionPoleCurrent()
        if len(self.poles) == 2:
            # Bipolar spindle: one row per pole.
            pos_track_rel = np.zeros(np.shape(pos_pole))
            for idx, ele in enumerate(pos_pole):
                pos_track_rel[idx, :] = np.array(self.position - ele)
        else:
            # Monopolar spindle.
            pos_track_rel = np.array(self.position - pos_pole)
        self.data['pos_track_rel'] = pos_track_rel
        return pos_track_rel

    def CalcVelocityLinear(self, ipole=0):
        """Time-weighted mean absolute speed relative to pole ipole (unidirectional tracks only)."""
        if self.direction == 'Ambiguous':
            raise Exception('Track.CalcVelocityLinear() is only defined for tracks with a single direction')
        # Calc relative positions if not done already
        if len(self.data['pos_track_rel']) == 0 or not self.data['pos_track_rel'].any():
            pos_track_rel = self.CalcPositionTrackRelativeToPole()
        else:
            pos_track_rel = self.data['pos_track_rel']
        vel = np.average(np.absolute(np.divide(np.diff(pos_track_rel[ipole, :]), np.diff(self.time))),
                         weights=np.diff(self.time))
        return vel

    def CalcRunLength(self, ipole=0):
        """Absolute displacement relative to pole ipole between start and end."""
        if len(self.data['pos_track_rel']) == 0 or not self.data['pos_track_rel'].any():
            pos_track_rel = self.CalcPositionTrackRelativeToPole()
        else:
            pos_track_rel = self.data['pos_track_rel']
        run_length = np.absolute(pos_track_rel[ipole, -1] - pos_track_rel[ipole, 0])
        self.data['run_length'] = run_length
        return run_length

    def CalcLifetime(self):
        """Total duration of the track."""
        lifetime = self.time[-1] - self.time[0]
        self.data['lifetime'] = lifetime
        return lifetime

    def CalcVelocityMean(self, ipole=0):
        """Mean of all P and AP segment speeds; splits curved tracks on demand."""
        if self.line_type == 'Curve' and not self.data['velocity']:
            # BUGFIX: unpack the (tracks, switches) tuple, use .direction, and
            # treat CalcVelocityLinear's scalar result as a scalar.
            tracks_mini, _ = self.SplitTrack(ipole=ipole)
            for track in tracks_mini:
                vv = track.CalcVelocityLinear(ipole=ipole)
                if track.direction == 'Poleward':
                    self.data['velocity']['P'] += [vv]
                elif track.direction == 'Antipoleward':
                    self.data['velocity']['AP'] += [vv]
        vel_mu = np.mean(np.concatenate((self.data['velocity']['P'], self.data['velocity']['AP'])))
        return vel_mu

    def CalcSwitchingCount(self):
        """NOTE(review): misnamed — returns the mean P/AP speed like CalcVelocityMean.

        Kept for interface compatibility; the np.concat / .direc / tuple bugs
        are fixed as in CalcVelocityMean.
        """
        if self.line_type == 'Curve' and not self.data['velocity']:
            tracks_mini, _ = self.SplitTrack()
            for track in tracks_mini:
                if track.direction == 'Poleward':
                    self.data['velocity']['P'] += [track.CalcVelocityLinear()]
                elif track.direction == 'Antipoleward':
                    self.data['velocity']['AP'] += [track.CalcVelocityLinear()]
        vel_mu = np.mean(np.concatenate((self.data['velocity']['P'], self.data['velocity']['AP'])))
        return vel_mu

    def CalcIntensityMean(self):
        """Mean intensity of this track (stored in self.data)."""
        self.data['intensity_mean'] = np.mean(self.intensity)

    def SplitTrack(self, ipole=0):
        """Split a curved track into unidirectional mini tracks.

        Returns (mini_tracks, switches), where switches[a][b] counts
        transitions from state a to state b (states 'P', 'AP', 'I').
        """
        switches = {
            'P': {'P': 0, 'AP': 0, 'I': 0},
            'AP': {'P': 0, 'AP': 0, 'I': 0},
            'I': {'P': 0, 'AP': 0, 'I': 0},
        }
        if self.direction != 'Ambiguous':
            return [self], switches
        if self.line_type == 'Line' and self.direction == 'Ambiguous':
            # Single straight line: classify by the sign of its mean velocity.
            position = np.absolute(self.CalcPositionTrackRelativeToPole())
            vel = np.mean(np.divide(np.diff(position), np.diff(self.time)))
            if abs(vel) < 0.005:
                self.direction = 'Inactive'
            elif vel > 0:
                self.direction = 'Antipoleward'
            elif vel < 0:
                self.direction = 'Poleward'
            return [self], switches
        # Find track position relative to the pole
        if len(self.data['pos_track_rel']) == 0 or not self.data['pos_track_rel'].any():
            position = self.CalcPositionTrackRelativeToPole()
        else:
            position = self.data['pos_track_rel'][ipole, :]
        position = np.absolute(position)
        states = []

        # Use a time-sliding window to find the average velocity, and use
        # that to assign a state ('P'/'AP'/'I') at each sample.
        def FindStates_RollingWindow(positions, times, t_window, v_cutoff=1):
            dt = np.mean(np.diff(times))
            n_hwindow = int(np.ceil(t_window / (2 * dt)))
            states = []
            for i, t in enumerate(times):
                i_min = max([0, i - n_hwindow])
                i_max = min([len(times), i + n_hwindow])
                vel = np.mean(np.divide(np.diff(positions[i_min:i_max]), np.diff(times[i_min:i_max])))
                # Assign labels based on value of vel
                if abs(vel) < v_cutoff:
                    states += ['I']
                elif vel > 0:
                    states += ['AP']
                elif vel < 0:
                    states += ['P']
            return states

        states = FindStates_RollingWindow(position, self.time, 5, v_cutoff=0.005)
        # Remove singly occurring states
        for cnt, st in enumerate(states):
            if cnt > 1 and cnt < len(states) - 1:
                if st != states[cnt - 1] and st != states[cnt + 1]:
                    states[cnt] = states[cnt - 1]
            # set first state to second state
            if cnt == 0:
                states[cnt] = states[cnt + 1]
            # set last state to second last state
            if cnt == len(states) - 1:
                states[cnt] = states[cnt - 1]
        # Count switches and get track indices
        p_state = 'XXX'
        track = {'pos': [], 'time': [], 'dir': []}
        idx = [0, 0]
        for cnt, st in enumerate(states):
            if cnt == 0:
                p_state = st
                idx[0] = 0
                continue
            if st == p_state:
                idx[1] += 1
            if st != p_state:
                # store the finished segment (one extra sample of overlap)
                pos = self.position[idx[0]: idx[1] + 2]
                time = self.time[idx[0]: idx[1] + 2]
                track['pos'] += [pos]
                track['time'] += [time]
                track['dir'] += [p_state]
                p_state = st
                # begin new segment
                idx[0] = cnt
                idx[1] = cnt
            # Store the last segment
            if cnt == len(states) - 1:
                pos = self.position[idx[0]: idx[1] + 1]
                time = self.time[idx[0]: idx[1] + 1]
                track['pos'] += [pos]
                track['time'] += [time]
                track['dir'] += [p_state]
        # record switches
        for cnt, dd in enumerate(track['dir']):
            if cnt == 0:
                continue
            switches[track['dir'][cnt - 1]][track['dir'][cnt]] += 1
        # Create track objects from the information
        mini_tracks = []
        for time, pos, direc in zip(track['time'], track['pos'], track['dir']):
            # BUGFIX: compare strings with '==', not 'is' (identity).
            if direc == 'P':
                direction = 'Poleward'
            elif direc == 'AP':
                direction = 'Antipoleward'
            elif direc == 'I':
                direction = 'Inactive'
            pos = pos.tolist()
            time = time.tolist()
            mini_tracks += [Track(time, pos, self.intensity, self.poles, direction, 'Line',
                                  time_step=self.time_step, strain=self.strain)]
        return mini_tracks, switches

    def PlotCurveWithStates(self, figname='curved_track.pdf'):
        """Plot a curved track with its states colored (I=blue, P=red, AP=green)."""
        cols = {
            'Inactive': 'blue',
            'Poleward': 'red',
            'Antipoleward': 'green',
        }
        minis, switches = self.SplitTrack()
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(111)
        pos_pole = self.CalcPositionPoleCurrent()
        for idx, pole in enumerate(self.poles):
            ax.plot(pos_pole[idx, :], self.time, linewidth=3)
        for trk in minis:
            ax.plot(trk.position, trk.time, linewidth=2, color=cols[trk.direction])
        plt.text(1, 1, 'Poleward', color='red', transform=ax.transAxes, ha='right', va='top')
        plt.text(1, 0.95, 'AntiPoleward', color='green', transform=ax.transAxes, ha='right', va='top')
        plt.text(1, 0.9, 'Inactive', color='blue', transform=ax.transAxes, ha='right', va='top')
        plt.text(1, 0.85, 'MainPole', color='skyblue', transform=ax.transAxes, ha='right', va='top')
        plt.text(1, 0.8, 'SecondaryPole', color='orange', transform=ax.transAxes, ha='right', va='top')
        # Set axes limits to cover both the track and the poles
        axes = plt.gca()
        x_min = min([min(self.position), min([min(pol.position) for pol in self.poles])]) - 0.5
        x_max = max([max(self.position), max([max(pol.position) for pol in self.poles])]) + 0.5
        axes.set_xlim([x_min, x_max])
        axes.set_xlabel('Position')
        axes.set_ylim([min(self.time) - 5, max(self.time) + 5])
        axes.set_ylabel('Time')
        fig.savefig(figname)

    def Trim(self, lrange=[0, 100]):
        """Return a new track restricted to times where the spindle length is in lrange.

        Returns self for monopolar tracks and None when nothing remains.
        (The mutable default is kept for interface compatibility; it is never
        mutated here.)
        """
        if len(self.poles) == 1:
            return self
        lens = self.spindleLength
        idx = np.argwhere((lens > lrange[0]) & (lens < lrange[1])).T[0].tolist()
        if len(idx) == 0:
            return None
        idx = range(idx[0], idx[-1] + 1)
        tracknew = Track(self.time[idx], self.position[idx], self.intensity, self.poles,
                         self.direction, self.line_type, time_step=self.time_step, strain=self.strain)
        return tracknew

    def Print(self):
        """Dump this track's data to stdout."""
        print('Feature :')
        print(' ID : {}'.format(self.id))
        print(' Direction : {}'.format( self.direction))
        print(' Line type : {}'.format( self.line_type))
        print(' Time : {}'.format( self.time))
        print(' Position : {}'.format( self.position))
        print(' Intensity : {}'.format( self.intensity))
        print('--------------------------------- ')
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,623
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/smooth_test.py
|
#!/usr/bin/env python
import os, pdb
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from .Kymograph import *
import shutil
'''
Name: smooth_test.py
Description: Compares several moving-average smoothing windows on the pole separation of bipolar kymograph files
'''
# Dataset sub-folders of parent_path to process.
folds = ['wild type']
# NOTE(review): absolute, user-specific paths — parameterise before reuse.
savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/result_smoothing')
parent_path = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/data/temp')
def smooth_data(arr, span):
    """Moving-average smooth of *arr* with a window of 2*span+1 samples.

    Near the edges the averaging window is shrunk on the side that would
    reach beyond the data, while the other side keeps its full *span*.
    """
    window = 2 * span + 1
    out = np.convolve(arr, np.ones(window) / window, mode="same")
    out[0] = np.average(arr[:span])
    for k in range(1, span + 1):
        out[k] = np.average(arr[:k + span])
        out[-k] = np.average(arr[-k - span:])
    return out
# Create the output directory on first run.
if not Path.exists( savepath):
    os.mkdir( savepath)
for jj,jfold in enumerate(folds):
    print('Data: {0}'.format(jfold))
    mainpath = parent_path / jfold
    # txt files
    files2break = mainpath.glob('*txt')
    # Pole separation vs time
    print('Calculating pole separations...')
    # NOTE(review): this loop variable jj shadows the outer enumerate index.
    for jj, fil in enumerate(files2break):
        # print(fil)
        kymo = Kymograph(fname=str(fil))
        # Only bipolar kymographs (two poles) have a separation to plot.
        if len(kymo.poles) == 2:
            fig, ax = plt.subplots()
            # Merge and thin both poles' time samples, then resample uniformly.
            time = np.array( sorted( np.hstack( (kymo.poles[0].time, kymo.poles[1].time) ) )[1::10] )
            time = np.linspace(time[0], time[-1], int(np.ceil(time[-1]-time[0])))
            spindleLength = np.array( np.absolute( kymo.poles[1].ifunc(time)- kymo.poles[0].ifunc(time)) )
            # Compare several smoothing half-window sizes against the raw curve.
            slope_windows = [5,25,50]
            for slope in slope_windows:
                spindleLength_cnv = smooth_data(spindleLength, slope)
                ax.plot(time, spindleLength_cnv, label='Window = {0}'.format(slope))
            ax.plot(time, spindleLength, 'k:', lw=2,label='Original')
            ax.legend()
            ax.set(xlabel='Time (s)', ylabel=r'Pole separation ($\mu m$)')
            plt.tight_layout()
            plt.savefig(savepath / 'smoothing_{0}_{1}.pdf'.format(mainpath.stem, jj))
            plt.close(fig)
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,624
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/Strain.py
|
#!/usr/bin/env python
import os, pdb
from .Load import Load
from .Kymograph import Kymograph
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import math, random
import pickle
from pathlib import Path
from scipy import interpolate, signal
class Strain:
    """A collection of kymographs (and their tracks) for one strain."""

    def __init__(self, trackpaths, label="xxx"):
        # trackpaths : list of track-file paths; one Kymograph is built per path.
        self.paths = trackpaths
        self.label = label
        self.LoadKymographs()
        # Flattened, trimmed track list; filled by GetTracks().
        self.tracks = []
def LoadKymographs(self):
# Initialize kymograph classes for each loaded file
self.kymographs = []
for pth in self.paths:
print(pth)
kname = pth.split("/")[-1]
self.kymographs += [Kymograph(fname=pth)]
def GetTracks(self, spindle_length=None):
# Get tracks that lie within the spindle lengths defined. Trims the tracks
# Combine tracks from all kymographs
self.tracks = []
for kymo in self.kymographs:
for track in kymo.tracks:
trimmed = track.Trim(lrange=spindle_length)
if trimmed is not None:
self.tracks += [trimmed]
    def TrimUsingKmeansLabel(self, kmean_label):
        """Trim each bipolar kymograph to its pre- (0) or post- (1) anaphase-B phase.

        A pre-fitted k-means model (on spindle elongation velocity) classifies
        each time point; the labels are smoothed, forced monotone, and the
        anaphase-B transition time is where label==1 and spindle length > 2.
        """
        # load kmeans model
        # NOTE(review): pickle.load executes arbitrary code from the file —
        # only use with trusted model files.
        kmeans_path = Path(self.paths[0]).parent / "kmeans.pickle"
        with open(kmeans_path, "rb") as f:
            model = pickle.load(f)
        for kymo in self.kymographs:
            # Only do stuff if its bipolar
            if len(kymo.poles) == 1:
                continue
            # Merge/thin both poles' time samples, then resample uniformly.
            time = np.array(
                sorted(np.hstack((kymo.poles[0].time, kymo.poles[1].time)))[1::10]
            )
            time = np.linspace(time[0], time[-1], int(np.ceil(time[-1] - time[0])))
            # Spindle length and its finite-difference velocity (first value
            # duplicated so the arrays stay the same length).
            clen = np.absolute(kymo.poles[1].ifunc(time) - kymo.poles[0].ifunc(time))
            cvel = list((clen[1:] - clen[:-1]) / (time[1:] - time[:-1]))
            cvel.insert(0, cvel[0])
            cvel = np.array(cvel).reshape(-1, 1)
            # use velocity to predict label using fitted model
            labels_raw = model.predict(cvel)
            labels = self.ForceLabelsOneWay(
                self.SmoothClassifiedLabels(labels_raw, span=100)
            )
            # Three cases: never in anaphase B, always in anaphase B, or a
            # genuine transition somewhere in the middle.
            if np.max(labels) == 0 or np.max(clen) < 2:
                AB_transition = -1
                if kmean_label == 0:
                    time_keep = [time[0], time[-1]]
                elif kmean_label == 1:
                    time_keep = [-1, -1]
            elif np.min(labels) == 1:
                AB_transition = -1
                if kmean_label == 0:
                    time_keep = [-1, -1]
                elif kmean_label == 1:
                    time_keep = [time[0], time[-1]]
            else:
                AB_transition = time[np.where((labels == 1) & (clen > 2))[0][0]]
                if kmean_label == 0:
                    time_keep = [time[0], AB_transition]
                elif kmean_label == 1:
                    time_keep = [AB_transition, time[-1]]
            print("Total time = {0:.2f} - {1:.2f}".format(time[0], time[-1]))
            print("Anaphase B = {0:.2f}".format(AB_transition))
            print("Kmeans Label = {0}".format(kmean_label))
            print("Time 2 keep = {0}".format(time_keep))
            kymo.TrimBasedOnTime(time_keep)
# SmoothClassifiedLabels {{{
def SmoothClassifiedLabels(self, label, span=100):
# smooth_data {{{
def smooth_data(arr, span):
re = np.convolve(arr, np.ones(span * 2 + 1) / (span * 2 + 1), mode="same")
# The "my_average" part: shrinks the averaging window on the side that
# reaches beyond the data, keeps the other side the same size as given
# by "span"
re[0] = np.average(arr[:span])
for i in range(1, span + 1):
re[i] = np.average(arr[: i + span])
re[-i] = np.average(arr[-i - span :])
return re
# }}}
# Smoothed Labels
label_new = np.where(
np.array(smooth_data(label, min([span, int(len(label) / 2)]))) >= 0.5, 1, 0
)
# Once 1, always 1
# label_perm = [max(label_new[:1+jj]) for jj in range(len(label_new))]
return label_new
# }}}
# ForceLabelsOneWay {{{
def ForceLabelsOneWay(self, label):
labels = [np.max(label[: 1 + idx]) for idx in range(len(label))]
return np.array(labels)
# }}}
def TossFarTracks(self, threshold):
# Toss tracks that start above a threshold distance from the first pole
self.tracks = []
for kymo in self.kymographs:
tracksKeep = []
for track in kymo.tracks:
if track.CalcPositionRelative()[0, 0] < threshold:
tracksKeep.append(track)
kymo.tracks = tracksKeep
self.GetTracks()
def TossCloseTracks(self, threshold):
# Toss tracks that start above a threshold distance from the first pole
self.tracks = []
for kymo in self.kymographs:
tracksKeep = []
for track in kymo.tracks:
if track.CalcPositionRelative()[0, 0] > threshold:
tracksKeep.append(track)
kymo.tracks = tracksKeep
self.GetTracks()
def GetSegmentsPAP(self):
# Splits the tracks into segments and returns [poleward, antipoleward]
segs = {"Poleward": [], "Antipoleward": []}
bad_cnt = 0
good_cnt = 0
for track in self.tracks:
segments, _ = track.SplitTrack()
# Toss short-time segments
for seg in segments:
if seg.time[-1] - seg.time[0] < 2 * seg.time_step:
bad_cnt += 1
elif seg.direction is "Poleward":
good_cnt += 1
segs["Poleward"] += [seg]
elif seg.direction is "Antipoleward":
good_cnt += 1
segs["Antipoleward"] += [seg]
# if good_cnt + bad_cnt > 0:
# print('Major segments : {0} ({1:.2f}%)'.format(good_cnt, 100*good_cnt/(good_cnt+bad_cnt)))
return [segs["Poleward"], segs["Antipoleward"]]
    def FilterSegments(self, segments):
        # Filter segments by imposing restrictions on velocity, run length and lifetimes
        # NOTE(review): unimplemented stub — it currently only prints and
        # filters nothing; *segments* is unused.
        # Velocities
        print("1")
def GetRunLengths(self):
# Get run lengths of poleward and antipoleward tracks (units nm)
segsPAP = self.GetSegmentsPAP()
runlens = []
for segs in segsPAP:
runlen = [
1000
* np.absolute(
seg.CalcPositionRelative()[0, -1] - seg.CalcPositionRelative()[0, 0]
)
for seg in segs
]
runlens += [runlen]
return runlens
def GetVelocities(self):
# Get velocities of poleward and antipoleward tracks (units nm/sec)
segsPAP = self.GetSegmentsPAP()
vels = []
for segs in segsPAP:
vel = [1000 * seg.CalcVelocity()[0] for seg in segs]
vels += [vel]
return vels
def GetLifetimes(self):
# Get velocities of poleward and antipoleward tracks (units sec)
segsPAP = self.GetSegmentsPAP()
lifes = []
for segs in segsPAP:
life = [seg.time[-1] - seg.time[0] for seg in segs]
lifes += [life]
return lifes
def GetIntensities(self):
# Get velocities of poleward and antipoleward tracks
segsPAP = self.GetSegmentsPAP()
ins = []
for segs in segsPAP:
inss = [seg.CalcIntensity() for seg in segs]
ins += [inss]
return ins
def GetTotalSwitches(self):
# Get total switches out of state A to state B (states: poleward,antipoleward,inactive)
switches = {"P": 0, "AP": 0, "I": 0}
labs = ["P", "AP", "I"]
for track in self.tracks:
segments, trans = track.SplitTrack()
for lab in labs:
switches[lab] += sum([a for k, a in trans[lab].items()])
return switches
def GetFractionKymographsWithMovement(self):
# Get fraction of kymographs with movement events
nMovement = 0
for kymo in self.kymographs:
nAdd = 0
for track in kymo.tracks:
trks_all, _ = track.SplitTrack()
for mini in trks_all:
if mini.direction != "Inactive":
nAdd = 1
nMovement += nAdd
return nMovement / len(self.kymographs)
def GetDirectionalEventsPerMinute(self):
# Get total number of directional events per minute
events = {"P": 0, "AP": 0}
events_per_min = {"P": 0, "AP": 0}
for track in self.tracks:
segs, _ = track.SplitTrack()
for seg in segs:
if seg.direction == "Poleward":
events["P"] += 1
elif seg.direction == "Antipoleward":
events["AP"] += 1
# total kymograph time
time_total = 0
for kymo in self.kymographs:
time_total += kymo.poles[0].time[-1] - kymo.poles[0].time[0]
events_per_min["P"] = events["P"] / (time_total / 60)
events_per_min["AP"] = events["AP"] / (time_total / 60)
return events_per_min, events
    def GetDirectionalEventsPerMinutePerCell(self):
        """Per-kymograph directional event counts and observation times (minutes)."""
        events = {"P": [], "AP": []}
        times = {"P": [], "AP": []}
        for kymo in self.kymographs:
            nP = 0
            nAP = 0
            for track in kymo.tracks:
                segs, _ = track.SplitTrack()
                for seg in segs:
                    if seg.direction == "Poleward":
                        nP += 1
                    elif seg.direction == "Antipoleward":
                        nAP += 1
            # Sentinel: cells without events get time_total = 1 so the next
            # check (time_total > 1) excludes them from the output.
            if nP + nAP > 0:
                time_total = kymo.poles[0].time[-1] - kymo.poles[0].time[0]
            else:
                time_total = 1
            if time_total > 1:
                events["P"].append(nP)
                events["AP"].append(nAP)
                times["P"].append(time_total / 60)
                times["AP"].append(time_total / 60)
                # events['P'].append( nP/(time_total/60))
                # events['AP'].append( nAP/(time_total/60))
        return events, times
def GetTotalDirectionalTime(self):
# Find total number of directed time
times = {"P": 0.01, "AP": 0.01, "I": 0.01}
for track in self.tracks:
# Split the track into linear tracks
segs, _ = track.SplitTrack()
# Calculate lifetimes and sum it all up for each direction of the track
for seg in segs:
if seg.direction == "Poleward":
times["P"] += seg.time[-1] - seg.time[0]
elif seg.direction == "Antipoleward":
times["AP"] += seg.time[-1] - seg.time[0]
elif seg.direction == "Inactive":
times["I"] += seg.time[-1] - seg.time[0]
else:
raise ValueError("what is this unknown line direction")
return times
def GetTotalDirectionalTimeMinutes(self):
# Find total number of directed time
times = {"P": 0.01, "AP": 0.01, "I": 0.01}
for track in self.tracks:
# Split the track into linear tracks
segs, _ = track.SplitTrack()
# Calculate lifetimes and sum it all up for each direction of the track
for seg in segs:
if seg.direction == "Poleward":
times["P"] += (seg.time[-1] - seg.time[0]) / 60
elif seg.direction == "Antipoleward":
times["AP"] += (seg.time[-1] - seg.time[0]) / 60
elif seg.direction == "Inactive":
times["I"] += (seg.time[-1] - seg.time[0]) / 60
else:
raise ValueError("what is this unknown line direction")
return times
    def GetSwitchFrequencyPerMinutePerCell(self):
        """Per-kymograph switch counts and directional times (minutes) per state."""
        events = {"P": [], "AP": [], "I": []}
        times_all = {"P": [], "AP": [], "I": []}
        for kymo in self.kymographs:
            # Get total track time per direction (seeded > 0 to avoid
            # division by zero downstream).
            times = {"P": 10 ** -7, "AP": 10 ** -7, "I": 10 ** -7}
            for track in kymo.tracks:
                # Split the track into linear tracks
                segs, _ = track.SplitTrack()
                # Calculate lifetimes and sum it all up for each direction of the track
                for seg in segs:
                    if seg.direction == "Poleward":
                        times["P"] += (seg.time[-1] - seg.time[0]) / 60
                    elif seg.direction == "Antipoleward":
                        times["AP"] += (seg.time[-1] - seg.time[0]) / 60
                    elif seg.direction == "Inactive":
                        times["I"] += (seg.time[-1] - seg.time[0]) / 60
                    else:
                        raise ValueError("what is this unknown line direction")
            # Get total switches out of each state for this kymograph.
            switches = {"P": 0, "AP": 0, "I": 0}
            labs = ["P", "AP", "I"]
            for track in kymo.tracks:
                segments, trans = track.SplitTrack()
                for lab in labs:
                    switches[lab] += sum([a for k, a in trans[lab].items()])
            # switch frequencies
            for lab in labs:
                events[lab].append(switches[lab])
                times_all[lab].append(times[lab])
        return events, times_all
def GetStartDistances(self):
# Get start distances
dist_P = []
dist_AP = []
for track in self.tracks:
segs, _ = track.SplitTrack()
for seg in segs:
if seg.direction == "Poleward":
dist_P.append(seg.CalcPositionRelative()[0, 0])
elif seg.direction == "Antipoleward":
dist_AP.append(seg.CalcPositionRelative()[0, 0])
return [dist_P, dist_AP]
def GetEndDistances(self):
# Get end distances
dist_P = []
dist_AP = []
for track in self.tracks:
segs, _ = track.SplitTrack()
for seg in segs:
if seg.direction == "Poleward":
dist_P.append(seg.CalcPositionRelative()[0, -1])
elif seg.direction == "Antipoleward":
dist_AP.append(seg.CalcPositionRelative()[0, -1])
return [dist_P, dist_AP]
def GetAverageDistances(self):
# Get average distances
segsPAP = self.GetSegmentsPAP()
avgdists = []
for segs in segsPAP:
avgdist = []
for seg in segs:
ifunc = interpolate.interp1d(
seg.time, seg.CalcPositionRelative()[0, :], kind="linear"
)
avgdist.extend(
[
1000 * dd
for dd in ifunc(
np.arange(seg.time[0], seg.time[-1], seg.time_step)
)
]
)
# avgdist = [np.mean(seg.CalcPositionRelative()[0, :]) for seg in segs]
avgdists += [avgdist]
return avgdists
def GraphPAP_RunLengths(self, axs, **kwargs):
lens_pap = self.GetRunLengths()
# Toss runlengths over 2 micron
for idx, lens in enumerate(lens_pap):
ld = [i for i in lens if i < 2]
lens_pap[idx] = ld
self.GraphPAP(lens_pap, axs, unit=r"$\mu$" + "m", **kwargs)
def GraphPAP_Velocities(self, axs, **kwargs):
vels_pap = self.GetVelocities()
# Convert vel from micron/sec to nm/sec. Toss vel over 200 micron/sec
for idx, v in enumerate(vels_pap):
ld = [i * 1000 for i in v if i < 0.2]
vels_pap[idx] = ld
self.GraphPAP(vels_pap, axs, unit="nm/s", **kwargs)
def GraphPAP_Lifetimes(self, axs, **kwargs):
lifes_pap = self.GetLifetimes()
# toss lifetimes over 100 sec
for idx, life in enumerate(lifes_pap):
ld = [i for i in life if i < 100]
lifes_pap[idx] = ld
self.GraphPAP(lifes_pap, axs, unit="s", **kwargs)
def GraphPAP_StartPosition(self, axs, **kwargs):
startPos = self.GetStartDistances()
self.GraphPAP(startPos, axs, unit=r"$\mu$" + "m", **kwargs)
def GraphPAP_EndPosition(self, axs, **kwargs):
endPos = self.GetEndDistances()
self.GraphPAP(endPos, axs, unit=r"$\mu$" + "m", **kwargs)
    def GraphPAP(
        self, dat, axs, col="m", lab=None, unit="", xmax=None, xlab=None, ylab=None
    ):
        """Draw one annotated histogram per axis for paired P/AP data.

        dat   : sequence of datasets, one per axis in ``axs``.
        col   : bar color; lab: optional strain label in the top corner.
        unit  : unit string appended to the mean +/- SEM annotation.
        xmax  : shared upper x-limit; computed from the data when None.
        xlab, ylab : optional axis labels.
        """
        # pdb.set_trace()
        for datt, ax in zip(dat, axs):
            # Get x axis max
            # NOTE(review): once xmax is set from the FIRST dataset it is no
            # longer None, so later axes reuse it — confirm the shared scale
            # is intentional.
            if xmax is None:
                # xmax = math.ceil( max(datt))
                xmax = max(datt)
            # find bin edges: nbins equal bins over [0, xmax]
            nbins = 16
            bins = np.array([float(el) for el in range(nbins + 1)])
            bins = np.dot(np.array(xmax / float(nbins)), bins)
            # Plot histogram
            aaa = ax.hist(datt, bins, edgecolor="k", color=col)
            # Add labels
            if xlab is not None:
                ax.set_xlabel(xlab)
            if ylab is not None:
                ax.set_ylabel(ylab)
            # Set x-limits
            ax.set_xlim([0, xmax])
            # Set y-limits and ticks (ceil the tallest bar to nearest 10)
            ymax = int(math.ceil(ax.get_ylim()[1] / 10) * 10)
            ax.set_yticks([0, ymax / 2, ymax])
            ax.set_ylim([0, ymax + 2])
            # Add strain label
            if lab is not None:
                ax.text(
                    0.95,
                    0.95,
                    lab,
                    ha="right",
                    va="top",
                    transform=ax.transAxes,
                    fontsize=12,
                    weight="roman",
                )
            # Add mean line (NOTE: np.mean, despite older "median" wording)
            ax.axvline(np.mean(datt), color="k", linestyle="dashed", linewidth=5)
            # Add mean +/- SEM value label
            # mu = np.median( lens )
            mu = np.mean(datt)
            form = "%.2f"
            mu_str = np.array2string(mu, formatter={"float_kind": lambda mu: form % mu})
            std = np.std(datt)
            # standard error of the mean
            std = std / np.sqrt(len(datt))
            std_str = np.array2string(
                std, formatter={"float_kind": lambda std: form % std}
            )
            ax.text(
                0.95,
                0.85,
                r"{0} $\pm$ {1} {2}".format(mu_str, std_str, unit),
                ha="right",
                va="top",
                transform=ax.transAxes,
                fontsize=12,
                weight="roman",
            )
            ax.text(
                0.95,
                0.75,
                r"N = {0}".format(len(datt)),
                ha="right",
                va="top",
                transform=ax.transAxes,
                fontsize=12,
                weight="roman",
            )
    def PlotTrackByStates(self, cols, k=5):
        """Plot up to ``k`` randomly sampled tracks, routed to one of three
        stacked subplots according to the direction of each track's FIRST
        sub-segment; saves tracks_by_state_<label>.pdf.

        cols: mapping from direction name to plot color.
        """
        # Generate figure and axes and set colors
        fig, axs = plt.subplots(3, 1, figsize=(10, 6), sharex=True)
        axsd = {"Poleward": axs[0], "Antipoleward": axs[1], "Inactive": axs[2]}
        k = min([k, len(self.tracks)])
        for idx, track in enumerate(random.sample(self.tracks, k)):
            minis, _ = track.SplitTrack()
            # axis chosen by the first segment's direction only
            ax = axsd[minis[0].direction]
            # if self.label == "TD" and minis[0].direction == 'Antipoleward':
            # pdb.set_trace()
            # print('1')
            for trk in minis:
                pos_track_rel = trk.CalcPositionRelative()
                ax.plot(
                    trk.time - track.time[0],
                    np.absolute(pos_track_rel[0, :]),
                    linewidth=0.5,
                    color=cols[trk.direction],
                    alpha=0.4,
                )
        # Set x and y limits of subplots (union over the three axes)
        xl = (0, 0)
        yl = (0, 0)
        for ax in axs:
            xli = ax.get_xlim()
            yli = ax.get_ylim()
            xl = (min([xli[0], xl[0]]), max([xli[1], xl[1]]))
            yl = (min([yli[0], yl[0]]), max([yli[1], yl[1]]))
        # Force x limit
        xl = (xl[0], 400)
        # Legend
        axs[0].plot([], [], label="Poleward", color=cols["Poleward"])
        axs[0].plot([], [], label="AntiPoleward", color=cols["Antipoleward"])
        axs[0].plot([], [], label="Inactive", color=cols["Inactive"])
        axs[0].legend(frameon=False)
        axs[2].set_xlabel("Time (s)")
        # axs[0].set_ylabel(r'Distance from SPB ($\mu m$)')
        axs[1].set_ylabel(r"Distance from SPB ($\mu m$)")
        # axs[2].set_ylabel(r'Distance from SPB ($\mu m$)')
        axs[0].set_ylim(bottom=-0.01, top=yl[1])
        axs[1].set_ylim(bottom=-0.01, top=yl[1])
        axs[2].set_ylim(bottom=-0.01, top=yl[1])
        axs[0].set_xlim(left=-1, right=xl[1])
        # axs[0].set_xlim(right=300) # WT monopolar
        # axs[0].set_xlim(right=400) # KLP5D monopolar
        # axs[0].xaxis.set_ticklabels([])
        # axs[1].xaxis.set_ticklabels([])
        plt.tight_layout()
        fig.savefig("tracks_by_state_{0}.pdf".format(self.label))
        # fig.subplots_adjust(hspace = -0.2)
        plt.close()
    def PlotAllTracks(self, cols):
        """Plot every track (colored by sub-segment direction) on one axis
        and save tracks_<label>.pdf.

        cols: mapping from direction name to plot color.
        """
        fig, ax = plt.subplots(figsize=(4, 3))
        # axsd = {'Poleward': axs[0], 'Antipoleward': axs[1], 'Inactive':axs[2]}
        for idx, track in enumerate(self.tracks):
            minis, _ = track.SplitTrack()
            for trk in minis:
                pos_track_rel = trk.CalcPositionRelative()
                ax.plot(
                    trk.time - track.time[0],
                    np.absolute(pos_track_rel[0, :]),
                    linewidth=0.5,
                    color=cols[trk.direction],
                    alpha=0.6,
                )
        # Set x and y limits of subplots
        # ymax=9
        # xmax=1000
        # ax.set_xlim(left=0.0,right=xmax)
        # ax.set_ylim(bottom=-0.1,top=ymax)
        ax.set_xlim(left=0.0)
        ax.set_ylim(bottom=-0.01)
        ymax = ax.get_ylim()[1]
        xmax = ax.get_xlim()[1]
        # NOTE(review): the .format() call below is a no-op — the ylabel
        # string has no {} placeholder.
        ax.set(
            xlabel="Time (s)",
            ylabel="Distance from SPB ($\mu m$)".format(len(self.tracks)),
        )
        # Adding text inside a rectangular box by using the keyword 'bbox'
        plt.text(0.8 * xmax, 0.6 * ymax, "N = {0}".format(len(self.tracks)), fontsize=8)
        # Legend
        ax.plot([], [], label="Poleward", color=cols["Poleward"])
        ax.plot([], [], label="Antipoleward", color=cols["Antipoleward"])
        ax.plot([], [], label="Inactive", color=cols["Inactive"])
        ax.legend()
        plt.tight_layout()
        plt.savefig("tracks_{0}.pdf".format(self.label))
        # fig.subplots_adjust(hspace = -0.2)
        plt.close()
if __name__ == "__main__":
    # Library module: nothing to run standalone.
    print("no default implementation")
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,625
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/Kymograph.py
|
#!/usr/bin/env python
import os, pdb
import math
import numpy as np
from scipy import interpolate
from .node_graph import Graph
import matplotlib.pyplot as plt
from .Features import *
from .ReadFiles import *
'''
Name: Kymograph.py
Description: Parses general, poles and feature information for a single kymograph and stores the data accordingly
'''
class Kymograph:
    """Holds the parsed contents of one kymograph track file: general
    metadata, pole features and track features, with basic clean-up
    (bad-track removal, track merging, pole ordering) applied on load."""
    def __init__(self, fname='example.txt'):
        # get label by stripping the filename suffix
        # NOTE(review): this strips 9 characters, not just '.txt' (4) —
        # confirm the expected filename suffix length.
        self.label = fname[:-9]
        # Read file information
        self.general,self.poles,self.tracks = ReadTxt( fname)
        # Remove Bad tracks
        self.RemoveBadTracks()
        # Merge tracks whose ends are close enough
        # NOTE(review): MergeTracks RETURNS the merged list but the result
        # is discarded here, so self.tracks stays unmerged — confirm whether
        # `self.tracks = self.MergeTracks(self.tracks)` was intended.
        self.MergeTracks(self.tracks)
        # Order track poles
        for track in self.tracks:
            track.OrderPoles()
        # Trim tracks based on kmeans label
        # self.TrimTracksKmeansLabel()
    def RemoveBadTracks(self):
        """Drop tracks that fail Track.CheckViability() (e.g. go backward
        in time), mutating self.tracks in place."""
        # Remove tracks that go backward in time
        # Find bad tracks
        bad_tracks = []
        for track in self.tracks:
            if not track.CheckViability():
                bad_tracks += [track]
        if len( bad_tracks) != 0:
            print('Found some bad tracks')
        # Remove bad tracks
        for track in bad_tracks:
            self.tracks.remove( track)
    def TrimBasedOnTime(self, time_keep=[-1,-1]):
        """Restrict poles and tracks to the time window ``time_keep``;
        features whose trimmed result is None/NaN are dropped.

        NOTE(review): mutable default argument — safe only while callees
        never mutate it.
        """
        # Trim poles
        poles_new = []
        for pole in self.poles:
            trimmed = pole.TrimBasedOnTime(time_keep)
            if trimmed is not np.nan and trimmed is not None:
                poles_new.append(trimmed)
        # print(poles_new)
        # if self.label == '/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/data/bipolar/wild type/MAX_1032_100msR_50msG_7Z_004_cell A_KYMOGRAPH':
        # pdb.set_trace()
        self.poles= poles_new
        # Trim tracks
        tracks_new = []
        for track in self.tracks:
            trimmed = track.TrimBasedOnTime(time_keep)
            if trimmed is not np.nan and trimmed is not None:
                # trimmed.poles = poles_new
                tracks_new.append(trimmed)
        # if self.label == '/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/data/bipolar/wild type/MAX_1032_100msR_50msG_7Z_004_cell A_KYMOGRAPH':
        # pdb.set_trace()
        # print(tracks_new)
        self.tracks = tracks_new
    def MergeTracks(self, tracks):
        """Greedily chain tracks whose end/start points fall within a small
        space-time box, using a directed graph's connected components, and
        RETURN the merged track list (the input list is not modified)."""
        # Merge linear tracks into a single bidirectional track
        # Represent tracks as nodes in a directional graph
        box_half_width = 0.15
        box_height = 2*self.general['time_step']
        g = Graph( len( tracks) )
        matches = [[] for i in range( len(tracks) )]
        dist = [[] for i in range( len(tracks) )]
        # For each node, find prospective matches
        for v, trackv in enumerate( tracks):
            # Find all possible matches
            for w, trackw in enumerate( tracks):
                # if tracks are close together
                if ( trackv.position[-1]-box_half_width < trackw.position[0] < trackv.position[-1]+box_half_width ) and ( trackv.time[-1] < trackw.time[0] < trackv.time[-1]+box_height ):
                    # Add as a possible match
                    matches[v].append(w)
                    # find distance of match (Euclidean in position-time)
                    t1 = [ trackv.position[-1], trackv.time[-1]]
                    t2 = [ trackw.position[0], trackw.time[0]]
                    dist[v].append( math.sqrt( ((t1[0]-t2[0])**2)+((t1[1]-t2[1])**2) ) )
        # Find the best match
        for v, trackv in enumerate( tracks):
            if len( matches[v]) == 0:
                continue
            # Find match with lowest distance
            w = matches[v][dist[v].index( min( dist[v]) )]
            # Add edge between v and w
            g.addEdge(v,w)
        # Find connected components
        cc = g.connectedComponents()
        # Merge the tracks in time order
        tracks_merged = []
        for comp in cc:
            time = None
            position = None
            if len( comp) == 1:
                line_type = tracks[comp[0]].line_type
                direction = tracks[comp[0]].direction
            else:
                # multi-track chains lose their single direction label
                line_type = 'Curve'
                direction = 'Ambiguous'
            for v in comp:
                if time is None:
                    time = tracks[v].time
                else:
                    time = np.concatenate( (time, tracks[v].time) )
                if position is None:
                    position = tracks[v].position
                else:
                    position = np.concatenate( (position, tracks[v].position) )
            tracks_merged += [Track(time, position, self.general['image'], self.poles, direction, line_type, time_step = self.general['time_step'], pos_step=self.tracks[0].pos_step)]
        return tracks_merged
    def PlotTracks( self, tracks, poles=[], figName='tracks.pdf'):
        """Plot the given tracks and poles (position vs time) with a
        rainbow colormap and save to ``figName``.

        NOTE(review): the local ``np = len(poles)`` below shadows the numpy
        alias inside this method; harmless here only because numpy is not
        used afterwards in this scope.
        """
        # Plot the given tracks in a figure
        # Number of tracks
        nt = len(tracks)
        # Number of poles
        np = len(poles)
        # Number of plots
        nn = nt+np
        # Colormap
        cm = plt.get_cmap('gist_rainbow')
        # Generate figure and axes and set colors
        fig = plt.figure( figsize=(12,8) )
        ax = fig.add_subplot(111)
        ax.set_prop_cycle(color=[cm( 1.*i/nn) for i in range(nt)])
        for idx,pole in enumerate(poles):
            ax.plot( pole.position, pole.time, linewidth=3, label = 'Pole {}'.format(1+idx))
        for idx,track in enumerate(tracks):
            ax.plot( track.position, track.time, linewidth=2, label = 'Track {}'.format(1+idx))
        plt.legend()
        # Set axes limits from the extremes over all features
        time_max = max( [max(trk.time) for trk in tracks] + [max(pol.time) for pol in poles] )
        time_min = min( [min(trk.time) for trk in tracks] + [min(pol.time) for pol in poles] )
        x_max = max( [max(trk.position) for trk in tracks] + [max(pol.position) for pol in poles] ) + 0.5
        x_min = min( [min(trk.position) for trk in tracks] + [min(pol.position) for pol in poles] ) - 0.5
        axes = plt.gca()
        axes.set_xlim([x_min, x_max])
        axes.set_ylim([time_min,time_max])
        fig.savefig( figName )
    def FindIntensityAlongSpindle(self, lrange=[0, 10]):
        """Sample image intensity along 100 points between the two poles
        for each frame where the pole separation lies in ``lrange``;
        returns an (n_times, 100) array, or None if unavailable.

        NOTE(review): the bare except + pdb.set_trace() below is debug
        leftover; consider logging the interpolation failure instead.
        """
        if len( self.poles) != 2:
            return None
        # pdb.set_trace()
        dimT = np.shape( self.general['image'])[0]
        dimX = np.shape( self.general['image'])[1]
        # interpolation function for image
        try:
            f = interpolate.interp2d( self.tracks[0].pos_step*np.arange(0,dimX), self.tracks[0].time_step*np.arange(0,dimT), self.general['image'])
        except:
            pdb.set_trace()
            print('1')
        # Get times to find pole position (overlap of both pole lifetimes)
        tStart = max( self.poles[0].time[0], self.poles[1].time[0])
        tEnd = min( self.poles[0].time[-1], self.poles[1].time[-1])
        tVec = np.linspace( tStart, tEnd, math.ceil( (tEnd-tStart)/self.tracks[0].time_step) )
        # Get pole position
        pos0 = self.poles[0].ifunc( tVec)
        pos1 = self.poles[1].ifunc( tVec)
        # pdb.set_trace()
        # Trim to be within range
        pos0c = [i for i,j in zip(pos0,pos1) if np.abs(i-j) > lrange[0] and np.abs(i-j) < lrange[1]]
        pos1c = [j for i,j in zip(pos0,pos1) if np.abs(i-j) > lrange[0] and np.abs(i-j) < lrange[1]]
        tVecc = [k for i,j,k in zip(pos0,pos1,tVec) if np.abs(i-j) > lrange[0] and np.abs(i-j) < lrange[1]]
        if len(pos0c) == 0:
            return None
        # Find intensity between poles for each time value
        intense = np.zeros( (len(tVecc),100) )
        for i, tt in enumerate(tVecc):
            pVec = np.linspace( pos0c[i], pos1c[i],100)
            ttVec = tt*np.ones((100,))
            intense[i,:] = f( pVec, ttVec)[0,:]
        return intense
    # # Trim tracks based on kmeans label
    # def TrimTracksKmeansLabel(self, label=-1):
    def DisplayTracks(self, ax=None):
        """Show the kymograph image with all tracks overlaid in red
        (pixel coordinates); creates a figure when no axis is given."""
        if ax is None:
            fig, ax = plt.subplots(figsize=(6,6))
        # Display kymograph image
        ax.imshow( self.tracks[0].image)
        # Plot tracks
        for track in self.tracks:
            ax.plot( track.position/track.pos_step, track.time/track.time_step, color='red', linewidth=3)
        plt.show()
    def Print(self):
        """Print a summary of this kymograph and each pole/track feature."""
        # Print information about poles and tracks
        print(' ')
        print(' path: {}'.format(self.general['path_tiff'][0:-1]))
        print(' name: {}'.format(self.label ))
        print(' n_poles_exp: {}'.format(self.general['n_poles']))
        print(' n_poles_found: {}'.format(len(self.poles)))
        print(' n_tracks_exp: {}'.format(self.general['n_tracks']))
        print(' n_tracks_found: {}'.format( len(self.tracks)))
        print(' ')
        for feat in self.poles+self.tracks:
            feat.Print()
##########################################
if __name__ == "__main__":
    # Library module: nothing to run standalone.
    print('No default run method')
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,626
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/KymographAnalysis.py
|
#!/usr/bin/env python
import os, pdb, sys
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
import seaborn as sns
import math, random
import glob, yaml, copy, shutil
from src.Strain import *
"""
Name: KymographAnalysis.py
Description: Parses and combines tracks from multiple kymographs for mass analysis
"""
class KymographAnalysis:
    def __init__(self):
        """Load config.yaml from the working directory, build the Strain
        objects, and run the full analysis immediately."""
        self.cwd = os.getcwd()
        # Read config file
        # NOTE(review): yaml.CLoader is the full (unsafe) C loader — fine
        # for a trusted local config, never for untrusted input.
        with open("config.yaml") as f:
            self.config = yaml.load(f, Loader=yaml.CLoader)
        self.InitStrains()
        self.Analyze()
# InitStrains {{{
    def InitStrains(self):
        """Create one Strain per config entry (globbing its track paths,
        normalizing its 0-255 color to 0-1) and optionally trim tracks
        using the configured K-means labels."""
        # Initialize strains with track files
        self.strains = []
        # Get filenames for each strain
        for strain in self.config["strains"]:
            trackpaths = []
            for fpath in strain["path"]:
                trackpaths += glob.glob(fpath)
            # Initialize
            cstrain = Strain(trackpaths, label=strain["type"])
            # colors are stored normalized to [0, 1] from here on
            cstrain.color = tuple(np.array(strain["color"]) / 255)
            self.strains += [cstrain]
        # Use K-means classification if required
        if "useBipolarKmeansLabel" in self.config.keys():
            if self.config["useBipolarKmeansLabel"]:
                for strain, strain_c in zip(self.strains, self.config["strains"]):
                    strain.TrimUsingKmeansLabel(kmean_label=strain_c["kmean_label"])
# }}}
# Analyze {{{
    def Analyze(self):
        """Create (or recreate) the output directory, then run the graphing
        pipeline — either once per spindle-length group or once over all
        tracks — restoring the original working directory at the end.

        NOTE(review): this chdir's into the output directory so that all
        savefig/stats.txt paths below are relative.
        """
        # Initialize graphing directory
        if "saveName" in self.config.keys():
            gdir = os.path.join(self.cwd, self.config["saveName"])
        else:
            gdir = os.path.join(self.cwd, "result")
        # wipe any previous results
        if os.path.exists(gdir):
            shutil.rmtree(gdir, ignore_errors=True)
        os.mkdir(gdir)
        os.chdir(gdir)
        # Analyze by groups
        if self.config["analyzeByLength"] is True:
            for group in self.config["analyzeGroups"]:
                # CD to directory
                ggdir = os.path.join(gdir, group["type"])
                os.mkdir(ggdir)
                os.chdir(ggdir)
                print("Analyzing {0}".format(group["type"]))
                # deep copy so per-group filtering never mutates the originals
                strains = copy.deepcopy(self.strains)
                # Get tracks that match this spindle length
                for strain in strains:
                    strain.GetTracks(spindle_length=group["length"])
                # pdb.set_trace()
                if self.config["analyzeSpindleIntensity"] is True:
                    self.GraphSpindleIntensity(
                        strains, lrange=group["length"], gname=group["type"]
                    )
                self.Graph(strains, gname=group["type"])
        else:
            # optional SPB-proximity filtering: 1 keeps near tracks, 2 far
            if self.config["analyzeSPBAssociatedTracks"] == 1:
                for strain in self.strains:
                    strain.TossFarTracks(self.config["SPBRegion"])
            if self.config["analyzeSPBAssociatedTracks"] == 2:
                for strain in self.strains:
                    strain.TossCloseTracks(self.config["SPBRegion"])
            # Get all tracks
            for strain in self.strains:
                strain.GetTracks()
            if self.config["analyzeSpindleIntensity"] is True:
                self.GraphSpindleIntensity(self.strains)
            self.Graph(self.strains)
        os.chdir(self.cwd)
# }}}
# Graph {{{
    def Graph(self, strains, gname=None):
        """Run all enabled plots for the current working directory.

        strains: the (possibly filtered) Strain list used by the scatter
        plots; the bar/histogram plots below read self.strains directly.
        NOTE(review): ``gname`` is unused in this body — confirm it is
        needed, and that PlotTracksByState/PlotAllTracks exist on this
        class (they are not visible in this chunk).
        """
        # Graph useful properties
        plt.rcParams.update({"font.size": 14})
        plt.rc("legend", fontsize=12)
        self.PlotTracksByState(k=1000)
        self.PlotAllTracks()
        # self.GraphStrain_EventsPerMinutePerCellViolin()
        # self.GraphStrain_SwitchFrequencyPerCellViolin()
        # self.GraphStrain_SwitchFrequencyPerCell()
        self.GraphStrain_FractionMovement()
        self.GraphStrain_SwitchFrequency2()
        self.GraphStrain_EventsPerMinute2()
        # self.GraphStrain_EventsPerMinutePerCell()
        self.GraphHistComparison()
        # self.GraphStrainMedianValues()
        # self.GraphStrain_EventsPerMinutePerCellRaw()
        # self.GraphStrain_SwitchFrequencyPerCellRaw()
        # Scatter Plots: (x quantity, y quantity, x unit, y unit, filename)
        graphscatvars = (
            ['Run displacement','Intensity','nm','AU','scatter_intensity_runlength.pdf'],
            # ['Velocity','Intensity',r'$\mu$m/min','AU','scatter_intensity_velocity.pdf'],
            # ['Lifetime','Intensity','min','AU','scatter_intensity_lifetime.pdf'],
            # [
            #     "Run length",
            #     "Velocity",
            #     r"$\mu$" + "m",
            #     "nm/s",
            #     "scatter_velocity_runlength.pdf",
            # ],
            # [
            #     "Run length",
            #     "Lifetime",
            #     r"$\mu$" + "m",
            #     "min",
            #     "scatter_lifetime_runlength.pdf",
            # ],
            ['Velocity','Intensity','nm/s','AU','scatter_intensity_velocity.pdf'],
            # ["Velocity", "Lifetime", "nm/s", "min", "scatter_lifetime_velocity.pdf"],
            # [
            #     "Run length",
            #     "Average distance from SPB",
            #     r"$\mu$" + "m",
            #     r"$\mu$m",
            #     "scatter_avgSPBdistance_runlength.pdf",
            # ],
            # [
            #     "Velocity",
            #     "Average distance from SPB",
            #     "nm/s",
            #     r"$\mu$m",
            #     "scatter_avgSPBdistance_velocity.pdf",
            # ],
            # [
            #     "Lifetime",
            #     "Average distance from SPB",
            #     "min",
            #     r"$\mu$m",
            #     "scatter_avgSPBdistance_lifetime.pdf",
            # ],
            ['Lifetime','Intensity','s','AU','scatter_intensity_lifetime.pdf'],
        )
        for x,y,xunit,yunit,figname in graphscatvars:
            self.GraphStrainScatter( strains,x,y,xlab=x,ylab=y,xunit=xunit,yunit=yunit,figname=figname )
        # self.GraphStrain_EventsPerMinute()
        # self.GraphStrain_AvgStartEnd()
        # self.GraphStrain_StateTimes()
        # self.GraphStrain_SwitchCounts()
        # self.GraphStrain_StateSwitchMatrix()
# }}}
# GraphHistComparison {{{
    def GraphHistComparison(self):
        """For each quantity (run length, velocity, lifetime, mean SPB
        distance) draw a mirrored histogram per strain — poleward data
        negated onto the left half, antipoleward on the right — mark the
        medians, and append summary statistics to stats.txt.

        NOTE(review): statistics are written by temporarily rebinding
        sys.stdout to the stats file; an exception inside those sections
        would leave stdout redirected.
        """
        def plot_median_special(ax, xloc, rel_height, col):
            # Draw a faint vertical stem topped with a diamond marker at
            # xloc, scaled to rel_height of the current y-range.
            (ybottom, ytop) = ax.get_ylim()
            ax.plot(
                [xloc, xloc],
                [ybottom, rel_height * ytop],
                color=col,
                linewidth=1.5,
                alpha=0.3,
                solid_capstyle="round",
            )
            ax.plot(
                [xloc],
                [rel_height * ytop],
                marker="d",
                color=col,
                alpha=0.6,
                markersize=6,
            )
            return ax
        # (strain getter name, x label, y label, unit, output filename)
        graphhistvars = (
            [
                "GetRunLengths",
                "Run displacement",
                "Count",
                "nm",
                "strain_runlength.pdf",
            ],
            ["GetVelocities", "Velocity", "Count", "nm/s", "strain_velocity.pdf"],
            ["GetLifetimes", "Lifetime", "Count", "s", "strain_lifetime.pdf"],
            [
                "GetAverageDistances",
                "Average distance from SPB",
                "Count",
                "nm",
                "strain_avg_pos.pdf",
            ],
        )
        # Special x limits
        # xmaxes = {
        # 'Run displacement': 1.6,
        # 'Velocity': 100.0,
        # 'Lifetime': 1.6,
        # 'Average distance from SPB': 8.0,
        # }
        # if self.config['paperFigure'] == 5:
        # xmaxes['Velocity'] = 60.0
        xmaxes = {
            "Run displacement": 1600,
            "Velocity": 100.0,
            "Lifetime": 100,
            "Average distance from SPB": 8000,
        }
        # per-quantity granularity used when ceiling the y-limit
        ymax_scaling = {
            "Run displacement": 2000,
            "Velocity": 50,
            "Lifetime": 100,
            "Average distance from SPB": 10000,
        }
        if self.config["paperFigure"] == 5:
            xmaxes["Velocity"] = 60.0
        nStrain = len(self.strains)
        for fcn, xlab, ylab, unit, figname in graphhistvars:
            # Make a figure. Two axes (one for poleward, one for antipoleward)
            fig, ax = plt.subplots(figsize=(6, 3))
            cols1 = [cstrain.color for cstrain in self.strains]
            cols2 = [cstrain.color for cstrain in self.strains]
            # cols1 = [[68, 111, 200],[220, 95, 60]]
            # cols1 = [tuple(np.array(x)/255) for x in cols1]
            # cols2 = [[68, 111, 200],[220, 95, 60]]
            # cols2 = [tuple(np.array(x)/255) for x in cols2]
            # list for medians
            medians = {}
            original_stdout = (
                sys.stdout
            )  # Save a reference to the original standard output
            # Save to stats
            # NOTE(review): debug leftover — prints the cwd to the console.
            print(os.getcwd())
            with open("stats.txt", "a") as f:
                sys.stdout = f  # Change the standard output to the file we created.
                print("-" * 30)
                print("\nParameter = {0}".format(xlab))
            sys.stdout = original_stdout  # Reset the standard output to its original value # Display
            # Make histograms for each strain
            for strain, col1, col2 in zip(self.strains, cols1, cols2):
                # Get data
                funcData = getattr(strain, fcn)
                dataPAP = funcData()
                # poleward values negated so the two directions mirror
                dataAll = np.hstack((-1 * np.array(dataPAP[0]), dataPAP[1]))
                # bins and histogram
                nbins = 16
                bins = np.linspace(-1 * xmaxes[xlab], xmaxes[xlab], nbins + 1)
                # ax.hist( dataAll, bins, density=True, edgecolor='k', alpha=0.6, color = col, label='{0} (N={1})'.format(strain.label, len(dataAll)))
                if self.config["paperFigure"] == 7:
                    print("Skip WT histograms for TD cell")
                else:
                    _, _, patches = ax.hist(
                        dataAll,
                        bins,
                        density=True,
                        edgecolor="white",
                        linewidth=1.0,
                        alpha=0.6,
                        color=col2,
                    )
                    # hatch the poleward (left/negative) half of the bars
                    for i in range(0, int(nbins / 2)):
                        patches[i].set_facecolor(col1)
                        patches[i].set_hatch("////")
                # Draw y-axis in middle
                ax.axvline(x=0, c="black", lw=1.5)
                # Add medians info (poleward median kept negative)
                medians[strain.label] = dict(
                    zip(
                        ["P", "AP"],
                        [fac * np.median(db) for db, fac in zip(dataPAP, [-1, 1])],
                    )
                )
                # ax.hist( [], bins, edgecolor='white', linewidth=1.0, alpha=0.6, color = col2, label='{0} (N={1})'.format(strain.label, len(dataAll)))
                # empty histogram used only to create a legend entry
                ax.hist(
                    [],
                    bins,
                    edgecolor="white",
                    linewidth=1.0,
                    alpha=0.6,
                    color=col2,
                    label="{0}".format(strain.label),
                )
                # Print Info
                # Save to stats
                with open("stats.txt", "a") as f:
                    sys.stdout = f  # Change the standard output to the file we created.
                    print("Strain: {0}".format(strain.label))
                    print("Poleward:")
                    print("\tN = {0}".format(len(dataPAP[0])))
                    print("\tMedian = {0:.3f} {1}".format(np.median(dataPAP[0]), unit))
                    print("\tMean = {0:.3f} {1}".format(np.mean(dataPAP[0]), unit))
                    print(
                        "\tStandard Dev = {0:.3f} {1}".format(np.std(dataPAP[0]), unit)
                    )
                    print(
                        "\tStandard Error = {0:.3f} {1}".format(
                            np.std(dataPAP[0]) / np.sqrt(len(dataPAP[0])), unit
                        )
                    )
                    print("Antipoleward:")
                    print("\tN = {0}".format(len(dataPAP[1])))
                    print("\tMedian = {0:.3f} {1}".format(np.median(dataPAP[1]), unit))
                    print("\tMean = {0:.3f} {1}".format(np.mean(dataPAP[1]), unit))
                    print(
                        "\tStandard Dev = {0:.3f} {1}".format(np.std(dataPAP[1]), unit)
                    )
                    print(
                        "\tStandard Error = {0:.3f} {1}".format(
                            np.std(dataPAP[1]) / np.sqrt(len(dataPAP[1])), unit
                        )
                    )
                sys.stdout = original_stdout  # Reset the standard output to its original value # Display
                print("-" * 30)
            # Get max y value (ceiled to the nearest .01)
            ytop = 1.25 * max([pp.get_height() for pp in ax.patches])
            ymax = math.ceil(ytop * ymax_scaling[xlab]) / (ymax_scaling[xlab])
            ax.set_yticks([0, ymax / 2, ymax])
            ax.set(xlabel="{0} ({1})".format(xlab, unit))
            # Limits and ticks
            ax.set_xlim(left=-1 * xmaxes[xlab], right=xmaxes[xlab])
            ax.set_ylim(bottom=0, top=1.0 * ymax)
            # Plot medians
            for strain_name, col in zip(medians.keys(), cols1):
                meds = medians[strain_name].values()
                for med in meds:
                    ax = plot_median_special(ax, med, 0.9, col)
            # Legend
            if nStrain > 1 and xlab == "Velocity":
                ax.legend(frameon=False, loc="upper left")
            # ax.legend(frameon=False)
            # Set ylabel
            ax.set(ylabel="Probability density")
            # XLABELS
            if xlab == "Lifetime":
                ax.set_xticks([-100, -50, 0, 50, 100])
                ax.set_xticklabels(np.abs(ax.get_xticks()))
            elif xlab == "Velocity":
                if self.config["paperFigure"] == 5:
                    ax.set_xticks([-60, -30, 0, 30, 60])
                else:
                    ax.set_xticks([-100, -50, 0, 50, 100])
            elif xlab == "Average distance from SPB":
                ax.set_xticks([-8000, -4000, 0, 4000, 8000])
                ax.ticklabel_format(
                    style="sci", axis="y", scilimits=(0, 0), useMathText=True
                )
                ax.set_xticklabels(np.abs(ax.get_xticks()))
            elif xlab == "Run displacement":
                ax.set_xticks([-1600, -800, 0, 800, 1600])
                ax.ticklabel_format(
                    style="sci", axis="y", scilimits=(0, 0), useMathText=True
                )
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
# }}}
# GraphStrain_SwitchFrequency2 {{{
def GraphStrain_SwitchFrequency2(self, figname="graph_switch_frequency.pdf"):
# Graph comparison bar plot for events per minute
strains = [strain.label for strain in self.strains]
# Data
n_events = np.zeros((len(self.strains), 2))
dt = 0 * n_events
for idx, strain in enumerate(self.strains):
events, times = strain.GetSwitchFrequencyPerMinutePerCell()
for je, jt in zip(events["P"], times["P"]):
n_events[idx, 0] += je
dt[idx, 0] += jt * 60
for je, jt in zip(events["AP"], times["AP"]):
n_events[idx, 1] += je
dt[idx, 1] += jt * 60
events_per_min = n_events / dt
events_per_min_err = np.sqrt(n_events) / dt
df = pd.DataFrame(
{"Poleward": events_per_min[:, 0], "AntiPoleward": events_per_min[:, 1]},
index=strains,
)
# Plot
fig, ax = plt.subplots(figsize=(4, 3))
ax = df.plot(
kind="bar",
ax=ax,
color=["Green", "Red"],
rot=0,
yerr=events_per_min_err,
error_kw=dict(ecolor="k"),
legend=False,
)
ax.set_ylabel("Switching frequency\n(events/sec)")
ax.set_xlabel("")
num_cells = [0, 0]
for idx in range(len(strains)):
for kymo in self.strains[idx].kymographs:
if kymo.poles != []:
tt = kymo.poles[0].time[-1] - kymo.poles[0].time[0]
if tt > 10:
num_cells[idx] += 1
original_stdout = sys.stdout # Save a reference to the original standard output
with open("stats.txt", "a") as f:
sys.stdout = f # Change the standard output to the file we created.
print("------------------------------")
print("\nSwitching Frequency\n")
for idx, strain in enumerate(strains):
print("Strain: {0}".format(strain))
print(" Num Cells: {0}".format(num_cells[idx]))
print(" Poleward Exit")
print(" N Events: {0}".format(n_events[idx, 0]))
print(" Total Time: {0:.3f}".format(dt[idx, 0]))
print(
" Switching Freq: {0:.5f} sec^-1".format(
events_per_min[idx, 0]
)
)
print(
" Error in switching freq: {0:.5f} sec^-1".format(
events_per_min_err[idx, 0]
)
)
print(" AntiPoleward")
print(" N Events: {0}".format(n_events[idx, 1]))
print(" Total Time: {0:.3f}".format(dt[idx, 1]))
print(
" Switching Freq: {0:.5f} sec^-1".format(
events_per_min[idx, 1]
)
)
print(
" Error in switching freq: {0:.5f} sec^-1".format(
events_per_min_err[idx, 1]
)
)
print("------------------------------")
sys.stdout = original_stdout # Reset the standard output to its original value # Display
# Set y axis limit and ticks (ceil to nearest 0.02)
try:
if self.config["paperFigure"] == 2:
# ax.set_ylim(top=2.4)
ax.set_ylim(top=0.024)
elif self.config["paperFigure"] == 3:
# ax.set_ylim(top=5.0)
ax.set_ylim(top=0.06)
elif self.config["paperFigure"] == 4:
# ax.set_ylim(top=5.0)
ax.set_ylim(top=0.06)
elif self.config["paperFigure"] == 5:
# ax.set_ylim(top=2.4)
ax.set_ylim(top=0.024)
else:
raise exception("unkown value for paperfigure parameter")
ymax = ax.get_ylim()[1]
except:
ymax = np.max((data[:, :2] + data[:, 2:]).flatten())
# ymax = math.ceil(ax.get_ylim()[1]*50)/50
ymax = math.ceil(ymax * 50) / 50
ax.set_ylim(top=1.5 * ymax)
ax.set_yticks([0, ymax / 2, ymax])
ax.set_ylim(bottom=0.0)
ax.set(xlabel=None)
# Scientific notation
ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0), useMathText=True)
# Set custom patch colors (Poleward_strain1, Poleward_strain2, AntiP_streain1, AntiP_strain2)
if len(self.strains) == 1:
c1 = self.strains[0].color
# c1 = [68, 111, 200]
cols = [c1, c1]
# cols = [tuple(np.array(x)/255) for x in cols]
for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
pp.set_facecolor(col)
pp.set_alpha(0.6)
pp.set_edgecolor("white")
if idx < len(strains):
pp.set_hatch("////")
cols = [c1, c1]
cols = [tuple(np.array(x) / 255) for x in cols]
labels = ["Poleward", "Antipoleward"]
hatching = ["////", ""]
handles = [
matplotlib.patches.Rectangle(
(0, 0),
1,
1,
facecolor=cols[idx],
alpha=0.6,
label=labels[idx],
hatch=hatching[idx],
edgecolor="white",
)
for idx in range(len(labels))
]
elif len(self.strains) == 2:
c1 = self.strains[0].color
c2 = self.strains[1].color
# c1 = [68, 111, 200]
# c2 = [220, 95, 60]
cols = [c1, c2, c1, c2]
# cols = [tuple(np.array(x)/255) for x in cols]
for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
pp.set_facecolor(col)
pp.set_alpha(0.6)
pp.set_edgecolor("white")
if idx < len(strains):
pp.set_hatch("////")
cols = [c1, c1, c2, c2]
# cols = [tuple(np.array(x)/255) for x in cols]
labels = ["Poleward", "Antipoleward", "Poleward", "Antipoleward"]
hatching = ["////", "", "////", ""]
handles = [
matplotlib.patches.Rectangle(
(0, 0),
1,
1,
facecolor=cols[idx],
alpha=0.6,
label=labels[idx],
hatch=hatching[idx],
edgecolor="white",
)
for idx in range(len(labels))
]
else:
raise Exception("only coded for 1 or 2 strains")
# ax.legend(handles, labels, loc='upper left', frameon=False)
ax.legend("", frameon=False)
plt.tight_layout()
fig.savefig(figname)
plt.close()
# }}}
# GraphStrain_EventsPerMinute2 {{{
def GraphStrain_EventsPerMinute2(self, figname="graph_events_per_second.pdf"):
# Graph comparison bar plot for events per minute
strains = [strain.label for strain in self.strains]
# Data
n_events = np.zeros((len(self.strains), 2))
dt = 0 * n_events
for idx, strain in enumerate(self.strains):
events, times = strain.GetDirectionalEventsPerMinutePerCell()
for je, jt in zip(events["P"], times["P"]):
n_events[idx, 0] += je
# convert times to seconds
dt[idx, 0] += jt * 60
for je, jt in zip(events["AP"], times["AP"]):
n_events[idx, 1] += je
dt[idx, 1] += jt * 60
events_per_min = n_events / dt
events_per_min_err = np.sqrt(n_events) / dt
df = pd.DataFrame(
{"Poleward": events_per_min[:, 0], "AntiPoleward": events_per_min[:, 1]},
index=strains,
)
# Plot
fig, ax = plt.subplots(figsize=(4, 3))
ax = df.plot(
kind="bar",
ax=ax,
color=["Green", "Red"],
rot=0,
yerr=events_per_min_err,
error_kw=dict(ecolor="k"),
legend=False,
)
ax.set_ylabel("Directional events \n per second")
ax.set_xlabel("")
num_cells = [0, 0]
for idx in range(len(strains)):
for kymo in self.strains[idx].kymographs:
if kymo.poles != []:
tt = kymo.poles[0].time[-1] - kymo.poles[0].time[0]
if tt > 10:
num_cells[idx] += 1
original_stdout = sys.stdout # Save a reference to the original standard output
with open("stats.txt", "a") as f:
sys.stdout = f # Change the standard output to the file we created.
print("------------------------------")
print("\nEvents per second\n")
for idx, strain in enumerate(strains):
print("Strain: {0}".format(strain))
print(" Num Cells: {0}".format(num_cells[idx]))
print(" Poleward")
print(" N Events: {0}".format(n_events[idx, 0]))
print(" Total Time: {0:.3f} sec".format(dt[idx, 0]))
print(
" Events per sec: {0:.5f} sec^-1".format(
events_per_min[idx, 0]
)
)
print(
" Error in events per sec: {0:.5f} sec^-1".format(
events_per_min_err[idx, 0]
)
)
print(" AntiPoleward")
print(" N Events: {0}".format(n_events[idx, 1]))
print(" Total Time: {0:.3f} sec".format(dt[idx, 1]))
print(
" Events per sec: {0:.5f} sec^-1".format(
events_per_min[idx, 1]
)
)
print(
" Error in events per sec: {0:.5f} sec^-1".format(
events_per_min_err[idx, 1]
)
)
print("------------------------------")
sys.stdout = original_stdout # Reset the standard output to its original value # Display
# Set y axis limit and ticks (ceil to nearest 0.02)
try:
if self.config["paperFigure"] == 2:
# ax.set_ylim(top=0.8)
ax.set_ylim(top=0.014)
elif self.config["paperFigure"] == 3:
# ax.set_ylim(top=1.8)
ax.set_ylim(top=0.03)
elif self.config["paperFigure"] == 4:
# ax.set_ylim(top=1.8)
ax.set_ylim(top=0.03)
elif self.config["paperFigure"] == 5:
# ax.set_ylim(top=0.8)
ax.set_ylim(top=0.014)
else:
raise exception("unkown value for paperfigure parameter")
ymax = ax.get_ylim()[1]
except:
ymax = np.max((data[:, :2] + data[:, 2:]).flatten())
# ymax = math.ceil(ax.get_ylim()[1]*50)/50
ymax = math.ceil(ymax * 50) / 50
ax.set_ylim(top=1.5 * ymax)
ax.set_yticks([0, ymax / 2, ymax])
ax.set_ylim(bottom=0.0)
ax.set(xlabel=None)
# Scientific notation
ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0), useMathText=True)
if len(self.strains) == 1:
c1 = self.strains[0].color
# c1 = [68, 111, 200]
cols = [c1, c1]
# cols = [tuple(np.array(x)/255) for x in cols]
for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
pp.set_facecolor(col)
pp.set_alpha(0.6)
pp.set_edgecolor("white")
if idx < len(strains):
pp.set_hatch("////")
cols = [c1, c1]
cols = [tuple(np.array(x) / 255) for x in cols]
labels = ["Poleward", "Antipoleward"]
hatching = ["////", ""]
handles = [
matplotlib.patches.Rectangle(
(0, 0),
1,
1,
facecolor=cols[idx],
alpha=0.6,
label=labels[idx],
hatch=hatching[idx],
edgecolor="white",
)
for idx in range(len(labels))
]
elif len(self.strains) == 2:
c1 = self.strains[0].color
c2 = self.strains[1].color
# c1 = [68, 111, 200]
# c2 = [220, 95, 60]
cols = [c1, c2, c1, c2]
# cols = [tuple(np.array(x)/255) for x in cols]
for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
pp.set_facecolor(col)
pp.set_alpha(0.6)
pp.set_edgecolor("white")
if idx < len(strains):
pp.set_hatch("////")
cols = [c1, c1, c2, c2]
# cols = [tuple(np.array(x)/255) for x in cols]
labels = ["Poleward", "Antipoleward", "Poleward", "Antipoleward"]
hatching = ["////", "", "////", ""]
handles = [
matplotlib.patches.Rectangle(
(0, 0),
1,
1,
facecolor=cols[idx],
alpha=0.6,
label=labels[idx],
hatch=hatching[idx],
edgecolor="white",
)
for idx in range(len(labels))
]
else:
raise Exception("only coded for 1 or 2 strains")
ax.legend(handles, labels, loc="upper left", frameon=False)
plt.tight_layout()
fig.savefig(figname)
plt.close()
# }}}
# GraphStrain_FractionMovement {{{
def GraphStrain_FractionMovement(self, figname="graph_fraction_kymo_movement.pdf"):
# Graph comparison bar plot for events per minute
fracMove = [
strain.GetFractionKymographsWithMovement() for strain in self.strains
]
n_total = [len(strain.kymographs) for strain in self.strains]
n_move = [int(jp * np) for jp, np in zip(fracMove, n_total)]
strains = [strain.label for strain in self.strains]
# Colors
cols1 = [cstrain.color for cstrain in self.strains]
num_cells = [0, 0]
for idx in range(len(strains)):
for kymo in self.strains[idx].kymographs:
if kymo.poles != []:
tt = kymo.poles[0].time[-1] - kymo.poles[0].time[0]
if tt > 10:
num_cells[idx] += 1
original_stdout = sys.stdout # Save a reference to the original standard output
with open("stats.txt", "a") as f:
sys.stdout = f # Change the standard output to the file we created.
print("------------------------------")
print("\nFraction kymograph movement\n\n")
for idx, strain in enumerate(strains):
print(" Strain: {0}".format(strain))
print(" Percentage: {0:.3f}".format(fracMove[idx]))
print(" N: {0}\n".format(num_cells[idx]))
print("------------------------------")
sys.stdout = original_stdout # Reset the standard output to its original value # Display
# Bar plot
fig, ax = plt.subplots(figsize=(4, 3))
ax.bar(strains, fracMove, color=cols1, width=0.5, alpha=0.6)
# ax.set_xlabel("Strain")
ax.set_ylabel("Fraction of cells\nwith movement")
ax.set_ylim(top=1.0)
ax.set_yticks([0.0, 0.5, 1.0])
ax.set_xlim(left=-0.75, right=len(strains) - 1 + 0.75)
handles = [
plt.Rectangle((0, 0), 1, 1, color=cols1[idx], alpha=0.6)
for idx in range(len(strains))
]
# plt.legend(handles, strains, loc='upper left', frameon=False)
plt.tight_layout()
fig.savefig(figname)
plt.close()
# }}}
# GraphStrainMedianValues{{{
    def GraphStrainMedianValues(self, figname="graph_median_lifetime.pdf"):
        """Grouped bar plots (mean +/- SEM) of run length, velocity, lifetime,
        and average SPB distance, split by direction and strain.

        NOTE(review): the ``figname`` parameter is immediately shadowed by the
        per-quantity figure names from ``graphhistvars``, so the argument is
        effectively unused -- confirm callers do not rely on it.
        """
        # Graph comparison bar plot for median lifetime
        # Each entry: (strain getter name, axis label, unit, output filename).
        graphhistvars = (
            [
                "GetRunLengths",
                "Run displacement",
                r"$\mu$" + "m",
                "strain_median_runlength.pdf",
            ],
            [
                "GetVelocities_nm_per_sec",
                "Velocity",
                "nm/s",
                "strain_median_velocity.pdf",
            ],
            ["GetLifetimes_min", "Lifetime", r"min", "strain_median_lifetime.pdf"],
            [
                "GetAverageDistances",
                "Average distance from SPB",
                r"$\mu$" + "m",
                "strain_median_avg_pos.pdf",
            ],
        )
        for fcn, ylab, unit, figname in graphhistvars:
            # Data
            # Row: strains
            # Col: Poleward Mean, AntiPoleward Mean, Poleward STD, Antipoleward STD
            data = np.zeros((len(self.strains), 4))
            count = np.zeros(len(self.strains))
            for idx, strain in enumerate(self.strains):
                # Resolve the per-strain getter by name; events is presumably
                # a (poleward, antipoleward) pair of samples -- TODO confirm.
                funcData = getattr(strain, fcn)
                events = funcData()
                count[idx] = len(strain.kymographs)
                # SEM is normalized by kymograph count, not event count.
                data[idx, 0] = np.mean(events[0])
                data[idx, 2] = np.std(events[0]) / np.sqrt(count[idx])
                data[idx, 1] = np.mean(events[1])
                data[idx, 3] = np.std(events[1]) / np.sqrt(count[idx])
            strains = [strain.label for strain in self.strains]
            # Create pd Dataframe for plotting
            df = pd.DataFrame(
                data,
                columns=["Poleward", "Antipoleward", "std_P", "std_AP"],
                index=strains,
            )
            # Plot
            fig, ax = plt.subplots(figsize=(4, 3))
            # convert the std columns to an array
            yerr = df[["std_P", "std_AP"]].to_numpy().T
            ax = df[["Poleward", "Antipoleward"]].plot(
                kind="bar",
                ax=ax,
                color=["Green", "Red"],
                rot=0,
                # yerr=yerr, error_kw=dict(ecolor='k'),legend=False, xlabel=None)
                legend=False,
                xlabel=None,
            )
            # ax.set_xlabel("Strain")
            ax.set_ylabel("Median\n{0}\n({1})".format(ylab, unit))
            # Set y axis limit and ticks (ceil to nearest 0.02)
            if ylab == "Velocity":  # nearest 4
                ymax = np.max((data[:, :2] + data[:, 2:]).flatten())
                ymax = math.ceil(ymax / 4) * 4
            else:
                ymax = np.max((data[:, :2] + data[:, 2:]).flatten())
                # ymax = math.ceil(ax.get_ylim()[1]*50)/50
                ymax = math.ceil(ymax * 50) / 50
            ax.set_ylim(top=1.4 * ymax)
            ax.set_yticks([0, ymax / 2, ymax])
            # for jj in range(2):
            # ax.text(jj, ymax, 'N cells = {0}'.format(count[jj]),
            # ha='center', color='black', fontsize=8)
            # Set custom patch colors (Poleward_strain1, Poleward_strain2, AntiP_streain1, AntiP_strain2)
            if len(self.strains) == 1:
                c1 = self.strains[0].color
                # c1 = [68, 111, 200]
                cols = [c1, c1]
                # cols = [tuple(np.array(x)/255) for x in cols]
                # Hatch the poleward bars to distinguish direction.
                for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
                    pp.set_facecolor(col)
                    pp.set_alpha(0.6)
                    pp.set_edgecolor("white")
                    if idx < len(strains):
                        pp.set_hatch("////")
                cols = [c1, c1]
                # cols = [tuple(np.array(x)/255) for x in cols]
                labels = ["Poleward", "Antipoleward"]
                hatching = ["////", ""]
                handles = [
                    matplotlib.patches.Rectangle(
                        (0, 0),
                        1,
                        1,
                        facecolor=cols[idx],
                        alpha=0.6,
                        label=labels[idx],
                        hatch=hatching[idx],
                        edgecolor="white",
                    )
                    for idx in range(len(labels))
                ]
            elif len(self.strains) == 2:
                c1 = self.strains[0].color
                c2 = self.strains[1].color
                # c1 = [68, 111, 200]
                # c2 = [220, 95, 60]
                cols = [c1, c2, c1, c2]
                # cols = [tuple(np.array(x)/255) for x in cols]
                for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
                    pp.set_facecolor(col)
                    pp.set_alpha(0.6)
                    pp.set_edgecolor("white")
                    if idx < len(strains):
                        pp.set_hatch("////")
                cols = [c1, c1, c2, c2]
                # cols = [tuple(np.array(x)/255) for x in cols]
                labels = [
                    "Poleward, {0}".format(strains[0]),
                    "Antipoleward, {0}".format(strains[0]),
                    "Poleward, {0}".format(strains[1]),
                    "Antipoleward, {0}".format(strains[1]),
                ]
                hatching = ["////", "", "////", ""]
                handles = [
                    matplotlib.patches.Rectangle(
                        (0, 0),
                        1,
                        1,
                        facecolor=cols[idx],
                        alpha=0.6,
                        label=labels[idx],
                        hatch=hatching[idx],
                        edgecolor="white",
                    )
                    for idx in range(len(labels))
                ]
            else:
                raise Exception("only coded for 1 or 2 strains")
            ax.legend(handles, labels, loc="upper left", frameon=False)
            plt.tight_layout()
            fig.savefig(figname)
            plt.close()
# }}}
# GraphStrain_AvgStartEnd {{{
def GraphStrain_AvgStartEnd(self, figname="graph_fraction_kymo_movement.pdf"):
freqP = [strain.GetFractionKymographsWithMovement() for strain in self.strains]
strains = [strain.label for strain in self.strains]
# Create pd Dataframe for plotting
seriesP = pd.Series(freqP, index=strains)
# Plot
fig, ax = plt.subplots(figsize=(6, 4))
df = pd.DataFrame({"Movements": seriesP})
df.plot.bar(ax=ax, color=["RebeccaPurple"], rot=0)
ax.set_xlabel("Strain")
ax.set_ylabel("Fraction of Cells\nwith Movement")
plt.tight_layout()
fig.savefig(figname)
plt.close()
# }}}
# GraphStrainScatter {{{
    def GraphStrainScatter(
        self,
        strains,
        x,
        y,
        xlab=None,
        ylab=None,
        xunit="",
        yunit="",
        figname="scatter.pdf",
    ):
        """Scatter plot of quantity ``y`` against quantity ``x`` for each
        strain, with one panel per direction (Poleward / Antipoleward).

        NOTE(review): the ``figname`` parameter is unused; the output file is
        always "scatter_{x}_{y}.pdf". NOTE(review): if ``x`` or ``y`` is not
        one of the recognized labels, ``xx``/``yy`` stay unbound and the plot
        call below raises -- confirm callers only pass the listed labels.
        """
        # Special x limits
        xmaxes = {
            "Run displacement": 1600,
            "Velocity": 60.0,
            "Lifetime": 80,
            "Intensity": 1000,
        }
        # 2 axes. Poleward and antipoleward
        fig, axs = plt.subplots(1, 2, figsize=(6, 3), sharey=True)
        cols = sns.color_palette("husl", len(strains))
        directions = ["Poleward", "Antipoleward"]
        for strain, c in zip(strains, cols):
            # Dispatch the label strings to the strain getters; each getter
            # presumably returns a (poleward, antipoleward) pair -- TODO confirm.
            if x == "Intensity":
                xx = strain.GetIntensities()
            elif x == "Run displacement":
                xx = strain.GetRunLengths()
            elif x == "Velocity":
                xx = strain.GetVelocities()
            elif x == "Lifetime":
                xx = strain.GetLifetimes()
            # elif x == "Average distance from SPB":
            # xx = strain.GetAverageDistances()
            if y == "Intensity":
                yy = strain.GetIntensities()
            elif y == "Run displacement":
                yy = strain.GetRunLengths()
            elif y == "Velocity":
                yy = strain.GetVelocities()
            elif y == "Lifetime":
                yy = strain.GetLifetimes()
            # elif y == "Average distance from SPB":
            # yy = strain.GetAverageDistances()
            for idx, ax in enumerate(axs):
                ax.scatter(
                    xx[idx],
                    yy[idx],
                    s=12,
                    alpha=0.8,
                    color=c,
                    edgecolors="none",
                    label=strain.label,
                )
                ax.set_title(directions[idx])
                # ax.grid(True)
        if xlab is not None:
            axs[0].set_xlabel("{0} ({1})".format(xlab, xunit))
            axs[1].set_xlabel("{0} ({1})".format(xlab, xunit))
        if ylab is not None:
            axs[0].set_ylabel("{0} ({1})".format(ylab, yunit))
        # Both axes share the per-quantity limits from xmaxes.
        for ax in axs:
            ax.legend()
            ax.set_xlim(left=0, right=xmaxes[x])
            ax.set_ylim(bottom=0, top=xmaxes[y])
        plt.tight_layout()
        plt.savefig("scatter_{0}_{1}.pdf".format(x, y))
        plt.close()
# }}}
# GraphSpindleIntensity {{{
def GraphSpindleIntensity(self, strains, lrange=[1, 10], gname=None):
# Graph spindle intensity between poles
intStrain = np.zeros((len(strains), 100))
cols = sns.color_palette("husl", len(strains))
xran = np.linspace(0, 1, 100)
for k, strain in enumerate(strains):
fig, ax = plt.subplots(figsize=(9, 6))
# Find spindle intensities for all kymographs
intensities = None
for i, kymo in enumerate(strain.kymographs):
intense = kymo.FindIntensityAlongSpindle(lrange=lrange)
if intense is not None:
if intensities is None:
intensities = np.mean(intense, axis=0)
else:
intensities = np.vstack((intensities, np.mean(intense, axis=0)))
try:
intStrain[k, :] = np.mean(intensities, axis=0)
except:
pdb.set_trace()
print("1")
# Plot
for row in intensities:
ax.plot(xran, row, color="blue")
ax.plot(xran, np.mean(intensities, axis=0), color="red", linewidth=4)
ax.set_ylabel("Intensity (AU)")
ax.set_xlabel("Position along spindle (normalized)")
ax.set_title("Cut7 intensity - {0}".format(strain.label))
fig.savefig("spindle_intensity_{0}.pdf".format(strain.label))
plt.close()
# Make a comparison figure
fig, ax = plt.subplots(figsize=(9, 6))
for strn, row, c in zip(strains, intStrain, cols):
ax.plot(xran, row, color=c, linewidth=4, label=strn.label)
ax.set_ylabel("Intensity (AU)")
ax.set_xlabel("Position along spindle (normalized)")
ax.set_title("Cut7 intensity")
ax.legend()
figname = "spindle_intensity_all.pdf"
if gname is not None:
figname = figname[:-4] + "_{0}.pdf".format(gname)
fig.suptitle(gname)
fig.savefig(figname)
plt.close()
# }}}
# PlotTracksByState {{{
def PlotTracksByState(self, k=5):
# Plot individual curved tracks with poles
# Plot all tracks overlayed without poles
cols = {
"Inactive": "blue",
"Poleward": "green",
"Antipoleward": "red",
}
for strain in self.strains:
strain.PlotTrackByStates(cols, k=k)
# }}}
# PlotAllTracks {{{
def PlotAllTracks(self):
# Plot all tracks
cols = {
"Inactive": "blue",
"Poleward": "green",
"Antipoleward": "red",
}
for strain in self.strains:
strain.PlotAllTracks(cols)
# }}}
# DisplayTracksStatistics {{{
def DisplayTracksStatistics(self):
# Display statistics about the tracks
print("------------------------------------------")
print("------------------------------------------")
print("------------ Track Statistics ------------")
print("------------------------------------------\n")
print("Number of tracks:")
for strain in self.strains:
print(" {0} : {1}\n".format(strain.label, len(strain.tracks)))
print("------------------------------------------")
print("------------------------------------------")
# }}}
def weighted_avg_and_std(values, weights):
    """
    Return the weighted average, standard deviation, and standard error.
    values, weights -- Numpy ndarrays with the same shape.
    """
    mean = np.average(values, weights=weights)
    # Weighted second central moment: fast and numerically precise.
    variance = np.average((values - mean) ** 2, weights=weights)
    sigma = np.sqrt(variance)
    stderr = sigma / np.sqrt(len(values))
    return mean, sigma, stderr
#########################################################
# Script entry point: constructing the analysis object drives the full run.
if __name__ == "__main__":
    x = KymographAnalysis()
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,627
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/Track.py
|
#!/usr/bin/env python
import os, pdb
import numpy as np
from scipy import interpolate, signal
from .node_graph import Graph
import matplotlib.pyplot as plt
import math
import uuid
# Superclass for Poles and tracks that stores positional and intensity information
class Feature:
    """Base class for kymograph features (poles and tracks).

    Stores parallel time/position arrays plus the source image, and
    resamples the trace onto a denser, uniform time grid on construction.
    """
    def __init__(self, time, position, image, time_step=1):
        # Parallel arrays describing the feature's trace over time.
        self.time = np.array( time )
        self.position = np.array( position )
        # Unique identifier for cross-referencing features.
        self.id = uuid.uuid1()
        self.time_step = time_step
        # Time expressed in (fractional) pixel rows of the kymograph image.
        self.pixel_time = self.time / self.time_step
        self.image = image
        # Resample data
        self.ResampleData()
    def ResampleData( self, sample_factor=3):
        # resample data based on time pixels
        # Define an interpolation function for positions
        ifunc_pos = interpolate.interp1d( self.time, self.position, kind='linear')
        # Define a grid of resampled time points
        # (at least 2 samples; roughly sample_factor samples per time unit).
        self.time = np.linspace( self.time[0], self.time[-1], int( np.floor(max([ 2, sample_factor*(self.time[-1]-self.time[0])])) ))
        # Debug trap: a one-sample grid should be impossible given the max(2, ...).
        if len(self.time) == 1:
            pdb.set_trace()
            print('oops')
        self.position = ifunc_pos( self.time)
# Class for a Pole
class Pole(Feature):
    """A spindle pole body trace on a kymograph.

    Adds a clamped interpolation function over the pole's positions so track
    code can query the pole position at arbitrary times.
    """
    def __init__(self, time, position, image=[], time_step=1):
        Feature.__init__(self, time, position, image, time_step=time_step)
        # Define an interpolation/extrapolation function
        # self.ifunc = interpolate.interp1d(self.time, self.position, kind='linear', fill_value='extrapolate')
        # Out-of-range queries are clamped to the first/last known position
        # rather than linearly extrapolated.
        self.ifunc = interpolate.interp1d(self.time, self.position, kind='linear', fill_value=(self.position[0], self.position[-1]), bounds_error=False)
    def Print(self):
        # Dump the pole's id, times and positions to stdout.
        print('Pole :')
        print('   ID : {}'.format(self.id))
        print('   Time : {}'.format( self.time))
        print('   Position : {}'.format( self.position))
        print('--------------------------------- ')
    def TrimBasedOnTime(self, time_keep):
        # Trim the pole to be inside the time range specified.
        # NOTE(review): returns np.nan when the pole lies outside the window
        # but None when fewer than 3 samples remain -- callers must handle
        # both sentinels; confirm this asymmetry is intentional.
        if np.all(time_keep == -1):
            return np.nan
        # Check if track exists between those times
        start_before = (self.time[0] < time_keep[0])
        start_after = (self.time[0] > time_keep[1])
        end_before = (self.time[-1] < time_keep[0])
        end_after = (self.time[-1] > time_keep[1])
        if start_before and end_before:
            return np.nan
        elif start_after and end_after:
            return np.nan
        # Get indices of times
        idx = np.argwhere( (self.time > time_keep[0]) & (self.time < time_keep[1]) ).T[0].tolist()
        if len(idx) < 3:
            return None
        idx = range( idx[0], idx[-1]+1)
        # Create the new trimmed pole
        polenew = Pole( self.time[idx], self.position[idx], self.image, time_step=self.time_step)
        # print(time_keep)
        # print(polenew.time[0])
        # print(polenew.time[-1])
        return polenew
# Class for a Track: additionally stores associated poles and track direction
class Track(Feature):
    """A moving feature on a kymograph.

    In addition to the Feature trace, stores the associated poles, the track
    direction ('Poleward', 'Antipoleward', 'Inactive', or 'Ambiguous'), the
    line type ('Line' or curved), pixel scales, and the source kymograph file.
    """
    def __init__(self, time, position, image, poles, direction, line_type, time_step=1, pos_step=1, kymo_file=None):
        Feature.__init__(self, time, position, image, time_step=time_step)
        self.poles = poles
        self.direction = direction
        self.line_type = line_type
        self.pos_step = pos_step
        self.kymo_file = kymo_file
    def CalcPositionPoleCurrent(self):
        # Get pole position at the current time (i.e at the times of the track) by using the interpolation/extrapolation function of the pole
        pos = np.zeros( (len(self.poles), np.size(self.position) ) )
        for idx, pole in enumerate( self.poles) :
            pos[idx,:] = np.array( pole.ifunc( self.time) )
        return pos
    def CalcPositionRelative(self):
        # Calculate track position relative to the pole (absolute distance).
        pole = self.CalcPositionPoleCurrent()
        pos = np.zeros( np.shape(pole) )
        for idx, ele in enumerate( pole):
            pos[idx,:] = np.abs( np.array( self.position - ele) )
        return pos
    def CalcVelocity(self):
        # Calculate the time-weighted mean speed of this linear track,
        # one value per associated pole.
        pos = self.CalcPositionRelative()
        # Find Velocity
        vel = np.zeros( (len(self.poles)) )
        for idx in range( len(self.poles)):
            vel[idx] = np.average( np.absolute( np.divide( np.diff( pos[idx,:]) , np.diff( self.time) ) ), weights = np.diff(self.time) )
        return vel
    def CalcSpindleLength(self):
        # Calculate the spindle length (pole-to-pole distance) over the track's
        # lifetime; only meaningful for bipolar (two-pole) tracks.
        if len(self.poles) != 2:
            return
        # Find the distance between the poles for the extent of this track
        leng = np.absolute( self.poles[0].ifunc( self.time) - self.poles[1].ifunc( self.time) )
        return leng
    def CalcIntensity( self):
        # Interpolate to find the mean intensity of the track.
        # NOTE(review): interpolate.interp2d is deprecated in recent SciPy;
        # consider RegularGridInterpolator when upgrading.
        dimT = np.shape( self.image)[0]
        dimX = np.shape( self.image)[1]
        f = interpolate.interp2d( self.pos_step*np.arange(0,dimX), self.time_step*np.arange(0,dimT), self.image)
        intense = f(self.position, self.time)
        return np.mean(intense)
    def CheckViability(self):
        # A viable track has strictly increasing times; returns 1/0.
        if np.any( np.diff( self.time) <= 0 ):
            return 0
        return 1
    def OrderPoles(self):
        # Order the poles with the first one being the closest one to the start of the track
        if len(self.poles) != 2:
            return
        pos = self.CalcPositionRelative()
        if np.absolute( pos[1,0] ) < np.absolute( pos[0,0]):
            self.poles = [self.poles[1], self.poles[0]]
    def Trim(self, lrange):
        # Trim the track to the window where spindle length is inside lrange.
        # Returns self when no trimming applies, None when too little remains.
        if len( self.poles) == 1:
            return self
        if lrange is None:
            return self
        # Get indices of times when spindle length is between the given range values
        lens = self.CalcSpindleLength()
        idx = np.argwhere( (lens > lrange[0]) & (lens < lrange[1]) ).T[0].tolist()
        if len(idx) < 3:
            return None
        idx = range( idx[0], idx[-1]+1)
        # Create the new trimmed track
        tracknew = Track( self.time[idx], self.position[idx], self.image, self.poles, self.direction, self.line_type, time_step=self.time_step, pos_step=self.pos_step)
        return tracknew
    def TrimBasedOnTime(self, time_keep):
        # Trim the track to be inside the time range specified.
        # Returns np.nan when the track lies outside the window, None when
        # fewer than 3 samples remain, else a new trimmed Track.
        if np.all(time_keep == -1):
            return np.nan
        # Check if track exists between those times
        start_before = (self.time[0] < time_keep[0])
        start_after = (self.time[0] > time_keep[1])
        end_before = (self.time[-1] < time_keep[0])
        end_after = (self.time[-1] > time_keep[1])
        if start_before and end_before:
            return np.nan
        elif start_after and end_after:
            return np.nan
        # Get indices of times
        idx = np.argwhere( (self.time > time_keep[0]) & (self.time < time_keep[1]) ).T[0].tolist()
        if len(idx) < 3:
            return None
        idx = range( idx[0], idx[-1]+1)
        # Create the new trimmed track.
        # (The original guarded against the constructor returning None with a
        # pdb trap; a constructor always returns an instance, so it was dead
        # code and has been removed.)
        tracknew = Track( self.time[idx], self.position[idx], self.image, self.poles, self.direction, self.line_type, time_step=self.time_step, pos_step=self.pos_step)
        return tracknew
    def SplitTrack(self, ipole=0, cutoff=0.003):
        # Split a curved track into multiple mini unidirectional segments.
        # cutoff : units micron/sec, below which a segment counts as inactive.
        # Returns (segments, switches) where switches counts state transitions.
        switches = {
            'P' : { 'P' : 0, 'AP': 0, 'I' : 0,},
            'AP' : { 'P' : 0, 'AP': 0, 'I' : 0,},
            'I' : { 'P' : 0, 'AP': 0, 'I' : 0,},
        }
        # If linear directional track, cant split, so exit
        if self.direction != 'Ambiguous':
            return [self], switches
        # If linear ambiguous track, figure out direction, then exit
        if self.line_type == 'Line' and self.direction == 'Ambiguous':
            if len(self.CalcPositionRelative()) == 0:
                pdb.set_trace()
                print('a')
            position = np.absolute( self.CalcPositionRelative()[ipole,:] )
            vel = np.mean( np.divide( np.diff( position) , np.diff(self.time) ) )
            if abs( vel) < cutoff:
                self.direction = 'Inactive'
            elif vel > 0:
                self.direction = 'Antipoleward'
            elif vel < 0:
                self.direction = 'Poleward'
            return [self], switches
        # Get track position relative to the pole
        position = np.absolute( self.CalcPositionRelative()[ipole,:] )
        # Use a rolling window to find velocities
        vel = FindGradientRollingWindow( position, self.time, window=16)
        # Assign states based on value of velocity at each timestep
        states = []
        for v in vel:
            if abs( v) < cutoff:
                states += ['I']
            elif v > 0:
                states += ['AP']
            elif v < 0:
                states += ['P']
        # set first state to second state. last state to second last state
        states[0] = states[1]
        states[-1] = states[-2]
        # Remove singly occuring states
        for i, state in enumerate(states):
            if i>0 and i< len(states)-1:
                if state != states[i-1] and state != states[i+1]:
                    states[i] = states[i-1]
        # Count switches and get track indices
        p_state = 'XXX'
        track = { 'pos': [], 'time': [], 'dir':[] }
        idx = [0 , 0]
        for cnt, st in enumerate(states):
            if cnt == 0:
                p_state = st
                idx[0] = 0
                continue
            if st == p_state:
                idx[1] += 1
            if st != p_state:
                # store old stuff
                pos = self.position[ idx[0]: idx[1]+2]
                time = self.time[ idx[0]: idx[1]+2]
                track['pos'] += [pos]
                track['time'] += [time]
                track['dir'] += [p_state]
                p_state = st
                # begin new
                idx[0] = cnt
                idx[1] = cnt
            # Store the last info
            if cnt == len(states)-1:
                pos = self.position[ idx[0]: idx[1]+1]
                time = self.time[ idx[0]: idx[1]+1]
                track['pos'] += [pos]
                track['time'] += [time]
                track['dir'] += [p_state]
        # record switches
        for cnt, dd in enumerate( track['dir']):
            if cnt == 0:
                continue
            switches[ track['dir'][cnt-1]][track['dir'][cnt]] += 1
        # Create track objects from the information.
        # (Fixed: the original compared strings with 'is', which relies on
        # CPython string interning and emits a SyntaxWarning; '==' is correct.)
        segments = []
        for time, pos, direc in zip( track['time'], track['pos'], track['dir']):
            if direc == 'P':
                direction = 'Poleward'
            elif direc == 'AP':
                direction = 'Antipoleward'
            elif direc == 'I':
                direction = 'Inactive'
            pos = pos.tolist()
            time = time.tolist()
            segments += [Track( time, pos, self.image, self.poles, direction, 'Line', time_step=self.time_step, pos_step=self.pos_step, kymo_file=self.kymo_file)]
        return segments, switches
    def DisplayTrack(self, ax=None):
        # Overlay the track (in red) on its kymograph image.
        if ax is None:
            fig, ax = plt.subplots(figsize=(6,6))
        # Display kymograph image
        ax.imshow( self.image)
        ax.plot( self.position/self.pos_step, self.time/self.time_step, color='red')
    def Print(self):
        # Dump the track's id, direction, type, times and positions to stdout.
        print('Feature :')
        print('   ID : {}'.format(self.id))
        print('   Direction : {}'.format( self.direction))
        print('   Line type : {}'.format( self.line_type))
        print('   Time : {}'.format( self.time))
        print('   Position : {}'.format( self.position))
        print('--------------------------------- ')
def CountSwitches( states, switches):
    """Tally directional state transitions from a per-timestep state list.

    Parameters
    ----------
    states : sequence of str
        Direction labels per timestep ('P', 'AP', or 'I').
    switches : dict of dict of int
        Nested counter switches[from_state][to_state]; updated in place.

    Returns
    -------
    dict
        The same ``switches`` mapping, for convenience.

    NOTE(review): the original body was an inadvertent broken copy of
    FindGradientRollingWindow -- it referenced undefined names (t, t_window,
    x) and raised NameError if ever called. Reimplemented to mirror the
    inline transition counting in Track.SplitTrack; confirm intended usage.
    """
    for prev, curr in zip(states, states[1:]):
        if curr != prev:
            switches[prev][curr] += 1
    return switches
def FindGradientRollingWindow( x, t, window=6):
    """Estimate dx/dt at every sample with a centered rolling window.

    ``window`` is in the units of ``t``; it is converted to a half-width in
    samples using the mean sampling interval. Returns a list with one
    gradient estimate per sample.
    """
    dt = np.mean(np.diff(t))
    half = int(np.ceil(window / (2 * dt)))
    n = len(t)
    grads = []
    for i in range(n):
        # Clamp the window to the data range at both ends.
        lo = max(0, i - half)
        hi = min(n, i + half)
        dx = np.diff(x[lo:hi])
        dtt = np.diff(t[lo:hi])
        grads.append(np.mean(np.divide(dx, dtt)))
    return grads
# Module is intended for import; running it directly does nothing useful.
if __name__ == "__main__":
    print('Not implemented')
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,628
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/node_graph.py
|
#!/usr/bin/env python
# Python program to print connected
# components in an undirected graph
# This code is contributed by Abhishek Valsan
# Updated by Saad Ansari for a directed graph application
import pdb
class Graph:
    """Directed graph whose connectedComponents() returns each weakly
    connected component as a vertex chain ordered from source to sink."""

    # init function to declare class variables
    def __init__(self, V):
        self.V = V
        # Adjacency lists for outgoing (next) and incoming (prev) edges.
        self.next = [[] for _ in range(V)]
        self.prev = [[] for _ in range(V)]

    def VisitNext(self, temp, v, visited):
        # Depth-first walk along outgoing edges, appending vertices to temp.
        visited[v] = True
        if not temp or temp[-1] != v:
            temp.append(v)
        for nbr in self.next[v]:
            if not visited[nbr]:
                temp = self.VisitNext(temp, nbr, visited)
        return temp

    def VisitPrev(self, temp, v, visited):
        # Depth-first walk along incoming edges, prepending vertices to temp.
        visited[v] = True
        if not temp or temp[0] != v:
            temp.insert(0, v)
        for nbr in self.prev[v]:
            if not visited[nbr]:
                temp = self.VisitPrev(temp, nbr, visited)
        return temp

    # method to add an directed edge
    def addEdge(self, v, w):
        self.next[v].append(w)
        self.prev[w].append(v)

    # Method to retrieve connected components in a directed graph
    def connectedComponents(self):
        visited = [False] * self.V
        components = []
        for v in range(self.V):
            if not visited[v]:
                # Walk downstream first, then stitch upstream vertices in front.
                chain = self.VisitNext([], v, visited)
                components.append(self.VisitPrev(chain, v, visited))
        return components
# Driver Code
if __name__=="__main__":
    # Demo: 7 vertices (0-6) with a few directed edges; prints the
    # weakly connected chains found by connectedComponents().
    g = Graph(7);
    g.addEdge(1, 0)
    g.addEdge(3, 4)
    g.addEdge(0, 6)
    g.addEdge(5, 1)
    cc = g.connectedComponents()
    print("Following are connected components")
    print(cc)
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,629
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/ReadFiles.py
|
#!/usr/bin/env python
import os, pdb
import math
import numpy as np
from .Track import *
import matplotlib.pyplot as plt
def ReadTxt( fname, verbose=0):
    """Read a kymograph track file and parse it into (general, poles, tracks).

    The file is split line-by-line into three sections delimited by the
    headers 'General Information', 'Poles Information', and
    'Feature Information' (the header lines themselves are kept with their
    sections, as the parsers expect).

    Parameters
    ----------
    fname : str
        Path to the track .txt file; a matching .tif image is loaded by
        ParseGeneralInfo.
    verbose : int
        When truthy, echo the whole file to stdout first.

    Returns
    -------
    tuple
        (general dict, list of Pole or None, list of Track or None).
    """
    if verbose:
        PrintFile( fname)
    # Initialize lists
    geninfo = []
    polesinfo = []
    featureinfo = []
    with open(fname) as fp:
        addLine = None
        for line in fp:
            # Section headers switch the destination list.
            if line.find( 'General Information') > -1:
                addLine = 'G'
            if line.find( 'Poles Information') > -1:
                addLine = 'P'
            if line.find( 'Feature Information') > -1:
                addLine = 'F'
            # Add General Information
            if addLine == 'G':
                geninfo.append( line)
            # Add Poles Information
            elif addLine == 'P':
                polesinfo.append( line)
            # Add Feature Information
            elif addLine == 'F':
                featureinfo.append( line)
    # Parse information
    general = ParseGeneralInfo( fname, geninfo)
    poles = ParsePolesInfo( polesinfo, general)
    tracks = ParseTracksInfo( featureinfo, poles, general)
    # The original dropped into pdb here when polesinfo was empty (debug
    # residue); ParsePolesInfo already reports a missing section.
    return general, poles, tracks
def ParseGeneralInfo( fname, geninfo):
    """Parse the 'General Information' section lines into a dict.

    Also loads the kymograph image from the sibling tiff file.
    NOTE(review): the tiff path is derived as fname[:-9] + '.tif', i.e. it
    assumes the txt filename ends with a 9-character suffix -- confirm the
    naming convention used by the track builder.
    """
    general = {
        'path_tiff' : [],
        'type' : [],
        'time_start' : [],
        'time_end' : [],
        'time_step' : [],
        'n_poles' : [],
        'n_tracks': [],
        'image': [],
    }
    for line in geninfo:
        # Tiff Path
        path_tiff = FindSingleSubstring( line, 'Tiff path : ')
        if path_tiff is not None:
            general['path_tiff'] = path_tiff
        # Spindle Type
        typ = FindSingleSubstring( line, 'Spindle type : ')
        if typ is not None:
            general['type'] = typ
        # Time Start
        time_start = FindNumbers( line, 'Start time (s) : ')
        if time_start is not None:
            general['time_start'] = time_start
        # Time End
        time_end = FindNumbers( line, 'End time (s) : ')
        if time_end is not None:
            general['time_end'] = time_end
        # Time Step (scalar, unlike start/end which keep the parsed list)
        time_step = FindNumbers( line, 'Time step (s) : ')
        if time_step is not None:
            general['time_step'] = time_step[0]
        # Num Poles
        npoles = FindNumbers( line, 'Num poles : ')
        if npoles is not None:
            general['n_poles'] = int( npoles[0])
        # Num Tracks
        ntracks = FindNumbers( line, 'Num tracks : ')
        if ntracks is not None:
            general['n_tracks'] = int( ntracks[0])
    general['image'] = LoadTiff( fname[:-9]+'.tif')
    return general
def ParsePolesInfo( polesinfo, general):
    """Parse the 'Poles Information' section lines into a list of Pole.

    Returns None (after printing a notice) when the section is empty.
    """
    # Parse information about poles
    if not polesinfo or len(polesinfo) == 0:
        print('No poles information here')
        return
    # Determine number of poles and split information
    polelist = []
    idxPole = None
    nPoles = 0
    for line in polesinfo:
        # Look for the next pole
        if line.find( 'Pole number : {}'.format( nPoles+1)) > -1:
            nPoles += 1
        if nPoles == 0:
            continue
        if nPoles != len(polelist):
            polelist += [[line]]
        else:
            polelist[ nPoles-1] += [line]
    # print('Found {} poles'.format( nPoles) )
    # for each split pole, get useful information and initialize a Pole object
    poles = []
    for pole in polelist:
        for line in pole:
            # Time pixels (converted to seconds via the global time step)
            if FindNumbers( line, 'Time pixel : ') is not None:
                time = FindNumbers( line, 'Time pixel : ')
                time = [x * general['time_step'] for x in time]
            # # Times
            # if FindNumbers( line, 'Time (s) : ') is not None:
            #     time = FindNumbers( line, 'Time (s) : ')
            # Position
            if FindNumbers( line, 'Position (um) : ') is not None:
                position = FindNumbers( line, 'Position (um) : ')
            # Intensity
            # NOTE(review): intensity is parsed but never passed to Pole.
            if FindNumbers( line, 'Intensity : ') is not None:
                intensity = FindNumbers( line, 'Intensity : ')
        poles += [Pole( time, position, general['image'], time_step=general['time_step']) ]
    return poles
def ParseTracksInfo( featureinfo, poles, general):
    """Parse the 'Feature Information' section lines into a list of Track.

    Returns None (after printing a notice) when the section is empty.
    """
    # Parse information about tracks
    if not featureinfo or len(featureinfo) == 0:
        print('No tracks information here')
        return
    # Determine number of tracks and split information
    tracklist = []
    idxTrack = None
    nTracks = 0
    for line in featureinfo:
        # Look for the next track
        if line.find( 'Feature number : {}'.format( nTracks+1)) > -1:
            nTracks += 1
        if nTracks == 0:
            continue
        if nTracks != len(tracklist):
            tracklist += [[line]]
        else:
            tracklist[ nTracks-1] += [line]
    # print('Found {} tracks'.format( nTracks) )
    # for each split track, get useful information and initialize a Track object
    tracks = []
    for trck in tracklist:
        for line in trck:
            # Time pixels (converted to seconds via the global time step)
            if FindNumbers( line, 'Time pixel : ') is not None:
                time = FindNumbers( line, 'Time pixel : ')
                timePix = time
                time = [x * general['time_step'] for x in time]
            # # Time
            # if FindNumbers( line, 'Time (s) : ') is not None:
            #     time = FindNumbers( line, 'Time (s) : ')
            # Position
            if FindNumbers( line, 'Position pixel : ') is not None:
                positionPix = FindNumbers( line, 'Position pixel : ')
            # Position
            if FindNumbers( line, 'Position (um) : ') is not None:
                position = FindNumbers( line, 'Position (um) : ')
            # Intensity (parsed but not passed to Track)
            if FindNumbers( line, 'Intensity : ') is not None:
                intensity = FindNumbers( line, 'Intensity : ')
            # Direction
            # NOTE(review): direction is parsed from the file but the Track
            # below is created with hard-coded 'Ambiguous' -- confirm this is
            # intentional (SplitTrack later resolves the direction).
            if FindSingleSubstring( line, 'Feature direction : ') is not None:
                direction = FindSingleSubstring( line, 'Feature direction : ')
                direction = direction[0:-1]
            # Line type
            if FindSingleSubstring( line, 'Feature type : ') is not None:
                line_type = FindSingleSubstring( line, 'Feature type : ')
                line_type = line_type[0:-1]
        # pos_step=0.1067 is presumably the pixel size in microns -- confirm.
        tracks += [Track( time, position, general['image'], poles, 'Ambiguous', line_type, time_step=general['time_step'], pos_step=0.1067) ]
    return tracks
def LoadTiff( fname):
    """Load a tiff image; collapse multi-channel stacks to one gray plane."""
    img = plt.imread(fname)
    # Color images arrive as (rows, cols, channels): average the channels.
    if img.ndim == 3:
        return np.mean(img, axis=2)
    return img
def FindSingleSubstring(strSearch, strLabel):
    """Return strSearch with strLabel removed, or None if strLabel is absent."""
    if strLabel not in strSearch:
        return None
    return strSearch.replace(strLabel, '')
def FindNumbers(strSearch, strLabel):
    """Parse the comma-separated floats that follow strLabel in strSearch.

    Returns a list of floats, or None when strLabel does not occur.
    """
    if strLabel not in strSearch:
        return None
    remainder = strSearch.replace(strLabel, '')
    # float() tolerates surrounding whitespace, including a trailing newline.
    return [float(tok) for tok in remainder.split(',')]
def PrintFile(fname):
    """Dump the entire contents of a file to stdout."""
    with open(fname) as handle:
        print(handle.read())
##########################################
# Module is intended for import; there is no standalone behavior.
if __name__ == "__main__":
    print("no default implementation")
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,630
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/Load.py
|
#!/usr/bin/env python
import os, pdb
import yaml
import glob
'''
Name: Load.py
Description: loads and splits the tracks saved by the trackBuilder (kyman.mlapp) into general, poles and feature sections to be parsed by Kymograph.py
'''
# Class to load data from files
# Class to load data from files
class Load:
    """Loads the track files listed in track_files.yaml and splits each one
    into general/poles/feature line sections for downstream parsing."""

    def __init__(self, verbose=0):
        file_name = 'track_files.yaml'
        with open(file_name) as infile:
            # Explicit SafeLoader: yaml.load() without a Loader is deprecated
            # and can execute arbitrary tags from the config file.
            self.data = yaml.load(infile, Loader=yaml.SafeLoader)
        self.verbose = verbose
        self.GetFilenames()
        self.ReadFromFiles()

    def GetFilenames(self):
        # Expand filenames in the case of special characters (glob patterns).
        for strain, dat in self.data['strain'].items():
            for idx, fpath in enumerate(dat['path']):
                files = []
                for fname in dat['files'][idx]:
                    for fil in glob.glob(os.path.join(fpath, fname)):
                        # Keep only the basename; the path is stored separately.
                        files.append(os.path.split(fil)[1])
                self.data['strain'][strain]['files'][idx] = files

    def ReadFromFiles(self):
        # Read information from all files given yaml data.
        for strain, dat in self.data['strain'].items():
            # Initialize once per strain: the original reset these lists
            # inside the path loop, silently discarding every path's data
            # except the last one.
            self.data['strain'][strain]['geninfo'] = []
            self.data['strain'][strain]['polesinfo'] = []
            self.data['strain'][strain]['featureinfo'] = []
            for idx, fpath in enumerate(dat['path']):
                for fname in dat['files'][idx]:
                    gen, poles, feats = self.ReadFromFile(fpath, fname)
                    self.data['strain'][strain]['geninfo'] += [gen]
                    self.data['strain'][strain]['polesinfo'] += [poles]
                    self.data['strain'][strain]['featureinfo'] += [feats]

    def ReadFromFile(self, fpath, fname):
        # Read one file and split it into general/poles/feature sections.
        geninfo = []
        polesinfo = []
        featureinfo = []
        if self.verbose:
            # Pass the full path: PrintFile previously read a nonexistent
            # self.fpath attribute and raised AttributeError when verbose.
            self.PrintFile(fpath + fname)
        with open(fpath + fname) as fp:
            addLine = None
            for cnt, line in enumerate(fp):
                # Section headers switch the destination list.
                if line.find('General Information') > -1:
                    addLine = 'G'
                if line.find('Poles Information') > -1:
                    addLine = 'P'
                if line.find('Feature Information') > -1:
                    addLine = 'F'
                # Add General Information
                if addLine == 'G':
                    geninfo.append(line)
                # Add Poles Information
                elif addLine == 'P':
                    polesinfo.append(line)
                # Add Feature Information
                elif addLine == 'F':
                    featureinfo.append(line)
        return geninfo, polesinfo, featureinfo

    def PrintFile(self, fname):
        # Print all the information from a file to screen; fname is a full path.
        with open(fname) as fp:
            print(fp.read())
##########################################
# Script entry point: parse every track file listed in 'track_files.yaml',
# echoing each file to stdout as it is read (verbose=1).
if __name__ == "__main__":
    x = Load(verbose=1)
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,631
|
saadjansari/KymoAnalysis
|
refs/heads/main
|
/src/breakBipolar.py
|
#!/usr/bin/env python
import os, pdb
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from .Kymograph import *
import shutil
from random import sample
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
import pickle
'''
Name: breakBipolar.py
Description: Plots the pole separation of a bipolar file
'''
# Root directory holding one sub-folder of kymograph .txt files per strain.
parent_path = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/data/temp')
# Strain folders
folds = ['wild type']
# folds = ['cut7-989TD,pkl1D,klp2D']
# folds = ['wild type','cut7-989TD,pkl1D,klp2D']
# savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/result_wt')
# savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/result_mutant')
# Output directory for all figures and the pickled clustering model.
savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/blahblah')
# slope_window = 25
# nsamples = 10
# thresh = [0.008, 0.005]
# get_data_from_files {{{
# get_data_from_files {{{
def get_data_from_files(parent_path, folds):
    """Build a long-format DataFrame of bipolar-spindle dynamics.

    For every kymograph text file found under parent_path/<strain>, bipolar
    spindles (exactly two poles) are resampled onto a roughly 1-second time
    grid, and the pole separation plus its first and second finite
    differences are recorded per timepoint.

    Returns a pandas.DataFrame with columns:
    strain, index (source file basename), time, length, velocity, acceleration.
    """
    columns = {'strain': [], 'index': [], 'time': [],
               'length': [], 'velocity': [], 'acceleration': []}
    for jfold in folds:
        for fil in (parent_path / jfold).glob('*txt'):
            kymo = Kymograph(fname=str(fil))
            # Only bipolar spindles are analyzed.
            if len(kymo.poles) != 2:
                continue
            # Subsample the pooled pole times, then resample onto a uniform grid.
            tvec = np.array(sorted(np.hstack((kymo.poles[0].time, kymo.poles[1].time)))[1::10])
            tvec = np.linspace(tvec[0], tvec[-1], int(np.ceil(tvec[-1] - tvec[0])))
            # Spindle length and its finite differences; the first sample is
            # duplicated so all arrays match tvec's length.
            clen = np.absolute(kymo.poles[1].ifunc(tvec) - kymo.poles[0].ifunc(tvec))
            dt = tvec[1:] - tvec[:-1]
            dlen = (clen[1:] - clen[:-1]) / dt
            cvel = np.concatenate(([dlen[0]], dlen))
            dvel = (cvel[1:] - cvel[:-1]) / dt
            cacc = np.concatenate(([dvel[0]], dvel))
            tag = os.path.basename(kymo.label)
            for jt in range(len(tvec)):
                columns['strain'].append(jfold)
                columns['index'].append(tag)
                columns['time'].append(tvec[jt])
                columns['length'].append(clen[jt])
                columns['velocity'].append(cvel[jt])
                columns['acceleration'].append(cacc[jt])
    return pd.DataFrame(columns)
# }}}
# pair_plot {{{
# pair_plot {{{
def pair_plot(datframe, vars_compare, label=None, savePath=None, title=''):
    """Seaborn pair plot of *vars_compare* columns, optionally hued by *label*.

    Parameters
    ----------
    datframe : pandas.DataFrame
    vars_compare : list of str — columns to cross-plot.
    label : str or None — column used as hue; None for an unhued plot.
    savePath : path-like or None — figure saved there when given.
    title : str — title placed on the last pairplot axes.
    """
    # Fixed: a plt.subplots() figure was created here and never used —
    # sns.pairplot builds its own figure, so the empty one leaked each call.
    # hue=None is equivalent to omitting hue, which also merges the two
    # previously duplicated branches.
    sns.pairplot(datframe, vars=vars_compare, hue=label,
                 plot_kws=dict(marker="+", s=50, linewidth=3, alpha=0.1),
                 diag_kind='kde',
                 palette='Dark2', height=3)
    plt.tight_layout()
    plt.title(title)
    if savePath is not None:
        plt.savefig(savePath)
    plt.close()
# }}}
# Kmeans {{{
# Kmeans {{{
def do_KMeans(df, vars_compare, n_clusters=2, display=True, savePath=None):
    """K-means cluster the rows of *df* on the columns in *vars_compare*.

    Adds a 'label' column (renumbered by ascending mean 'length' via
    labels_ordered) and optionally saves a hued pair plot.
    Returns (df, fitted KMeans model).
    """
    # NOTE: clustering is done on unscaled data (StandardScaler deliberately
    # skipped here, unlike do_GaussianMixtureModel).
    features = df[vars_compare].to_numpy()
    print('KMeans clustering: N_clusters = {}'.format(n_clusters))
    model = KMeans(n_clusters=n_clusters,
                   init='k-means++',
                   max_iter=300,
                   n_init=10,
                   random_state=10).fit(features)
    labels = model.predict(features)
    print('Silhouette Score = {0:.3f}'.format(silhouette_score(features, labels)))
    df['label'] = labels
    df = labels_ordered(df, 'length')
    if display and savePath is not None:
        pair_plot(df, vars_compare, label="label", title='kmeans',
                  savePath=savePath)
    return df, model
# }}}
# GMM {{{
# GMM {{{
def do_GaussianMixtureModel(df, vars_compare, n_clusters=2, display=True, savePath=None):
    """Gaussian-mixture cluster the rows of *df* on *vars_compare*.

    Features are standardized before fitting. Adds a 'label' column
    (renumbered by ascending mean 'length' via labels_ordered) and
    optionally saves a hued pair plot.
    Returns (df, fitted GaussianMixture model).
    """
    data = df[vars_compare].to_numpy()
    scaler = StandardScaler()
    X_std = scaler.fit_transform(data)
    # define the model
    print('Gaussian Mixture Model: N_components = {}'.format(n_clusters))
    model = GaussianMixture(n_components=n_clusters).fit(X_std)
    labels = model.predict(X_std)
    sil_score = silhouette_score(X_std, labels)
    print('Silhouette Score = {0:.3f}'.format(sil_score))
    df['label'] = labels
    df = labels_ordered(df, 'length')
    if display and savePath is not None:
        # Fixed: the plot title wrongly said 'kmeans' (copy-paste from do_KMeans).
        pair_plot(df, vars_compare, label="label", title='gmm',
                  savePath=savePath)
    return df, model
# }}}
# LabelsOrdered {{{
# LabelsOrdered {{{
def labels_ordered(df, ref_name):
    """Renumber df.label so that label 0 has the smallest mean of *ref_name*,
    label 1 the next smallest, and so on. Mutates and returns *df*."""
    labels = sorted(df.label.unique())
    # Mean of the reference column within each label group.
    means = [df.loc[df.label == lab, ref_name].mean() for lab in labels]
    # Old labels re-ranked by ascending group mean.
    ranked = [lab for _, lab in sorted(zip(means, labels), key=lambda pair: pair[0])]
    mapping = {old: new for old, new in zip(labels, ranked)}
    print(mapping)
    df.label = [mapping[lab] for lab in df.label]
    return df
# }}}
# plotClassifiedTracks {{{
# plotClassifiedTracks {{{
def plotClassifiedTracks(df, saveParent=None, nSamples=50, model=None):
    """Per strain, plot up to *nSamples* spindle-length tracks three ways:
    raw (ax0), split by the classifier label (ax1), and split by the
    smoothed/monotonic label (ax2).

    Parameters
    ----------
    df : DataFrame with strain, index, time, length, label columns.
    saveParent : Path or None — directory to save 'tracks_*.pdf' into.
    nSamples : int — max tracks plotted per strain (first N, not random).
    model : str or None — model name appended to the output filename.
    """
    def _plot_split(ax, time, length, label):
        # Draw one track split into group 0 (green) / group 1 (purple) by
        # NaN-masking the samples belonging to the other group.
        len_0 = length.copy()
        len_1 = length.copy()
        len_0[np.where(label == 1)[0]] = np.nan
        len_1[np.where(label == 0)[0]] = np.nan
        ax.plot(time, len_0, alpha=0.5, lw=2, color='green')
        ax.plot(time, len_1, alpha=0.5, lw=2, color='purple')

    for strain in df.strain.unique().tolist():
        fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(18, 4.5), sharey=True)
        indices = df[df.strain == strain]['index'].unique()
        # Plot the first nSamples tracks (deterministic, not random sampling).
        indices2plot = indices[:nSamples]
        for ind in indices2plot:
            track = df[(df['strain'] == strain) & (df['index'] == ind)]
            time = np.array(track.time)
            length = np.array(track.length)
            label = np.array(track.label)
            # Axis 0: raw track.
            ax0.plot(time, length, alpha=0.5, color='k', lw=2)
            # Axis 1: raw classifier labels.
            _plot_split(ax1, time, length, label)
            # Axis 2: labels smoothed then forced monotonic (once 1, always 1).
            label_new = np.array(ForceLabelsOneWay(SmoothClassifiedLabels(label, span=100)))
            _plot_split(ax2, time, length, label_new)
        ax0.set(ylabel=r'Spindle Length $(\mu m)$', xlabel='Time (s)')
        for ax in (ax1, ax2):
            ax.plot([], [], alpha=0.7, color='green', label='Group 0')
            ax.plot([], [], alpha=0.7, color='purple', label='Group 1')
            # Fixed: ax1.legend() was called twice and ax2's legend never drawn.
            ax.legend()
            ax.set(xlabel='Time (s)')
        plt.suptitle(strain)
        plt.tight_layout()
        if saveParent is not None:
            if model is None:
                plt.savefig(saveParent / 'tracks_{0}.pdf'.format(strain))
            else:
                plt.savefig(saveParent / 'tracks_{0}_{1}.pdf'.format(model, strain))
        plt.close()
# }}}
# SmoothClassifiedLabels {{{
# SmoothClassifiedLabels {{{
def SmoothClassifiedLabels(label, span=100):
    """Smooth a 0/1 label sequence with a centered moving average, then
    re-threshold at 0.5. The half-window is capped at len(label)//2."""
    def _moving_average(arr, half_width):
        # Centered mean of width 2*half_width+1. Near the edges the window is
        # shrunk only on the side that runs past the data; the in-range side
        # keeps its full half_width extent.
        width = half_width * 2 + 1
        out = np.convolve(arr, np.ones(width) / width, mode="same")
        out[0] = np.average(arr[:half_width])
        for j in range(1, half_width + 1):
            out[j] = np.average(arr[:j + half_width])
            out[-j] = np.average(arr[-j - half_width:])
        return out
    half_width = min([span, int(len(label) / 2)])
    smoothed = np.array(_moving_average(label, half_width))
    return np.where(smoothed >= 0.5, 1, 0)
# }}}
# ForceLabelsOneWay {{{
# ForceLabelsOneWay {{{
def ForceLabelsOneWay(label):
    """Make the label sequence monotonic: once a 1 appears, every later
    entry is forced to 1 (running maximum)."""
    return np.array(np.maximum.accumulate(label))
# }}}
# --- Script body: load tracks, cluster on velocity, plot, and save model ---
# Create the output directory if it does not exist yet.
if not Path.exists( savepath):
    os.mkdir( savepath)
# Load data into dataframe
df = get_data_from_files(parent_path, folds)
# Feature columns used for clustering (velocity only).
names = ['velocity']
# Display (pre clustering)
pair_plot(df, names, savePath=savepath/'features_grid_raw.png')
# Kmeans
df_kmean, model_kmean = do_KMeans(df.copy(),names, savePath=savepath/'features_grid_kmeans.png')
print(df_kmean.groupby('label').mean() )
plotClassifiedTracks(df_kmean, model='kmeans',saveParent=savepath)
# Save model
# NOTE(review): the pickle is written into the data folder (parent_path),
# not savepath — confirm that is intentional.
with open(parent_path / 'kmeans.pickle', 'wb') as f:
    pickle.dump(model_kmean, f)
# GMM (disabled alternative clustering path)
# df_gmm, model_gmm = do_GaussianMixtureModel(df.copy(),names, savePath=savepath/'features_grid_gmm.png')
# print(df_gmm.groupby('label').mean() )
# plotClassifiedTracks(df_gmm, model='gmm',saveParent=savepath)
# if vel_thresh[0]==1:
#     anaphase_time = 'Always'
# elif vel_thresh[0]==0 and vel_thresh[-1]==1:
#     anaphase_time = timelist[ np.where(np.array(vel_thresh)>0.5)[0][0] ]
# elif vel_thresh[-1]==0:
#     anaphase_time = 'Never'
# # anaphase_time = timelist[ np.where(np.array(vel_thresh)>0.5)[0][0] ]
# print( '{0} --> Anaphase B Transition = {1} sec'.format( files2break[idx].stem,anaphase_time))
|
{"/src/Features.py": ["/src/node_graph.py"], "/src/smooth_test.py": ["/src/Kymograph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"], "/src/Kymograph.py": ["/src/node_graph.py", "/src/Features.py", "/src/ReadFiles.py"], "/KymographAnalysis.py": ["/src/Strain.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/ReadFiles.py": ["/src/Track.py"], "/src/breakBipolar.py": ["/src/Kymograph.py"]}
|
2,635
|
carina28/interactive-broker-python-api
|
refs/heads/master
|
/test_client.py
|
from ibw.client import IBClient
from ibw.configAlex import REGULAR_ACCOUNT, REGULAR_PASSWORD, REGULAR_USERNAME, PAPER_ACCOUNT, PAPER_PASSWORD, PAPER_USERNAME
# Create a new session of the IB Web API.
# NOTE(review): paper-trading credentials are used here; switch to the
# REGULAR_* constants for a live account.
ib_client = IBClient(username = PAPER_USERNAME, password = PAPER_PASSWORD, account = PAPER_ACCOUNT)
# create a new session.
ib_client.create_session()
# grab the account data.
account_data = ib_client.portfolio_accounts()
# print the data.
print(account_data)
# Grab historical prices (5-minute bars over 1 day; conid 265598 is AAPL).
aapl_prices = ib_client.market_data_history(conid = ['265598'], period = '1d', bar = '5min')
# print the prices.
print(aapl_prices)
|
{"/test_client.py": ["/ibw/client.py"]}
|
2,636
|
carina28/interactive-broker-python-api
|
refs/heads/master
|
/ibw/client.py
|
import os
import sys
import json
import time
import pathlib
import urllib
import requests
import subprocess
import certifi
import urllib3
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(category=InsecureRequestWarning)
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
class IBClient():
def __init__(self, username = None, password = None, account = None):
'''
Initalizes a new IBClient Object with the username and password of the
account holder.
'''
self.ACCOUNT = account
self.USERNAME = username
self.PASSWORD = password
self.CLIENT_PORTAL_FOLDER = pathlib.Path.cwd().joinpath('clientportal.gw').resolve()
self.API_VERSION = 'v1/'
self.TESTING_FLAG = False
self._operating_system = sys.platform
# Define URL Components
IB_GATEWAY_HOST = r"https://localhost"
IB_GATEWAY_PORT = r"5000"
self.IB_GATEWAY_PATH = IB_GATEWAY_HOST + ":" + IB_GATEWAY_PORT
    def create_session(self):
        '''
        Creates a new session with Interactive Broker using the credentials
        passed through when the Robot was initalized.

        Returns True once the gateway reports the session authenticated and
        the account has been posted. If the gateway is unreachable, launches
        it via connect() and then calls sys.exit(), terminating the process.
        Falls through (implicitly returning None) if reauthentication or the
        account update does not succeed.
        '''
        # Assuming the Server is Running, try and grab the Auth Status Endpoint.
        try:
            auth_response = self.is_authenticated()
        except requests.exceptions.SSLError:
            auth_response = False
        except requests.exceptions.ConnectionError:
            auth_response = False
        # Scenario 1, Is_Authenticated endpoint return a bad status code so we need to connect again.
        if auth_response == False:
            # If it isn't then connect.
            self.connect()
            # finall exit the script.
            sys.exit()
        # Scenario 2, we got a successful response from the server but we aren't authenticated..
        elif auth_response != False and 'authenticated' not in auth_response.keys():
            # Before I can reauthenticate, I need to validate the session.
            self.validate()
            # Then reauthenticate.
            self.reauthenticate()
            # Then see if we are validated.
            re_auth_response = self.is_authenticated()
            # if reauthenticaton was successful then proceed to update accounts.
            if re_auth_response['authenticated'] == True:
                # Update the Account for the Session, so it uses the account passed through during initalization.
                update_account_status = self.update_server_account(account_id=self.ACCOUNT)
                # NOTE(review): update_server_account() returns the parsed JSON
                # dict, so this `== True` comparison can never succeed — confirm
                # the intended success condition.
                if update_account_status == True:
                    print('Session is connected and authenticated and account has been posted to server. Requests will not be limited.')
                    return True
        # Scenario 3, we got a successful response from the server and we are authenticated.
        elif auth_response != False and 'authenticated' in auth_response.keys() and auth_response['authenticated'] == True:
            # To be safe I just validate the session.
            self.validate()
            # Then I update the Account for the Session, so it uses the account passed through during initalization.
            update_account_status = self.update_server_account(account_id=self.ACCOUNT)
            # if that was successful, then let the User know we are good at this stage and proceed to next step.
            if update_account_status == True:
                print('Session is connected and authenticated and account has been posted to server. Requests will not be limited.')
                return True
        # Scenario 4 (comment previously said 3): server responded but reports
        # authenticated == False, so validate and reauthenticate first.
        elif auth_response != False and 'authenticated' in auth_response.keys() and auth_response['authenticated'] == False:
            # To be safe I just validate the session.
            self.validate()
            # Then reauthenticate.
            self.reauthenticate()
            # Then I update the Account for the Session, so it uses the account passed through during initalization.
            update_account_status = self.update_server_account(account_id=self.ACCOUNT)
            # if that was successful, then let the User know we are good at this stage and proceed to next step.
            if update_account_status == True:
                print('Session is connected and authenticated and account has been posted to server. Requests will not be limited.')
                return True
    def connect(self):
        '''
        Launches the client-portal gateway process and redirects the user's
        browser to the local login page. Only win32 and darwin are handled;
        on any other platform no gateway process is started and only the
        browser redirect happens.
        '''
        if self._operating_system == 'win32':
            IB_WEB_API_PROC = ["cmd", "/k", r"bin\run.bat", r"root\conf.yaml"]
            subprocess.Popen(args = IB_WEB_API_PROC, cwd = self.CLIENT_PORTAL_FOLDER, creationflags = subprocess.CREATE_NEW_CONSOLE)
        elif self._operating_system == 'darwin':
            IB_WEB_API_PROC = ["open", "-F", "-a", "Terminal", r"bin/run.sh", r"root/conf.yaml"]
            subprocess.Popen(args = IB_WEB_API_PROC, cwd = self.CLIENT_PORTAL_FOLDER)
        # redirect to the local host auth window.
        self._auth_redirect()
def _headers(self, mode = 'json'):
'''
Returns a dictionary of default HTTP headers for calls to TD Ameritrade API,
in the headers we defined the Authorization and access token.
NAME: mode
DESC: Defines the content-type for the headers dictionary.
default is 'json'. Possible values are ['json','form']
TYPE: String
'''
if mode == 'json':
headers = {'Content-Type':'application/json'}
elif mode == 'form':
headers = {'Content-Type':'application/x-www-form-urlencoded'}
return headers
def _build_url(self, endpoint = None):
'''
builds a url for a request.
NAME: endpoint
DESC: The URL that needs conversion to a full endpoint URL.
TYPE: String
RTYPE: String
'''
# otherwise build the URL
return urllib.parse.unquote(urllib.parse.urljoin(self.IB_GATEWAY_PATH, self.API_VERSION) + r'portal/' + endpoint)
    def _make_request(self, endpoint = None, req_type = None, params = None):
        '''
        Handles all the requests made by the client and correctly organizes
        the information so it is sent correctly. Additionally it will also
        build the URL.

        NAME: endpoint
        DESC: The endpoint we wish to request.
        TYPE: String

        NAME: type
        DESC: Defines the type of request to be made. Can be one of four
        possible values ['GET','POST','DELETE','PUT']
        TYPE: String

        NAME: params
        DESC: Any arguments that are to be sent along in the request. That
        could be parameters of a 'GET' request, or a data payload of a
        'POST' request.
        TYPE: Dictionary

        NOTE(review): despite the docstring, only 'GET' and 'POST' are handled
        below; any other req_type leaves `response` unbound and raises
        UnboundLocalError. TLS verification is disabled (verify=False) because
        the gateway runs on localhost with a self-signed certificate.
        '''
        # first build the url
        url = self._build_url(endpoint = endpoint)
        # Scenario 1: POST with a payload.
        if req_type == 'POST' and params is not None:
            # make sure it's a JSON String
            headers = self._headers(mode = 'json')
            # grab the response.
            response = requests.post(url, headers = headers, verify = False, data = json.dumps(params))
        # SCENARIO 2: POST without a payload.
        elif req_type == 'POST' and params is None:
            # grab the response.
            response = requests.post(url, headers = self._headers(mode = 'json'), verify = False)
        # SCENARIO 3: GET without parameters.
        elif req_type == 'GET' and params is None:
            # grab the response.
            response = requests.get(url, headers = self._headers(mode = 'json'), verify = False)
        # SCENARIO 4: GET with parameters.
        elif req_type == 'GET' and params is not None:
            # grab the response.
            headers = self._headers(mode = 'json')
            response = requests.get(url, headers = headers, verify = False, params = params)
        # Non-200 responses are logged for debugging but still returned to the
        # caller, who decides how to react.
        if response.status_code != 200:
            print(response.url)
            print(response.headers)
            print(response.content)
            print(response.status_code)
        return response
def _auth_redirect(self):
'''
Opens a new Browser window with the default one specified by the
operating system. From there will redirect to the URL that the user
needs to go to in order to authenticate the newly started session.
'''
print('\n')
print('-'*80)
print("The Interactive Broker server is not currently running, so we cannot authenticate the session.")
print("The server will startup, and the browser will redirect you to the Local Host you specified in your config file.")
print("Please login to your account with your username and password and rerun the script to begin the session.")
print("You'll be redirected in 3 seconds.")
print('-'*80)
print('\n')
time.sleep(3)
# Redirect to the URL.
if self._operating_system:
subprocess.Popen(["cmd", "/k", "start", self.IB_GATEWAY_PATH], shell=False)
elif self._operating_system:
subprocess.run(["open", self.IB_GATEWAY_PATH], shell=False)
return True
def _prepare_arguments_list(self, parameter_list = None):
'''
Some endpoints can take multiple values for a parameter, this
method takes that list and creates a valid string that can be
used in an API request. The list can have either one index or
multiple indexes.
NAME: parameter_list
DESC: A list of paramater values assigned to an argument.
TYPE: List
EXAMPLE:
SessionObject.prepare_arguments_list(parameter_list = ['MSFT', 'SQ'])
'''
# validate it's a list.
if type(parameter_list) is list:
# specify the delimeter and join the list.
delimeter = ','
parameter_list = delimeter.join(parameter_list)
return parameter_list
'''
SESSION ENDPOINTS
'''
def validate(self):
'''
Validates the current session for the SSO user.
'''
# define request components
endpoint = r'sso/validate'
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def tickle(self):
'''
If the gateway has not received any requests for several minutes an open session will
automatically timeout. The tickle endpoint pings the server to prevent the
session from ending.
'''
# define request components
endpoint = r'tickle'
req_type = 'POST'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def logout(self):
'''
Logs the user out of the gateway session. Any further activity requires
re-authentication.
'''
# define request components
endpoint = r'logout'
req_type = 'POST'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def reauthenticate(self):
'''
Provides a way to reauthenticate to the Brokerage system as long as there
is a valid SSO session, see /sso/validate.
'''
# define request components
endpoint = r'iserver/reauthenticate'
req_type = 'POST'
# this is special, I don't want the JSON content right away.
content = self._make_request(endpoint = endpoint, req_type = req_type)
if content.status_code != 200:
return False
else:
return content.json()
def is_authenticated(self):
'''
Current Authentication status to the Brokerage system. Market Data and
Trading is not possible if not authenticated, e.g. authenticated
shows false.
'''
# define request components
endpoint = 'iserver/auth/status'
req_type = 'POST'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
'''
MARKET DATA ENDPOINTS
'''
def market_data(self, conids = None, since = None, fields = None):
'''
Get Market Data for the given conid(s). The end-point will return by
default bid, ask, last, change, change pct, close, listing exchange.
See response fields for a list of available fields that can be request
via fields argument. The endpoint /iserver/accounts should be called
prior to /iserver/marketdata/snapshot. To receive all available fields
the /snapshot endpoint will need to be called several times.
NAME: conid
DESC: The list of contract IDs you wish to pull current quotes for.
TYPE: List<String>
NAME: since
DESC: Time period since which updates are required.
Uses epoch time with milliseconds.
TYPE: String
NAME: fields
DESC: List of fields you wish to retrieve for each quote.
TYPE: List<String>
'''
# define request components
endpoint = 'iserver/marketdata/snapshot'
req_type = 'GET'
# join the two list arguments so they are both a single string.
conids_joined = self._prepare_arguments_list(parameter_list = conids)
if fields is not None:
fields_joined = ",".join(str(n) for n in fields)
else:
fields_joined = ""
# define the parameters
if since is None:
params = {'conids':conids_joined,
'fields':fields_joined}
else:
params = {'conids':conids_joined,
'since':since,
'fields':fields_joined}
content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()
return content
def market_data_history(self, conid = None, period = None, bar = None):
'''
Get history of market Data for the given conid, length of data is controlled by period and
bar. e.g. 1y period with bar=1w returns 52 data points.
NAME: conid
DESC: The contract ID for a given instrument. If you don't know the contract ID use the
`search_by_symbol_or_name` endpoint to retrieve it.
TYPE: String
NAME: period
DESC: Specifies the period of look back. For example 1y means looking back 1 year from today.
Possible values are ['1d','1w','1m','1y']
TYPE: String
NAME: bar
DESC: Specifies granularity of data. For example, if bar = '1h' the data will be at an hourly level.
Possible values are ['5min','1h','1w']
TYPE: String
'''
# define request components
endpoint = 'iserver/marketdata/history'
req_type = 'GET'
params = {'conid':conid, 'period':period, 'bar':bar}
content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()
return content
'''
SERVER ACCOUNTS ENDPOINTS
'''
def server_accounts(self):
'''
Returns a list of accounts the user has trading access to, their
respective aliases and the currently selected account. Note this
endpoint must be called before modifying an order or querying
open orders.
'''
# define request components
endpoint = 'iserver/accounts'
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type)
return content
def update_server_account(self, account_id = None, check = False):
'''
If an user has multiple accounts, and user wants to get orders, trades,
etc. of an account other than currently selected account, then user
can update the currently selected account using this API and then can
fetch required information for the newly updated account.
NAME: account_id
DESC: The account ID you wish to set for the API Session. This will be used to
grab historical data and make orders.
TYPE: String
'''
# define request components
endpoint = 'iserver/account'
req_type = 'POST'
params = {'acctId':account_id}
content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()
if 'status_code' in content.keys():
time.sleep(1)
content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()
return content
def server_accountPNL(self):
'''
Returns an object containing PnLfor the selected account and its models
(if any).
'''
# define request components
endpoint = 'iserver/account/pnl/partitioned'
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
'''
CONTRACT ENDPOINTS
'''
def symbol_search(self, symbol):
'''
Performs a symbol search for a given symbol and returns information related to the
symbol including the contract id.
'''
# define the request components
endpoint = 'iserver/secdef/search'
req_type = 'POST'
payload = {'symbol':symbol}
content = self._make_request(endpoint = endpoint, req_type = req_type, params= payload).json()
return content
'''
PORTFOLIO ACCOUNTS ENDPOINTS
'''
def portfolio_accounts(self):
'''
In non-tiered account structures, returns a list of accounts for which the
user can view position and account information. This endpoint must be called prior
to calling other /portfolio endpoints for those accounts. For querying a list of accounts
which the user can trade, see /iserver/accounts. For a list of subaccounts in tiered account
structures (e.g. financial advisor or ibroker accounts) see /portfolio/subaccounts.
'''
# define request components
endpoint = 'portfolio/accounts'
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def portfolio_sub_accounts(self):
'''
Used in tiered account structures (such as financial advisor and ibroker accounts) to return a
list of sub-accounts for which the user can view position and account-related information. This
endpoint must be called prior to calling other /portfolio endpoints for those subaccounts. To
query a list of accounts the user can trade, see /iserver/accounts.
'''
# define request components
endpoint = r'portfolio/subaccounts'
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def portfolio_account_info(self, account_id = None):
'''
Used in tiered account structures (such as financial advisor and ibroker accounts) to return a
list of sub-accounts for which the user can view position and account-related information. This
endpoint must be called prior to calling other /portfolio endpoints for those subaccounts. To
query a list of accounts the user can trade, see /iserver/accounts.
NAME: account_id
DESC: The account ID you wish to return info for.
TYPE: String
'''
# define request components
endpoint = r'portfolio/{}/meta'.format(account_id)
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def portfolio_account_summary(self, account_id = None):
'''
Returns information about margin, cash balances and other information
related to specified account. See also /portfolio/{accountId}/ledger.
/portfolio/accounts or /portfolio/subaccounts must be called
prior to this endpoint.
NAME: account_id
DESC: The account ID you wish to return info for.
TYPE: String
'''
# define request components
endpoint = r'portfolio/{}/summary'.format(account_id)
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def portfolio_account_ledger(self, account_id = None):
'''
Information regarding settled cash, cash balances, etc. in the account's
base currency and any other cash balances hold in other currencies. /portfolio/accounts
or /portfolio/subaccounts must be called prior to this endpoint. The list of supported
currencies is available at https://www.interactivebrokers.com/en/index.php?f=3185.
NAME: account_id
DESC: The account ID you wish to return info for.
TYPE: String
'''
# define request components
endpoint = r'portfolio/{}/ledger'.format(account_id)
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def portfolio_account_allocation(self, account_id = None):
'''
Information about the account's portfolio allocation by Asset Class, Industry and
Category. /portfolio/accounts or /portfolio/subaccounts must be called prior to
this endpoint.
NAME: account_id
DESC: The account ID you wish to return info for.
TYPE: String
'''
# define request components
endpoint = r'portfolio/{}/allocation'.format(account_id)
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
def portfolio_accounts_allocation(self, account_ids = None):
'''
Similar to /portfolio/{accountId}/allocation but returns a consolidated view of of all the
accounts returned by /portfolio/accounts. /portfolio/accounts or /portfolio/subaccounts must
be called prior to this endpoint.
NAME: account_ids
DESC: A list of Account IDs you wish to return alloacation info for.
TYPE: List<String>
'''
# define request components
endpoint = r'portfolio/allocation'
req_type = 'POST'
payload = account_ids
content = self._make_request(endpoint = endpoint, req_type = req_type, params = payload).json()
return content
def portfolio_account_positions(self, account_id = None, page_id = None):
'''
Returns a list of positions for the given account. The endpoint supports paging,
page's default size is 30 positions. /portfolio/accounts or /portfolio/subaccounts
must be called prior to this endpoint.
NAME: account_id
DESC: The account ID you wish to return positions for.
TYPE: String
NAME: page_id
DESC: The page you wish to return if there are more than 1. The
default value is '0'.
TYPE: String
ADDITIONAL ARGUMENTS NEED TO BE ADDED!!!!!
'''
# make sure we have a page ID.
if page_id is None:
page_id = 0
else:
page_id = page_id
# define request components
endpoint = r'portfolio/{}/positions/{}'.format(account_id, page_id)
req_type = 'GET'
content = self._make_request(endpoint = endpoint, req_type = req_type).json()
return content
#
# RENAME THIS
#
def portfolio_account_position(self, account_id = None, conid = None):
    '''
    Returns a list of all positions matching the conid. For portfolio models
    the conid could be in more than one model, returning an array with the
    name of the model it belongs to. /portfolio/accounts or
    /portfolio/subaccounts must be called prior to this endpoint.

    NAME: account_id
    DESC: The account ID you wish to return positions for.
    TYPE: String

    NAME: conid
    DESC: The contract ID you wish to find matching positions for.
    TYPE: String
    '''
    # Single GET against the per-account position endpoint.
    response = self._make_request(
        endpoint=r'portfolio/{}/position/{}'.format(account_id, conid),
        req_type='GET'
    )
    return response.json()
#
# TODO: confirm the exact semantics of this endpoint against the gateway API
# docs — it appears to invalidate the server-side cache of portfolio data.
#
def portfolio_positions_invalidate(self, account_id = None):
    '''
    Invalidates the backend cache of the Portfolio.

    NAME: account_id
    DESC: The account ID whose portfolio cache should be invalidated.
    TYPE: String
    '''
    # POST with no payload — the account ID is part of the path.
    response = self._make_request(
        endpoint=r'portfolio/{}/positions/invalidate'.format(account_id),
        req_type='POST'
    )
    return response.json()
def portfolio_positions(self, conid = None):
    '''
    Returns an object of all positions matching the conid for all the
    selected accounts. For portfolio models the conid could be in more than
    one model, returning an array with the name of the model it belongs to.
    /portfolio/accounts or /portfolio/subaccounts must be called prior to
    this endpoint.

    NAME: conid
    DESC: The contract ID you wish to find matching positions for.
    TYPE: String
    '''
    # Cross-account lookup; only the conid goes in the path.
    response = self._make_request(
        endpoint=r'portfolio/positions/{}'.format(conid),
        req_type='GET'
    )
    return response.json()
'''
TRADES ENDPOINTS
'''
def trades(self):
    '''
    Returns a list of trades for the currently selected account for the
    current day and the six previous days.
    '''
    # Fixed endpoint; no parameters.
    response = self._make_request(
        endpoint=r'iserver/account/trades',
        req_type='GET'
    )
    return response.json()
'''
ORDERS ENDPOINTS
'''
def get_live_orders(self):
    '''
    Meant to be used in polling mode, e.g. requesting every x seconds.
    The response contains two objects: `notifications` has information about
    executed orders as they happen (see the status field), and `orders` is
    the list of orders (cancelled, filled, submitted) with activity in the
    current day.
    '''
    # Fixed endpoint; no parameters.
    response = self._make_request(
        endpoint=r'iserver/account/orders',
        req_type='GET'
    )
    return response.json()
def place_order(self, account_id = None, order = None):
    '''
    Please note here, sometimes this end-point alone can't make sure you submit the order
    successfully, you could receive some questions in the response, you have to answer
    them in order to submit the order successfully. You can use "/iserver/reply/{replyid}"
    end-point to answer questions.

    NAME: account_id
    DESC: The account ID you wish to place an order for.
    TYPE: String

    NAME: order
    DESC: Either an IBOrder object or a dictionary with the specified payload.
    TYPE: IBOrder or Dict
    '''
    # Dicts are used as-is; anything else is assumed to be an IBOrder-like
    # object that serializes itself via `create_order()`. (isinstance also
    # accepts dict subclasses, unlike the original `type(order) is dict`.)
    if not isinstance(order, dict):
        order = order.create_order()

    # define request components
    endpoint = r'iserver/account/{}/order'.format(account_id)
    req_type = 'POST'
    content = self._make_request(endpoint = endpoint, req_type = req_type, params = order).json()

    return content
def place_orders(self, account_id = None, orders = None):
    '''
    An extension of the `place_order` endpoint but allows for a list of orders.
    Those orders may be either a list of dictionary objects or a list of
    IBOrder objects.

    NAME: account_id
    DESC: The account ID you wish to place an order for.
    TYPE: String

    NAME: orders
    DESC: Either a list of IBOrder objects or a list of dictionaries with the specified payload.
    TYPE: List<IBOrder Object> or List<Dictionary>
    '''
    # Normalize the payload: dict entries pass through untouched, while
    # IBOrder-like objects are serialized via `create_order()`, matching the
    # behavior of `place_order`. (The original if/else assigned
    # `orders = orders` in both branches — a no-op.)
    if isinstance(orders, list):
        orders = [entry if isinstance(entry, dict) else entry.create_order()
                  for entry in orders]

    # define request components
    endpoint = r'iserver/account/{}/orders'.format(account_id)
    req_type = 'POST'

    # BUG FIX: the original code re-issued the request inside the `except`
    # handler when JSON decoding failed — submitting the orders a SECOND
    # time. Make the request once and only retry the *parsing*.
    response = self._make_request(endpoint = endpoint, req_type = req_type, params = orders)
    try:
        content = response.json()
    except ValueError:
        # Body was not valid JSON; return the raw Response object, matching
        # the original return contract.
        content = response

    return content
def place_order_scenario(self, account_id = None, order = None):
    '''
    This end-point allows you to preview an order without actually submitting
    it; you can get commission information in the response.

    NAME: account_id
    DESC: The account ID you wish to place an order for.
    TYPE: String

    NAME: order
    DESC: Either an IBOrder object or a dictionary with the specified payload.
    TYPE: IBOrder or Dict
    '''
    # Dicts are used as-is; anything else is assumed to be an IBOrder-like
    # object that serializes itself via `create_order()`.
    if not isinstance(order, dict):
        order = order.create_order()

    # define request components
    endpoint = r'iserver/account/{}/order/whatif'.format(account_id)
    req_type = 'POST'

    # BUG FIX: the original code re-issued the request inside the `except`
    # handler when JSON decoding failed, hitting the endpoint twice. Make
    # the request once and only retry the parsing.
    response = self._make_request(endpoint = endpoint, req_type = req_type, params = order)
    try:
        content = response.json()
    except ValueError:
        # Body was not valid JSON; return the raw Response object.
        content = response

    return content
def modify_order(self, account_id = None, customer_order_id = None, order = None):
    '''
    Modifies an open order. The /iserver/accounts endpoint must first
    be called.

    NAME: account_id
    DESC: The account ID you wish to place an order for.
    TYPE: String

    NAME: customer_order_id
    DESC: The customer order ID for the order you wish to MODIFY.
    TYPE: String

    NAME: order
    DESC: Either an IBOrder object or a dictionary with the specified payload.
    TYPE: IBOrder or Dict
    '''
    # Dicts are used as-is; anything else is assumed to be an IBOrder-like
    # object that serializes itself via `create_order()`. (isinstance also
    # accepts dict subclasses, unlike the original `type(order) is dict`.)
    if not isinstance(order, dict):
        order = order.create_order()

    # define request components
    endpoint = r'iserver/account/{}/order/{}'.format(account_id, customer_order_id)
    req_type = 'POST'
    content = self._make_request(endpoint = endpoint, req_type = req_type, params = order).json()

    return content
def delete_order(self, account_id = None, customer_order_id = None):
    '''
    Deletes the order specified by the customer order ID.

    NAME: account_id
    DESC: The account ID the order belongs to.
    TYPE: String

    NAME: customer_order_id
    DESC: The customer order ID for the order you wish to DELETE.
    TYPE: String
    '''
    # DELETE against the per-order endpoint; no payload needed.
    response = self._make_request(
        endpoint=r'iserver/account/{}/order/{}'.format(account_id, customer_order_id),
        req_type='DELETE'
    )
    return response.json()
|
{"/test_client.py": ["/ibw/client.py"]}
|
2,637
|
carina28/interactive-broker-python-api
|
refs/heads/master
|
/ibw/config.py
|
# Interactive Brokers session credentials. Values are intentionally blank in
# source control — fill them in locally and keep real credentials out of
# version control.

# Live (regular) trading account credentials.
REGULAR_USERNAME = ''
REGULAR_PASSWORD = ''
REGULAR_ACCOUNT = ''

# Paper (simulated) trading account credentials.
PAPER_USERNAME = ''
PAPER_PASSWORD = ''
PAPER_ACCOUNT = ''
|
{"/test_client.py": ["/ibw/client.py"]}
|
2,638
|
safwanvk/shop
|
refs/heads/master
|
/shop/products/forms.py
|
from wtforms import Form, BooleanField, StringField, PasswordField, validators, IntegerField, TextAreaField, \
DecimalField
from flask_wtf.file import FileAllowed, FileRequired, FileField
class AddProductForm(Form):
    """WTForms form for creating/editing a shop product.

    Note: brand and category are not fields here — the routes read them
    straight from ``request.form`` (see /shop/products/routes.py).
    """

    name = StringField('Name', [validators.DataRequired()])
    price = DecimalField('Price', [validators.DataRequired()])
    # Percentage discount; optional, defaults to no discount.
    discount = IntegerField('Discount', default=0)
    stock = IntegerField('Stock', [validators.DataRequired()])
    # NOTE(review): "discription" is a typo but is part of the form's public
    # field API (templates and routes reference it) — renaming would break them.
    discription = TextAreaField('Discription', [validators.DataRequired()])
    colors = TextAreaField('Colors', [validators.DataRequired()])
    # Three required product images, restricted to common web image formats.
    image_1 = FileField('Image 1',
                        validators=[FileRequired(), FileAllowed(['jpg', 'png', 'gif', 'jpeg'])])
    image_2 = FileField('Image 2',
                        validators=[FileRequired(), FileAllowed(['jpg', 'png', 'gif', 'jpeg'])])
    image_3 = FileField('Image 3',
                        validators=[FileRequired(), FileAllowed(['jpg', 'png', 'gif', 'jpeg'])])
|
{"/shop/products/routes.py": ["/shop/__init__.py", "/shop/products/forms.py"], "/shop/admin/routes.py": ["/shop/__init__.py"]}
|
2,639
|
safwanvk/shop
|
refs/heads/master
|
/shop/__init__.py
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_uploads import IMAGES, UploadSet, configure_uploads, patch_request_class
# Absolute path of the package directory; used to anchor the upload folder.
BASE_DIRS = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///shop.db'
# NOTE(review): hardcoded secret key — move to an environment variable
# before deploying; anyone with this value can forge session cookies.
app.config['SECRET_KEY'] = 'ffjdbfdbfjsdgfs43543745'
app.config['UPLOADED_PHOTOS_DEST'] = os.path.join(BASE_DIRS, 'static/images')

# Flask-Uploads: register the photo upload set and cap request sizes.
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app)

db = SQLAlchemy(app)
bcrypt = Bcrypt(app)

# Imported last (after `app`/`db` exist) so the route modules can register
# their view functions against this application instance.
from .admin import routes
from .products import routes
|
{"/shop/products/routes.py": ["/shop/__init__.py", "/shop/products/forms.py"], "/shop/admin/routes.py": ["/shop/__init__.py"]}
|
2,640
|
safwanvk/shop
|
refs/heads/master
|
/shop/products/routes.py
|
import secrets
from flask import render_template, request, flash, url_for, session
from shop import app
from werkzeug.utils import redirect
from .forms import AddProductForm
from .models import Brand, Category, Product
from .. import db, photos
@app.route('/add-brand', methods=['GET', 'POST'])
def add_brand():
    """Create a new Brand from the submitted form (POST) or show the form (GET)."""
    # Session gate: only logged-in users may manage brands.
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    if request.method == 'POST':
        brand_name = request.form['brand']
        db.session.add(Brand(name=brand_name))
        db.session.commit()
        flash('The brand {} was added to your database'.format(brand_name), 'success')
        return redirect(url_for('add_brand'))
    # NOTE(review): `brands` is passed as the literal string 'brands' — looks
    # like a placeholder; verify against the template's expectations.
    return render_template('products/add_brand.html', brands='brands')
@app.route('/update-brand/<int:id>', methods=['GET', 'POST'])
def update_brand(id):
    """Rename an existing Brand (POST) or show the edit form (GET)."""
    # Session gate: only logged-in users may manage brands.
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    update_brand = Brand.query.get_or_404(id)
    new_name = request.form.get('brand')
    if request.method == 'POST':
        update_brand.name = new_name
        flash('Your brand has been updated', 'success')
        db.session.commit()
        return redirect(url_for('view_brands'))
    return render_template('products/update_brand.html', update_brand=update_brand)
@app.route('/add-category', methods=['GET', 'POST'])
def add_category():
    """Create a new Category from the submitted form (POST) or show the form (GET)."""
    # Session gate: only logged-in users may manage categories.
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    if request.method == 'POST':
        category_name = request.form['category']
        db.session.add(Category(name=category_name))
        db.session.commit()
        flash('The Category {} was added to your database'.format(category_name), 'success')
        return redirect(url_for('add_category'))
    # NOTE(review): renders the *brand* template — presumably the form markup
    # is shared, but confirm this isn't a copy-paste slip.
    return render_template('products/add_brand.html')
@app.route('/update-category/<int:id>', methods=['GET', 'POST'])
def update_category(id):
    """Rename an existing Category (POST) or show the edit form (GET)."""
    # Session gate: only logged-in users may manage categories.
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    update_category = Category.query.get_or_404(id)
    new_name = request.form.get('category')
    if request.method == 'POST':
        update_category.name = new_name
        flash('Your category has been updated', 'success')
        db.session.commit()
        return redirect(url_for('view_categories'))
    # NOTE(review): renders the brand edit template — confirm it handles the
    # `update_category` context variable.
    return render_template('products/update_brand.html', update_category=update_category)
@app.route('/add-product', methods=['GET', 'POST'])
def add_product():
    """Create a Product with three uploaded images (POST) or show the form (GET)."""
    # Session gate: only logged-in users may add products.
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    brands = Brand.query.all()
    categories = Category.query.all()
    form = AddProductForm(request.form)
    if request.method == 'POST':
        # NOTE(review): form.validate() is never called here, so the field
        # validators are effectively skipped — confirm whether intended.
        # Store each upload under a random hex name; flask-uploads appends
        # the original extension after the trailing '.'.
        saved = [
            photos.save(request.files.get(field), name=secrets.token_hex(10) + '.')
            for field in ('image_1', 'image_2', 'image_3')
        ]
        product = Product(
            name=form.name.data,
            price=form.price.data,
            discount=form.discount.data,
            stock=form.stock.data,
            colors=form.colors.data,
            desc=form.discription.data,
            # Brand/category come straight from the raw form, not the WTForm.
            brand_id=request.form.get('brand'),
            category_id=request.form.get('category'),
            image_1=saved[0],
            image_2=saved[1],
            image_3=saved[2],
        )
        db.session.add(product)
        flash('The product {} has been added to your database'.format(form.name.data), 'success')
        db.session.commit()
        return redirect(url_for('admin'))
    return render_template('products/add_product.html', form=form, brands=brands, categories=categories)
@app.route('/update-product/<int:id>', methods=['GET', 'POST'])
def update_product(id):
    """Edit an existing Product: apply form values on POST, pre-fill the form on GET.

    Note: images are NOT updated here — only the scalar fields change.
    """
    # Session gate: only logged-in users may edit products.
    if 'email' not in session:
        flash(f'Please login first', 'danger')
        return redirect(url_for('login'))
    brands = Brand.query.all()
    categories = Category.query.all()
    product = Product.query.get_or_404(id)
    # Brand/category come from the raw form (they are not AddProductForm fields).
    brand = request.form.get('brand')
    category = request.form.get('category')
    form = AddProductForm(request.form)
    if request.method == 'POST':
        # NOTE(review): form.validate() is never called — validators are skipped.
        product.name = form.name.data
        product.price = form.price.data
        product.discount = form.discount.data
        product.stock = form.stock.data
        product.brand_id = brand
        product.category_id = category
        product.colors = form.colors.data
        product.desc = form.discription.data
        db.session.commit()
        flash(f'Your product has been updated', 'success')
        return redirect(url_for('admin'))
    # GET path: seed the form with the product's current values so the edit
    # template renders pre-filled inputs.
    form.name.data = product.name
    form.price.data = product.price
    form.discount.data = product.discount
    form.stock.data = product.stock
    form.colors.data = product.colors
    form.discription.data = product.desc
    return render_template('products/update_product.html', form=form, brands=brands, categories=categories, product=product)
|
{"/shop/products/routes.py": ["/shop/__init__.py", "/shop/products/forms.py"], "/shop/admin/routes.py": ["/shop/__init__.py"]}
|
2,641
|
safwanvk/shop
|
refs/heads/master
|
/shop/admin/routes.py
|
from flask import Flask, render_template, request, flash, url_for, session
from werkzeug.utils import redirect
from .forms import RegistrationForm, LoginForm
from .models import User
from .. import app, db, bcrypt
from ..products.models import Product, Brand, Category
@app.route('/')
def index():
    """Public landing page; reuses the admin index template with no context."""
    return render_template('admin/index.html')
@app.route('/admin')
def admin():
    """Admin dashboard: lists every product. Requires a logged-in session."""
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    return render_template('admin/index.html', products=Product.query.all())
@app.route('/brands')
def view_brands():
    """List all brands, newest first. Requires a logged-in session."""
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    newest_first = Brand.query.order_by(Brand.id.desc()).all()
    return render_template('admin/brands.html', brands=newest_first)
@app.route('/categories')
def view_categories():
    """List all categories, newest first. Requires a logged-in session."""
    if 'email' not in session:
        flash('Please login first', 'danger')
        return redirect(url_for('login'))
    newest_first = Category.query.order_by(Category.id.desc()).all()
    # NOTE(review): reuses the brands template with a `categories` context —
    # confirm the template branches on which variable is present.
    return render_template('admin/brands.html', categories=newest_first)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account from the registration form."""
    form = RegistrationForm(request.form)
    if request.method == 'POST' and form.validate():
        # Never store the plaintext password — only the bcrypt hash.
        hashed = bcrypt.generate_password_hash(form.password.data)
        new_user = User(
            name=form.name.data,
            username=form.username.data,
            email=form.email.data,
            password=hashed,
        )
        db.session.add(new_user)
        db.session.commit()
        flash('Welcome {} Thank you for registering'.format(form.name.data), 'success')
        return redirect(url_for('index'))
    return render_template('admin/register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user by email/password and start a session.

    On success the user's email is stored in the Flask session, which the
    other views use as their login gate ('email' in session).
    """
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate():
        user = User.query.filter_by(email=form.email.data).first()
        # Compare against the stored bcrypt hash; also guards user=None.
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            session['email'] = form.email.data
            flash(f'Welcome {form.email.data} You are logged in now', 'success')
            # NOTE(review): redirecting to an unvalidated `next` query param is
            # an open-redirect vector — validate it is a relative URL.
            return redirect(request.args.get('next') or url_for('admin'))
        else:
            # Same message for unknown email and wrong password (no user enumeration).
            flash('Wrong Password please try again', 'danger')
    return render_template('admin/login.html', form=form)
|
{"/shop/products/routes.py": ["/shop/__init__.py", "/shop/products/forms.py"], "/shop/admin/routes.py": ["/shop/__init__.py"]}
|
2,642
|
praneshsaminathan/url_shortener
|
refs/heads/main
|
/core/serializers.py
|
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from core.models import URLInfo
class ShorterIRLSerializer(serializers.Serializer):
    """Validates the single `url` field posted to the URL-shortening endpoint."""

    # Plain Serializer (not ModelSerializer): only used for input validation.
    url = serializers.URLField(max_length=250, min_length=None, allow_blank=False, label=_('URL'), help_text=_('URL'))
|
{"/core/serializers.py": ["/core/models.py"], "/core/urls.py": ["/shorturl/utils/apps.py", "/core/views.py"], "/core/views.py": ["/core/serializers.py", "/core/models.py"]}
|
2,643
|
praneshsaminathan/url_shortener
|
refs/heads/main
|
/core/urls.py
|
from django.urls import path, include
from shorturl.utils.apps import get_api_url
from .views import (
ShorterAPIView, GetFullURLAPIView
)
# API routes for the shortener; get_api_url prefixes each with
# 'api/<version>/' built from settings.API_VERSION.
urlpatterns = [
    path(get_api_url(url_name='url-shorten'), ShorterAPIView.as_view(), name='api-url_shorten'),
    # The converter is embedded in url_name so the hash becomes a path kwarg.
    path(get_api_url(url_name='full-url/<str:url_hash>'), GetFullURLAPIView.as_view(), name='api-full_url')
]
|
{"/core/serializers.py": ["/core/models.py"], "/core/urls.py": ["/shorturl/utils/apps.py", "/core/views.py"], "/core/views.py": ["/core/serializers.py", "/core/models.py"]}
|
2,644
|
praneshsaminathan/url_shortener
|
refs/heads/main
|
/core/views.py
|
from hashlib import md5
from django.shortcuts import render
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from core.serializers import ShorterIRLSerializer
from .models import URLInfo
class ShorterAPIView(APIView):
    """Create (or return the existing) short hash for a submitted URL.

    POST body is validated by ShorterIRLSerializer; the hash is the first
    10 hex chars of the URL's MD5 digest.
    """

    serializer_class = ShorterIRLSerializer
    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        full_url = serializer.validated_data.get('url')

        # Single DB round trip: the original evaluated the queryset twice
        # (`if not qs` loaded all matches, then `.first()` queried again).
        url_info = URLInfo.objects.filter(full_url=full_url).first()
        if url_info is None:
            url_info = URLInfo.objects.create(
                full_url=full_url,
                url_hash=md5(full_url.encode()).hexdigest()[:10],
            )

        data = {
            "full_url": url_info.full_url,
            "hash_url": url_info.url_hash,
            "clicks": url_info.clicks
        }
        return Response(data, status=status.HTTP_200_OK)
class GetFullURLAPIView(APIView):
    """Resolve a short hash back to its stored URL info, or 404."""

    permission_classes = (AllowAny,)

    def get(self, request, url_hash, *args, **kwargs):
        match = URLInfo.objects.filter(url_hash=url_hash)
        if not match:
            return Response(status=status.HTTP_404_NOT_FOUND)
        record = match.first()
        # NOTE(review): `clicks` is returned but never incremented here —
        # confirm where (or whether) click counting happens.
        payload = {
            "full_url": record.full_url,
            "hash_url": record.url_hash,
            "clicks": record.clicks,
        }
        return Response(payload, status=status.HTTP_200_OK)
|
{"/core/serializers.py": ["/core/models.py"], "/core/urls.py": ["/shorturl/utils/apps.py", "/core/views.py"], "/core/views.py": ["/core/serializers.py", "/core/models.py"]}
|
2,645
|
praneshsaminathan/url_shortener
|
refs/heads/main
|
/shorturl/utils/apps.py
|
from django.conf import settings
def get_api_url(name='api', version=None, app_name='', url_name=''):
    """Build an API URL path of the form 'api/<version>/[app/][url/]'.

    Args:
        name: leading path segment (default 'api').
        version: API version; defaults to settings.API_VERSION.
        app_name: optional app segment.
        url_name: optional final segment (may itself contain converters).

    Returns:
        The assembled path string, always ending in '/'.
    """
    # Resolve the version lazily: the original default
    # (`version=settings.API_VERSION`) was evaluated once at module import,
    # so later settings changes / test overrides were silently ignored.
    if version is None:
        version = settings.API_VERSION
    url = '{0}/{1}/'.format(name, version)
    if app_name and url_name:
        url = '{0}{1}/{2}/'.format(url, app_name, url_name)
    elif app_name:
        url = '{0}{1}/'.format(url, app_name)
    elif url_name:
        url = '{0}{1}/'.format(url, url_name)
    return url
|
{"/core/serializers.py": ["/core/models.py"], "/core/urls.py": ["/shorturl/utils/apps.py", "/core/views.py"], "/core/views.py": ["/core/serializers.py", "/core/models.py"]}
|
2,646
|
praneshsaminathan/url_shortener
|
refs/heads/main
|
/core/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class URLInfo(models.Model):
    """One shortened URL: the original URL, its short hash, and a click count."""

    full_url = models.URLField(unique=True, null=False, blank=False, help_text=_('full url'))
    # NOTE(review): this stores a 10-char MD5 prefix, not a URL — a CharField
    # would express the intent better; URLField adds URL validation it can fail.
    url_hash = models.URLField(unique=True, null=False, blank=False, help_text=_('short url'))
    # Number of resolutions; never decremented.
    clicks = models.PositiveIntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Human-readable summary used in the admin and shell."""
        return f'{self.url_hash} - {self.full_url} - {self.clicks}'

    class Meta:
        db_table = 'urlinfo'
        verbose_name = _('UrlInfo')
        verbose_name_plural = _('UrlInfo')
|
{"/core/serializers.py": ["/core/models.py"], "/core/urls.py": ["/shorturl/utils/apps.py", "/core/views.py"], "/core/views.py": ["/core/serializers.py", "/core/models.py"]}
|
2,647
|
praneshsaminathan/url_shortener
|
refs/heads/main
|
/core/migrations/0001_initial.py
|
# Generated by Django 3.1.7 on 2021-03-01 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the urlinfo table.

    Do not hand-edit generated migrations; create a new migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='URLInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_url', models.URLField(help_text='full url', unique=True)),
                ('url_hash', models.URLField(help_text='short url', unique=True)),
                ('clicks', models.PositiveIntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'UrlInfo',
                'verbose_name_plural': 'UrlInfo',
                'db_table': 'urlinfo',
            },
        ),
    ]
|
{"/core/serializers.py": ["/core/models.py"], "/core/urls.py": ["/shorturl/utils/apps.py", "/core/views.py"], "/core/views.py": ["/core/serializers.py", "/core/models.py"]}
|
2,648
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/app/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-20 00:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Category and Post.

    Do not hand-edit generated migrations; create a new migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='カテゴリ名')),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='曜日')),
                ('text', models.CharField(max_length=255, verbose_name='時間')),
                ('sub', models.CharField(max_length=255, verbose_name='科目名')),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Category', verbose_name='カテゴリ')),
            ],
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,649
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/keijiban/models.py
|
from django.db import models
class Posting(models.Model):
    """One bulletin-board post: author name, message, subject, and timestamp.

    Japanese verbose_name/help_text strings are user-facing and kept as-is.
    """

    # Author display name ("名前" = name).
    name = models.CharField(
        max_length=64,
        verbose_name='名前',
        help_text='あなたの名前を入力してください',
    )
    # Free-form message body ("メッセージ" = message).
    message = models.TextField(
        verbose_name='メッセージ',
        help_text='メッセージを入力してください',
        null=True,
    )
    # Course/subject name this post belongs to ("科目名" = subject name).
    subject = models.CharField(
        max_length=64,
        verbose_name='科目名',
        null=True,
    )
    # Set automatically on creation ("登録日時" = created-at timestamp).
    created_at = models.DateTimeField(
        auto_now_add=True,
        verbose_name='登録日時',
    )
    # NOTE(review): stores the poster's UserProfile pk (-1 for anonymous) —
    # see keijiban/views.index; a ForeignKey would model this better.
    pk_label = models.IntegerField(
        null=True,
    )
class PostingSubject(models.Model):
    """A subject (course) name a posting can be filed under."""

    # Course/subject name ("科目名" = subject name).
    subject = models.CharField(
        max_length=64,
        verbose_name='科目名',
        null=True,
    )
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,650
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/accounts/aes.py
|
import codecs
from Crypto.Cipher import AES
class aesEncryption:
    """AES-CTR encrypt/decrypt helper with a fixed, hardcoded key.

    SECURITY NOTES (review):
    - The key and counter are hardcoded in source — anyone with the repo can
      decrypt stored data. Move them to configuration/secrets management.
    - The CTR counter callback always returns the same 16 bytes, i.e. a
      fixed keystream; reusing it across messages is cryptographically weak.
    - The variable names are swapped relative to their roles: the 32-byte
      `counter` value is used as the AES key, and the 16-byte `secret` as
      the counter block. Renaming would not change behavior but any change
      to the *values* breaks decryption of existing ciphertexts.
    """

    # def PaddingMes(self,mes):
    #     mes_length = len(mes)
    #     len = round(mes_length / 16)  # rounded (四捨五入)
    #     new_mes = mes + " "*len
    #     return new_mes

    def Encrypt(self,mes1):
        """Encrypt a string, padding it with trailing spaces first.

        NOTE(review): the padding adds round(len/16) spaces, which does NOT
        align the message to a 16-byte block — presumably harmless under CTR
        (a stream mode), but confirm the padding was ever needed at all.
        """
        secret = (b'\xf0\x0e3nE\xa1\x9a\xff\x7f\xf6r\xd6\xf4\x9c\xa9\xaa')
        counter = (b'\xa7\r\xa5u\xd4\xa0h\xb2\x04\x19<8\x8e\xc6$\x82\xc8\x7f\xe9\x99\x0b3\xe3\x05\xe8\x999j-\xf1\xf7\xd5')
        # 32-byte value used as the AES-256 key; the lambda supplies the
        # (constant) counter block for CTR mode.
        crypto = AES.new(counter, AES.MODE_CTR, counter=lambda: secret)
        mes_length = len(mes1)
        leng = round(mes_length / 16)  # rounded (四捨五入)
        mes = mes1 + " " * leng
        encrypted = crypto.encrypt(mes)
        return encrypted

    def Decrypt(self,mes2):
        """Decrypt bytes produced by Encrypt and strip the space padding."""
        secret = (b'\xf0\x0e3nE\xa1\x9a\xff\x7f\xf6r\xd6\xf4\x9c\xa9\xaa')
        counter = ( b'\xa7\r\xa5u\xd4\xa0h\xb2\x04\x19<8\x8e\xc6$\x82\xc8\x7f\xe9\x99\x0b3\xe3\x05\xe8\x999j-\xf1\xf7\xd5')
        # Must mirror Encrypt exactly: same key and same constant counter.
        crypto = AES.new(counter, AES.MODE_CTR, counter=lambda: secret)
        mes = crypto.decrypt(mes2)
        mes = codecs.decode(mes, 'utf-8')
        # strip() also removes any leading/trailing whitespace that was part
        # of the original message — lossy for such inputs.
        decrypt = mes.strip()
        return decrypt
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,651
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/migrations/0006_auto_20170703_2051.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-03 20:51
from __future__ import unicode_literals
from django.db import migrations
import encrypted_fields.fields
class Migration(migrations.Migration):
    """Auto-generated: switches userprofile.grade to an encrypted integer field.

    Do not hand-edit generated migrations; create a new migration instead.
    """

    dependencies = [
        ('profiles', '0005_auto_20170703_1142'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='grade',
            field=encrypted_fields.fields.EncryptedIntegerField(),
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,652
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/keijiban/views.py
|
# ページネーター
from django.core.paginator import (
Paginator, # ページネーター本体のクラス
EmptyPage, # ページ番号が範囲外だった場合に発生する例外クラス
PageNotAnInteger # ページ番号が数字でなかった場合に発生する例外クラス
)
from django.shortcuts import (
render,
redirect,
)
from .models import Posting
from .forms import PostingForm
from .models import PostingSubject
from .forms import PostingSubjectForm
from django.contrib import messages
from django.shortcuts import render, get_object_or_404
from polls.models import Poll
from profiles.models import UserProfile
from django.contrib.auth.models import User
def post_list(request):
    """Render the board index with every Poll (one board per poll/course)."""
    return render(request, 'keijiban/post_list.html', {'posts': Poll.objects.all()})
def _get_page(list_, page_no, count=100):
    """Return the requested paginator page, falling back to page 1.

    The fallback covers a missing page number, a non-numeric value, and an
    out-of-range page — all three collapse to showing the first page.
    """
    paginator = Paginator(list_, count)
    try:
        return paginator.page(page_no)
    except (EmptyPage, PageNotAnInteger):
        return paginator.page(1)
def index(request,pk):
    """Show one course's board and handle new post submissions."""
    posts = get_object_or_404(Poll, pk=pk)
    # Pre-fill the form with the subject name and the poster's display name.
    if not request.user.is_authenticated():
        # Anonymous visitors post as "@名無しの電大生" with pk_label -1.
        # NOTE(review): is_authenticated is *called* here — valid only on
        # old Django (<1.10); newer versions expose it as a property.
        form = PostingForm(initial={'subject':posts.subname , 'name':"@名無しの電大生", 'pk_label':-1})
    else:
        # Logged-in users post under their profile name.
        email = request.user.email
        info_personal = UserProfile.objects.get(email = email)
        # Find the profile pk to link the post back to the poster's profile.
        # NOTE(review): if no profile matches the email, pk_link is never
        # bound and the next line raises NameError (and the loop scans every
        # profile — a filtered query would do).
        link_profile = UserProfile.objects.all()
        for tmp in link_profile:
            if tmp.email == email:
                pk_link = tmp.pk
        form = PostingForm(initial={'subject':posts.subname , 'name':info_personal.name, 'pk_label':pk_link})
    if request.method == 'POST':
        # ModelForm and Form are instantiated the same way at this point.
        form = PostingForm(request.POST or None)
        if form.is_valid():
            # save() writes the posting straight to the DB via the Model.
            form.save()
            # Notify the user via the messages framework that the post succeeded.
            messages.success(request, '投稿を受付ました。')
            return redirect('keijiban:index',pk=pk)
        else:
            # Notify the user via the messages framework that the input was invalid.
            messages.error(request, '入力内容に誤りがあります。')
    # Collect only the postings belonging to this course's subject.
    db_posts = Posting.objects.order_by('-subject')
    post_list = ["temp"]
    for temp in db_posts:
        if temp.subject == posts.subname:
            post_list.append(temp)
    # Drop the "temp" seed element and show newest-first.
    post_list.pop(0)
    post_list.reverse()
    page = _get_page(
        # Posting.objects.order_by('-id'),  # (previous approach: all posts, newest first)
        post_list,
        request.GET.get('page')  # page number from the GET query string
    )
    contexts = {
        'page': page,
        'posts': posts,
        'form': form,
    }
    return render(request, 'keijiban/index.html', contexts)
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,653
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/migrations/0007_auto_20170703_2053.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-03 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import encrypted_fields.fields
class Migration(migrations.Migration):
    """Auto-generated: reverts grade to a plain CharField and encrypts name.

    Do not hand-edit generated migrations; create a new migration instead.
    """

    dependencies = [
        ('profiles', '0006_auto_20170703_2051'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='grade',
            field=models.CharField(max_length=254),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='name',
            field=encrypted_fields.fields.EncryptedCharField(max_length=254),
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,654
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/keijiban/urls.py
|
from django.conf.urls import include, url
from . import views
# Board URLs: the bare path lists all boards; /index/<pk>/ shows one board.
urlpatterns = [
    url(r'^$', views.post_list, name='list'),
    # pk selects which Poll (course) board to display.
    url(r'^index/(?P<pk>[0-9]+)/$', views.index, name='index'),
]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,655
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/app/urls.py
|
from django.conf.urls import url

from . import views

# Routes for the syllabus/CSV app.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^csv_import/$', views.csv_import, name='csv_import'),  # upload posts from CSV
    url(r'^csv_export/$', views.csv_export, name='csv_export'),  # download all posts as CSV
]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,656
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/polls/admin.py
|
from django.contrib import admin

from polls.models import Poll, Choice

# Expose the survey models in the Django admin site.
admin.site.register(Poll)
admin.site.register(Choice)
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,657
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/app/views.py
|
import csv
from io import TextIOWrapper, StringIO
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views import generic
from .models import Post
from polls.models import Poll ,Choice
class IndexView(generic.ListView):
    """Generic list view over all Post rows."""
    model = Post
def csv_import(request):
    """Import Post rows from an uploaded CSV and create matching Poll/Choice rows.

    Expected columns per line: pk, title, text, sub, category, when.
    Redirects to the app index when done.
    NOTE(review): indentation reconstructed from a flattened dump — confirm
    nesting against the original file.
    """
    q_array = ['q1', 'q2', 'q3']  # one Choice row is created per question slot
    # Wrap the uploaded binary file so csv.reader sees decoded text.
    form_data = TextIOWrapper(
        request.FILES['csv'].file, encoding='utf-8')
    if form_data:
        csv_file = csv.reader(form_data)
        for line in csv_file:
            # Upsert the Post keyed by the CSV's first column.
            post, _ = Post.objects.get_or_create(pk=line[0])
            post.title = line[1]
            post.text = line[2]
            post.sub = line[3]
            # Build the survey (Poll) for this subject with three fixed questions.
            mypoll = Poll()
            mypoll.subname = line[3]
            mypoll.question1 = "課題の難易度 "
            mypoll.question2 = "テストの難易度 "
            mypoll.question3 = "課題の量 "
            # One Choice (answer-counter row) per question slot for this subject.
            for q in q_array:
                mychoice = Choice()
                mychoice.subname = line[3]
                mychoice.value = q
                mychoice.save()
            # category, _ = Category.objects.get_or_create(name=line[4])
            post.category = line[4]
            post.when = line[5]
            post.save()
            mypoll.save()
    return redirect('app:index')
def csv_export(request):
    """Dump every Post as CSV and return it as a file download named db.csv."""
    buffer = StringIO()
    csv_writer = csv.writer(buffer)
    for record in Post.objects.all():
        csv_writer.writerow(
            [record.pk, record.title, record.text,
             record.sub, record.category, record.when])
    response = HttpResponse(buffer.getvalue(), content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=db.csv'
    return response
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,658
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/accounts/views.py
|
from django.conf import settings
from django.contrib.auth import views as auth_views
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
#from myUserModel.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.core.urlresolvers import reverse_lazy
from django.http import Http404
from django.template.loader import get_template
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.views import generic
from profiles.models import UserProfile
from pprint import pprint
from django.http import HttpResponse
from .forms import (
RegisterForm,
LoginForm,
ChangePasswordForm,
ForgetPasswordForm,
PasswordConfirmForm,
)
# User registration
class CreateUserView(generic.FormView):
    """Register a new user: save the account inactive, then email an activation link."""
    template_name = 'accounts/create.html'
    form_class = RegisterForm
    success_url = reverse_lazy('accounts:create_done')

    def form_valid(self, form):
        # Create the account disabled until the emailed token is confirmed.
        user = form.save(commit=False)
        user.is_active = False
        user.email = user.username  # the username field holds the email address
        user.save()
        current_site = get_current_site(self.request)
        domain = current_site.domain
        # subject_template = get_template('mailtemplate/subject.txt')
        message_template = get_template(' mailtemplate/message.txt'.strip())
        context = {
            'protocol': 'https' if self.request.is_secure() else 'http',
            'domain': domain,
            'uid': urlsafe_base64_encode(force_bytes(user.pk)),  # reversible user id for the link
            'token': default_token_generator.make_token(user),
            'user': user,
        }
        #subject = subject_template.render(context)
        message = message_template.render(context)
        from_email = settings.EMAIL_HOST_USER
        to = [user.username]
        send_mail('ご登録ありがとうございます',
                  message,
                  from_email,
                  to
                  )
        return super(CreateUserView, self).form_valid(form)
class CreateDoneView(generic.TemplateView):
    """Static 'check your email' page shown after submitting the signup form."""
    template_name = "accounts/create_done.html"
class CreateCompleteView(generic.TemplateView):
    """Activate the account addressed by the uid/token pair from the signup email."""
    template_name = 'accounts/create_complete.html'

    def get(self, request, **kwargs):
        token = kwargs.get("token")
        uidb64 = kwargs.get("uidb64")
        try:
            # Decode the base64 uid back to a primary key and load the user.
            uid = force_text(urlsafe_base64_decode(uidb64))
            user = User.objects.get(pk=uid)
        except (TypeError, ValueError, OverflowError, User.DoesNotExist):
            user = None
        if user and not user.is_active and default_token_generator.check_token(user, token):
            user.is_active = True
            user.save()
            # First activation: create a default profile linked by email.
            createprofile = UserProfile()
            createprofile.name = '名無しの電大生'
            createprofile.email = user.email
            createprofile.save()
            return super(CreateCompleteView, self).get(request, **kwargs)
        else:
            # Unknown user, already active, or bad token -> treat as not found.
            raise Http404
def password_reset(request):
    """Show the 'forgot password' form and send the reset email."""
    return auth_views.password_reset(
        request,
        post_reset_redirect=reverse_lazy('accounts:password_reset_done'),
        template_name='accounts/password_reset_form.html',
        email_template_name='mailtemplate/password_reset/message.txt',
        subject_template_name='mailtemplate/password_reset/subject.txt',
        password_reset_form=ForgetPasswordForm,
    )
def password_reset_done(request):
    """Confirmation page shown after the reset email has been sent."""
    return auth_views.password_reset_done(
        request,
        template_name='accounts/password_reset_done.html',
    )
def password_reset_confirm(request, uidb64, token):
    """Let the user pick a new password via the uid/token from the reset email."""
    return auth_views.password_reset_confirm(
        request,
        uidb64=uidb64,
        token=token,
        post_reset_redirect=reverse_lazy('accounts:password_reset_complete'),
        template_name='accounts/password_reset_confirm.html',
        set_password_form=PasswordConfirmForm,
    )
def password_reset_complete(request):
    """Final 'password changed' page of the reset flow."""
    return auth_views.password_reset_complete(
        request,
        template_name='accounts/password_reset_complete.html',
    )
def login(request):
    """Render and process the login page using the project's LoginForm."""
    return auth_views.login(
        request,
        template_name='accounts/login.html',
        authentication_form=LoginForm,
    )
def logout(request):
    """Log the user out, then render the login template."""
    return auth_views.logout(request, template_name='accounts/login.html')
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,659
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/timetable/admin.py
|
from django.contrib import admin

from .models import Timetable1, Timetable2

# Make both per-semester timetable tables editable in the admin.
admin.site.register(Timetable1)
admin.site.register(Timetable2)
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,660
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/forms.py
|
from django import forms
from .models import UserProfile
# (stored value, display label) pairs for the grade <select>.
GRADE_CHOICES = (
    ('1年', '1年生'),
    ('2年', '2年生'),
    ('3年', '3年生'),
    ('4年', '4年生'),
    ('院1年', '院1年生'),
    ('院2年', '院2年生'),
    ('教員', '教員'),
)

# (stored value, display label) pairs for the major <select>.
MAJOR_CHOICES = (
    ('RB', 'RB'),
    ('RD', 'RD'),
    ('RG', 'RG'),
    ('RT', 'RT'),
    # Bug fix: the 'RU' option was mislabeled 'RT' (copy-paste error),
    # showing a duplicate 'RT' entry in the dropdown.
    ('RU', 'RU'),
)
class UserProfileForm(forms.ModelForm):
    """Edit form for UserProfile: name, grade, major, and a free-text comment."""
    name = forms.CharField(label="名前", required=True)
    text = forms.CharField(label="コメント", widget=forms.Textarea)

    class Meta:
        model = UserProfile
        fields = ('name', 'grade', 'major', 'text')

    # Declared after Meta, but class-body order does not matter to Django:
    # these override the model-derived fields with fixed choice lists.
    grade = forms.ChoiceField(
        label='学年',
        widget=forms.Select,
        choices=GRADE_CHOICES,
        required=False,
    )
    major = forms.ChoiceField(
        label='学系',
        widget=forms.Select,
        choices=MAJOR_CHOICES,
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bootstrap styling and placeholder text on the hand-declared inputs.
        self.fields['name'].widget.attrs['class'] = 'form-control'
        self.fields['name'].widget.attrs['placeholder'] = '名前'
        self.fields['text'].widget.attrs['class'] = 'form-control'
        self.fields['text'].widget.attrs['placeholder'] = 'コメント'
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,661
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/timetable/urls.py
|
from django.conf.urls import url

from . import views

# Routes for the timetable app.
urlpatterns = [
    url(r'^$', views.time_table, name='table'),           # added 7/9 Yamada
    url(r'^timeedit/$', views.time_table2, name='edit'),  # save submitted selections
    url(r'^result/$', views.show, name='result'),         # show the saved timetable
]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,662
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/migrations/0004_auto_20170703_1043.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-03 10:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: revert email to a plain EmailField with a placeholder default."""

    dependencies = [
        ('profiles', '0003_auto_20170703_1040'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='email',
            field=models.EmailField(default='example@example.com', max_length=254),
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,663
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/app/models.py
|
from datetime import datetime
from django.db import models
class Post(models.Model):
    """A course entry: day ('title'), period ('text'), subject ('sub'), category and term."""
    title = models.CharField('曜日', max_length=255)     # day of week
    text = models.CharField('時間', max_length=255)      # period/time slot
    sub = models.CharField('科目名', max_length=255)     # subject name
    category = models.CharField('カテゴリ名', max_length=255, default='SOME STRING')
    when = models.CharField('時期', max_length=255, default='SOME STRING')  # term/semester

    def __str__(self):
        return self.sub
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,664
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/keijiban/forms.py
|
from django import forms
from .models import Posting
from .models import PostingSubject
class PostingForm(forms.ModelForm):
    """Form for writing a board post: author name and message, tied to a subject."""
    name = forms.CharField(label="名前", required=True)
    message = forms.CharField(label="メッセージ", widget=forms.Textarea)

    class Meta:
        model = Posting
        fields = ('name', 'message', 'subject', 'pk_label')
        # widgets = {
        #     'name': forms.TextInput(attrs={'size': 40}),
        #     'message': forms.Textarea(attrs={'cols': 80, 'rows': 20})
        # }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bootstrap classes and placeholder text on the visible inputs.
        self.fields['name'].widget.attrs['class'] = 'form-control'
        self.fields['name'].widget.attrs['placeholder'] = '名前'
        self.fields['message'].widget.attrs['class'] = 'form-control'
        self.fields['message'].widget.attrs['placeholder'] = 'メッセージ'
class PostingSubjectForm(forms.ModelForm):
    """Form for creating a board subject (thread title)."""

    class Meta:
        model = PostingSubject
        fields = ('subject',)
        widgets = {
            'subject': forms.TextInput(attrs={'size': 40})
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bootstrap class and placeholder for the single text input.
        self.fields['subject'].widget.attrs['class'] = 'form-control'
        self.fields['subject'].widget.attrs['placeholder'] = '教科'
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,665
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/app/migrations/0002_auto_20170708_0057.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-08 00:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add Post.when, give category a default, and drop the Category model."""

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='when',
            field=models.CharField(default='SOME STRING', max_length=255, verbose_name='時期'),
        ),
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.CharField(default='SOME STRING', max_length=255, verbose_name='カテゴリ名'),
        ),
        # Categories are now stored as a plain CharField on Post.
        migrations.DeleteModel(
            name='Category',
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,666
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/polls/views.py
|
# coding: UTF-8
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from django.views import generic
from polls.models import Poll, Choice
#
# List display
#
def poll_list(request):
    """Render every Poll on the survey list page."""
    return render(request, 'poll_list/poll_list.html',
                  {'posts': Poll.objects.all()})
def poll_detail(request, pk):
    """Render one Poll's detail page; 404 if the pk is unknown."""
    poll = get_object_or_404(Poll, pk=pk)
    return render(request, 'poll_list/poll_detail.html', {'post': poll})
# Voting
#
# Maps the answer-letter prefix to the Choice counter field it increments.
_FIELD_BY_LETTER = {'e': 'easy', 'n': 'normal', 'h': 'hard'}


def _count_answer(choice, answer, slot):
    """Increment the counter named by *answer* on *choice* and save it.

    *answer* must be exactly one of 'e'+slot, 'n'+slot, 'h'+slot (e.g. 'e1');
    any other value is a no-op, matching the original branch-per-value logic.
    """
    for letter, field in _FIELD_BY_LETTER.items():
        if answer == letter + slot:
            setattr(choice, field, getattr(choice, field) + 1)
            choice.save()
            return


def vote(request):
    """Record one survey submission for the subject named in POST['subname'].

    POST['select1'..'select3'] carry the answers for the three questions
    ('e'/'n'/'h' + question number). Each answer bumps the matching
    easy/normal/hard counter on that question's Choice row, then the
    result page is rendered with the three (updated) Choice rows.

    Improvement over the original: the three copy-pasted if/elif blocks
    (one per question) are collapsed into the _count_answer helper with
    identical per-answer behavior.
    """
    name = request.POST["subname"]
    choice = Choice.objects.filter(subname=name)
    q1 = request.POST["select1"]
    q2 = request.POST["select2"]
    q3 = request.POST["select3"]
    # One Choice row per question slot, in insertion order (see csv_import).
    Q1 = choice[0]
    Q2 = choice[1]
    Q3 = choice[2]
    _count_answer(Q1, q1, "1")
    _count_answer(Q2, q2, "2")
    _count_answer(Q3, q3, "3")
    return render(request, 'poll_list/poll_result.html', {'Q1': Q1, 'Q2': Q2, 'Q3': Q3})
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,667
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/timetable/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-08 00:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: two structurally identical per-semester tables (Timetable1/Timetable2)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Timetable1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=255, verbose_name='名前')),
                ('day', models.CharField(max_length=255, verbose_name='曜日')),
                ('time', models.CharField(max_length=255, verbose_name='時間')),
                ('sub', models.CharField(max_length=255, verbose_name='科目名')),
                ('when', models.CharField(max_length=255, verbose_name='時期')),
            ],
        ),
        migrations.CreateModel(
            name='Timetable2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=255, verbose_name='名前')),
                ('day', models.CharField(max_length=255, verbose_name='曜日')),
                ('time', models.CharField(max_length=255, verbose_name='時間')),
                ('sub', models.CharField(max_length=255, verbose_name='科目名')),
                ('when', models.CharField(max_length=255, verbose_name='時期')),
            ],
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,668
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/polls/models.py
|
# coding: UTF-8
from django.db import models
#
# Survey question model
#
class Poll(models.Model):
    """Survey for one subject, with three fixed question-text slots."""
    subname = models.CharField(max_length=200)  # subject the survey belongs to
    question1 = models.CharField(max_length=200)
    question2 = models.CharField(max_length=200)
    question3 = models.CharField(max_length=200)

    def __str__(self):
        return self.subname
#
# Survey choice (answer-counter) model
#
class Choice(models.Model):
    """Per-question answer counters for a subject's survey."""
    subname = models.CharField(max_length=200, default='SOME STRING')  # subject name
    value = models.CharField(max_length=200, default='SOME STRING')    # question slot ('q1'..'q3')
    easy = models.IntegerField(default=0)    # vote counters, incremented by polls.views.vote
    normal = models.IntegerField(default=0)
    hard = models.IntegerField(default=0)

    def __str__(self):
        return self.subname
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,669
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/timetable/views.py
|
from django.shortcuts import render
from app.models import Post
from .models import Timetable1,Timetable2
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
# Create your views here.
def time_table(request):
    """Render the timetable page with every Post available for selection."""
    context = {'posts': Post.objects.all()}
    return render(request, 'timetable/timetable.html', context)
@login_required
def time_table2(request):
    """Persist the submitted timetable selections for the logged-in user.

    First-semester ("前期") slots go to Timetable1, second-semester ("後期")
    slots (whose POST keys carry a 後期 prefix) go to Timetable2. A value of
    'null' means "no selection" and is skipped. Redirects to the result page.

    NOTE(review): indentation reconstructed from a flattened dump — confirm
    the nesting of the inner if/else blocks against the original file.
    """
    username = request.user.email  # rows are keyed by the user's email
    subject = request.POST["timetable"]  # NOTE(review): read but never used
    # listnames = ["月1","月2","月3","月4","月5","火]
    listnames = list()
    when = ["後期"]
    days = ["月", "火", "水", "木", "金"]
    times = ["1", "2", "3", "4", "5"]
    tuti = ["土1", "土2", "土3", "土4"]
    # Build the list of POST keys to look up: 月1..金5 plus the Saturday slots.
    for day in days:
        for time in times:
            data = day + time
            listnames.append(data)
    for element in tuti:
        listnames.append(element)
    # Save each value submitted under a key in listnames;
    # nothing is saved when no selection was made ('null').
    # Fetch what is currently stored for this user.
    user_timetable1 = Timetable1.objects.filter(username=username)
    for day2 in listnames:
        week = day2[0]  # day character
        num = day2[1]   # period digit
        mytime = Timetable1()
        if user_timetable1.count() == 0:
            # First save: no existing rows to replace.
            t1 = request.POST[day2]
            if t1 == request.POST[day2]:  # NOTE(review): always true — t1 was just read from this key
                if t1 != 'null':
                    mytime.username = username
                    mytime.day = day2[0]
                    mytime.time = day2[1]
                    mytime.sub = t1
                    mytime.when = "前期"
                    mytime.save()
        else:
            t1 = request.POST[day2]
            if t1 == request.POST[day2]:  # NOTE(review): always true
                if t1 != 'null':
                    for timetable in user_timetable1:
                        if timetable.day == week and timetable.time == num:
                            # Replace the previously saved row for this slot.
                            print(timetable.sub)
                            timetable.delete()
                            mytime.username = username
                            mytime.day = day2[0]
                            mytime.time = day2[1]
                            mytime.sub = t1
                            mytime.when = "前期"
                            mytime.save()
                        else:
                            # NOTE(review): saves once per non-matching existing
                            # row — likely a pre-existing quirk; verify intent.
                            mytime.username = username
                            mytime.day = day2[0]
                            mytime.time = day2[1]
                            mytime.sub = t1
                            mytime.when = "前期"
                            mytime.save()
    # Second semester: POST keys are prefixed with 後期.
    listnames2 = list()
    tuti2 = ["後期土1", "後期土2", "後期土3"]
    for day in days:
        for time in times:
            data = when[0] + day + time
            listnames2.append(data)
    for elemnt2 in tuti2:
        listnames2.append(elemnt2)
    user_timetable2 = Timetable2.objects.filter(username=username)
    for day2 in listnames2:
        week = day2[2]  # skip the 2-character 後期 prefix
        num = day2[3]
        mytime = Timetable2()
        if user_timetable2.count() == 0:
            t1 = request.POST[day2]
            if t1 == request.POST[day2]:  # NOTE(review): always true
                if t1 != 'null':
                    mytime.username = username
                    mytime.day = day2[2]
                    mytime.time = day2[3]
                    mytime.sub = t1
                    mytime.when = "後期"
                    mytime.save()
        else:
            t1 = request.POST[day2]
            if t1 == request.POST[day2]:  # NOTE(review): always true
                if t1 != 'null':
                    for timetable in user_timetable2:
                        if timetable.day == week and timetable.time == num:
                            # Replace the previously saved row for this slot.
                            timetable.delete()
                            mytime.username = username
                            mytime.day = day2[2]
                            mytime.time = day2[3]
                            mytime.sub = t1
                            mytime.when = "後期"
                            mytime.save()
                        else:
                            # NOTE(review): same per-row save quirk as above.
                            mytime.username = username
                            mytime.day = day2[2]
                            mytime.time = day2[3]
                            mytime.sub = t1
                            mytime.when = "後期"
                            mytime.save()
    return HttpResponseRedirect('/timetable/result')
def show(request):
    """Render both semesters' saved timetable rows for the current user."""
    email = request.user.email
    context = {
        'post1': Timetable1.objects.filter(username=email),
        'post2': Timetable2.objects.filter(username=email),
    }
    return render(request, 'timetable/result.html', context)
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,670
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/views.py
|
from django.shortcuts import render
from .models import UserProfile
from django.shortcuts import render, get_object_or_404
from .forms import UserProfileForm
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
def profile_detail(request, pk):
    """Show a user's public profile page; 404 if the pk is unknown."""
    profile = get_object_or_404(UserProfile, pk=pk)
    return render(request, 'profiles/profile_detail.html', {'post': profile})
@login_required
def profile_mydetail(request):
    """Show the logged-in user's own profile, looked up by email."""
    profile = UserProfile.objects.get(email=request.user.email)
    return render(request, 'profiles/profile_mydetail.html', {'post': profile})
@login_required
def profile_edit(request):
    """Edit the logged-in user's profile.

    GET renders the profile bound into UserProfileForm; POST writes the
    submitted values straight onto the model and redirects to the detail page.
    """
    email = request.user.email
    post = UserProfile.objects.get(email=email)
    if request.method == "POST":
        #form = UserProfileForm(request.POST, instance=post)
        # NOTE(review): raw POST values are saved without form validation;
        # a missing key raises KeyError. The commented lines are the
        # validated alternative that was abandoned.
        post.name = request.POST["name"]
        post.text = request.POST["text"]
        post.major = request.POST["major"]
        post.grade = request.POST["grade"]
        post.save()
        # if form.is_valid():
        #     post = form.save(commit=False)
        #     post.save()
        #     return redirect('profile_mydetail')
        return redirect('profile_mydetail')
    else:
        form = UserProfileForm(instance=post)
        return render(request, 'profiles/profile_edit.html', {'form': form})
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,671
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/models.py
|
from django.db import models
from django.utils import timezone
from encrypted_fields import EncryptedTextField ,EncryptedEmailField,EncryptedCharField,EncryptedIntegerField
class UserProfile(models.Model):
    """Profile matched to an auth User by email; personal fields are encrypted at rest."""
    name = EncryptedCharField(max_length=254)
    email = models.EmailField(max_length=254, default='example@example.com')  # lookup key used by views
    grade = models.CharField(max_length=254)
    major = EncryptedCharField(max_length=254)
    text = EncryptedTextField()  # free-text comment
    # Earlier unencrypted field definitions, kept for reference:
    # name = models.CharField(max_length = 20)
    # email = models.EmailField(max_length = 254,default='example@me.com')
    # grade = models.CharField(max_length = 5)
    # major = models.CharField(max_length = 5)
    # text = models.TextField()

    def publish(self):
        self.save()

    def __str__(self):
        return self.name
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,672
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/urls.py
|
# Import all of profiles' views.
from django.conf.urls import include, url

from . import views

urlpatterns = [
    url(r'^detail/(?P<pk>[0-9]+)/$', views.profile_detail, name='profile_detail'),  # public profile
    url(r'^edit/$', views.profile_edit, name='profile_edit'),                       # edit own profile
    url(r'^mydetail/$', views.profile_mydetail, name='profile_mydetail'),           # own profile page
]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,673
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/admin.py
|
from django.contrib import admin

from .models import UserProfile  # import the UserProfile model
admin.site.register(UserProfile)  # registering makes the model visible in the admin site
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,674
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/keijiban/admin.py
|
from django.contrib import admin

from keijiban.models import Posting
from keijiban.models import PostingSubject

# Register the bulletin-board models with the admin site.
admin.site.register(Posting)
admin.site.register(PostingSubject)
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,675
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/polls/urls.py
|
# coding: UTF-8
from django.conf.urls import url

from polls import views

# Routes for the survey (polls) app.
urlpatterns = [
    url(r'^$', views.poll_list, name='poll_list'),
    url(r'^poll/(?P<pk>[0-9]+)/$', views.poll_detail, name='poll_detail'),
    # ex: /polls/5/
    # ex: /polls/5/results/
    url(r'^vote/$', views.vote, name='vote'),
    # ex: /polls/5/vote/
    #url(r'^result/$',views.result,name='result'),
]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,676
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/migrations/0003_auto_20170703_1040.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-03 10:40
from __future__ import unicode_literals
from django.db import migrations
import encrypted_fields.fields
class Migration(migrations.Migration):
    """Auto-generated migration: convert five UserProfile columns to the
    encrypted field types from django-encrypted-fields (email, grade,
    major, name, text). Do not edit by hand."""

    dependencies = [
        ('profiles', '0002_userprofile_email'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='email',
            field=encrypted_fields.fields.EncryptedEmailField(default='example@example.com', max_length=254),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='grade',
            field=encrypted_fields.fields.EncryptedCharField(max_length=5),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='major',
            field=encrypted_fields.fields.EncryptedCharField(max_length=5),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='name',
            field=encrypted_fields.fields.EncryptedCharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='text',
            field=encrypted_fields.fields.EncryptedTextField(),
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,677
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/accounts/forms.py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponse
from pprint import pprint
class RegisterForm(UserCreationForm):
    """Sign-up form restricted to TDU (dendai.ac.jp) email addresses.

    Django's default login authenticates by ``username``/``password``;
    in this project the ``username`` field holds the user's email
    address (per the original author's notes), so ``clean_username``
    validates it as a TDU email and rejects duplicates.
    """
    # first_name was once planned as the display name; the field list
    # below still includes it even though no widget styling is applied.

    class Meta:
        model = User
        fields = (
            "username", "password1", "password2",
            "email", "first_name",
        )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bootstrap styling + Japanese placeholder text on visible inputs.
        self.fields['username'].widget.attrs['class'] = 'form-control'
        self.fields['username'].widget.attrs['placeholder'] = 'メールアドレス'
        self.fields['password1'].widget.attrs['class'] = 'form-control'
        self.fields['password1'].widget.attrs['placeholder'] = 'パスワード'
        self.fields['password2'].widget.attrs['class'] = 'form-control'
        self.fields['password2'].widget.attrs['placeholder'] = 'パスワード(確認)'

    def clean_username(self):
        """Validate that username is an unused dendai.ac.jp email address.

        Raises:
            ValidationError: if the value contains no '@', is not in the
                dendai.ac.jp domain, or already belongs to an existing user.
        """
        username = self.cleaned_data["username"]
        local, atmark, domain = username.partition('@')
        if not atmark:
            raise ValidationError("正しいメールアドレスを指定してください。")
        # The original check only tested that "dendai.ac.jp" appeared
        # somewhere in the string, which accepted addresses such as
        # "x@dendai.ac.jp.evil.com" or "dendai.ac.jp@gmail.com".
        # Require the actual domain part to be dendai.ac.jp (or a
        # subdomain of it, which the substring check also allowed).
        if not (domain == "dendai.ac.jp" or domain.endswith(".dendai.ac.jp")):
            raise ValidationError("電大メールを入力してください")
        try:
            self.user = User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        else:
            raise ValidationError("既に存在するメールアドレスです。")
class LoginForm(AuthenticationForm):
    """Login form; the username field holds the user's email address."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bootstrap styling and Japanese placeholder text on both inputs.
        placeholders = {'username': 'メールアドレス', 'password': 'パスワード'}
        for field_name, placeholder in placeholders.items():
            attrs = self.fields[field_name].widget.attrs
            attrs['class'] = 'form-control'
            attrs['placeholder'] = placeholder
class ForgetPasswordForm(PasswordResetForm):
    """Password-reset request form with Bootstrap styling on the email input."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        email_attrs = self.fields['email'].widget.attrs
        email_attrs['class'] = 'form-control'
        email_attrs['placeholder'] = 'メールアドレス'
class ChangePasswordForm(PasswordChangeForm):
    """Password-change form; applies Bootstrap's form-control class to all inputs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field_name in ('new_password1', 'new_password2', 'old_password'):
            self.fields[field_name].widget.attrs['class'] = 'form-control'
class PasswordConfirmForm(SetPasswordForm):
    """Set-new-password form with styled, placeholder-labelled inputs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field_name, placeholder in (
            ('new_password1', '新パスワード'),
            ('new_password2', '新パスワード(確認)'),
        ):
            attrs = self.fields[field_name].widget.attrs
            attrs['class'] = 'form-control'
            attrs['placeholder'] = placeholder
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,678
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/profiles/migrations/0005_auto_20170703_1142.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-03 11:42
from __future__ import unicode_literals
from django.db import migrations
import encrypted_fields.fields
class Migration(migrations.Migration):
    """Auto-generated migration: widen the encrypted grade/major columns
    on UserProfile from max_length=5 to 254. Do not edit by hand."""

    dependencies = [
        ('profiles', '0004_auto_20170703_1043'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='grade',
            field=encrypted_fields.fields.EncryptedCharField(max_length=254),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='major',
            field=encrypted_fields.fields.EncryptedCharField(max_length=254),
        ),
    ]
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,679
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/home/views.py
|
from django.http import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect
from keijiban.models import Posting
from polls.models import Poll
from .models import Contact
from .forms import ContactForm
# Create your views here.
def show(request):
    """Render the home page.

    Collects the pks of polls whose subject matches a recent board
    posting (newest postings first), deduplicates them preserving
    first-seen order, keeps at most five, and also handles the embedded
    contact form: a valid POST saves it and redirects to home:contact.
    """
    # Newest postings first; cross-reference each posting's subject
    # against every poll's subname to collect matching poll pks.
    posts_list = Posting.objects.order_by('-created_at')
    db_poll = Poll.objects.all()
    pk_list = []
    for post in posts_list:
        for db_post in db_poll:
            if post.subject == db_post.subname:
                pk_list.append(db_post.pk)
    # Keep the first occurrence of each pk, capped at five entries.
    # dict.fromkeys preserves insertion order; this replaces the original
    # "0"-sentinel + O(n^3) pop-and-restart dedup loop and the manual
    # pop-based truncation, with identical results.
    pk_list = list(dict.fromkeys(pk_list))[:5]
    # Contact form handling: on a valid POST, persist and redirect.
    form = ContactForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            return redirect('home:contact')
    contexts = {
        'posts_list': posts_list,
        'db_poll': db_poll,
        'pk_list': pk_list,
        'form': form,
    }
    return render(request, 'home/home.html', contexts)
def contact(request):
    """Render the contact page (the redirect target after a contact-form submit)."""
    template_name = 'home/contact.html'
    return render(request, template_name)
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,680
|
tduproject/kagikko2
|
refs/heads/master
|
/tdu/timetable/models.py
|
from datetime import datetime
from django.db import models
class Timetable1(models.Model):
    """One slot of the first timetable: owner, day, period, subject, term."""
    # All columns are free-form CharFields; the verbose names are the
    # Japanese UI labels (名前=name, 曜日=day of week, 時間=period,
    # 科目名=subject name, 時期=term).
    username = models.CharField('名前', max_length=255)
    day = models.CharField('曜日', max_length=255)
    time = models.CharField('時間', max_length=255)
    sub = models.CharField('科目名', max_length=255)
    when = models.CharField('時期', max_length=255)
    def __str__(self):
        # Identify rows by the owning user's name.
        return self.username
class Timetable2(models.Model):
    """One slot of the second timetable; structurally identical to Timetable1."""
    # NOTE(review): this duplicates Timetable1 field-for-field — presumably
    # one table per timetable variant; consider a shared abstract base model.
    username = models.CharField('名前', max_length=255)
    day = models.CharField('曜日', max_length=255)
    time = models.CharField('時間', max_length=255)
    sub = models.CharField('科目名', max_length=255)
    when = models.CharField('時期', max_length=255)
    def __str__(self):
        # Identify rows by the owning user's name.
        return self.username
|
{"/tdu/keijiban/views.py": ["/tdu/keijiban/models.py", "/tdu/keijiban/forms.py"], "/tdu/app/views.py": ["/tdu/app/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py"], "/tdu/profiles/views.py": ["/tdu/profiles/models.py", "/tdu/profiles/forms.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"]}
|
2,689
|
seosaju/SoupKitchen
|
refs/heads/master
|
/booth/admin.py
|
from django.contrib import admin
from .models import Booth, Company
class BoothInline(admin.TabularInline):
    """Tabular inline editor for Booth rows (used on the Company admin page)."""
    model = Booth
    fields = ['name', 'contact', 'road_address']
@admin.register(Booth)
class BoothAdmin(admin.ModelAdmin):
    """Admin list view for Booth: four display columns, searchable by name."""
    list_display = ['name', 'contact', 'company', 'road_address']
    search_fields = ['name']
@admin.register(Company)
class CompanyAdmin(admin.ModelAdmin):
    """Admin for Company: searchable by name, with its booths edited inline."""
    search_fields = ['name']
    inlines = [
        BoothInline,
    ]
|
{"/booth/admin.py": ["/booth/models.py"], "/booth/views.py": ["/load_csv.py", "/booth/models.py"]}
|
2,690
|
seosaju/SoupKitchen
|
refs/heads/master
|
/load_csv.py
|
import csv
def load(path, encoding='cp949'):
    """Read a CSV file and return its data rows, header row excluded.

    Args:
        path: Path to the CSV file.
        encoding: Text encoding of the file. Defaults to 'cp949'
            (Korean) to preserve the original hard-coded behavior.

    Returns:
        list[list[str]]: every row after the first (header) row; empty
        list for an empty or header-only file.
    """
    # newline='' is the documented way to open files for the csv module:
    # it disables universal-newline translation so quoted fields that
    # contain embedded line breaks are parsed correctly.
    with open(path, 'r', encoding=encoding, newline='') as f:
        reader = csv.reader(f)
        return list(reader)[1:]
|
{"/booth/admin.py": ["/booth/models.py"], "/booth/views.py": ["/load_csv.py", "/booth/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.