| index (int64) | repo_name (string) | branch_name (string) | path (string) | content (string) | import_graph (string) |
|---|---|---|---|---|---|
| 16,198 | rongliangzi/CrowdCountingBaseline | refs/heads/master | /utils/functions.py |
import torch
import logging
import torch.nn as nn
import numpy as np
import math
from .pytorch_ssim import *
import torch.nn.functional as F
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = 1.0 * self.sum / self.count
def get_avg(self):
return self.avg
def get_count(self):
return self.count
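# Usage sketch for AverageMeter (illustrative only, not called by the training code):
# meter = AverageMeter()
# meter.update(0.5, n=4)   # e.g. a mean batch loss of 0.5 over 4 samples
# meter.update(0.3, n=2)
# meter.get_avg()          # -> (0.5*4 + 0.3*2) / 6 = 0.4333...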
def linear_warm_up_lr(optimizer, epoch, warm_up_steps, lr):
for param_group in optimizer.param_groups:
warm_lr = lr*(epoch+1.0)/warm_up_steps
param_group['lr'] = warm_lr
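# Warm-up schedule sketch (assuming lr=1e-4 and warm_up_steps=10): epoch 0 uses
# 1e-5, epoch 4 uses 5e-5, and epoch 9 reaches the full 1e-4, after which the
# main scheduler in train_generic.py takes over.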
def get_logger(filename):
logger = logging.getLogger('train_logger')
while logger.handlers:
logger.handlers.pop()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(filename, 'w')
fh.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s], ## %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
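# Usage sketch (illustrative): the returned logger writes to both the file and stdout.
# logger = get_logger('logs/demo.txt')
# logger.info('epoch 1, mae 95.2')   # -> "[<timestamp>], ## epoch 1, mae 95.2"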
def val(model, test_loader, factor=1.0, verbose=False, downsample=8):
print('validation on whole images!')
model.eval()
mae, rmse = 0.0, 0.0
psnr = 0.0
ssim = 0.0
ssim_d = 0.0
psnr_d = 0.0
with torch.no_grad():
for it,data in enumerate(test_loader):
img, target, count = data[0:3]
img = img.cuda()
target = target.unsqueeze(1).cuda()
output = model(img)
if isinstance(output, tuple):
dmp, amp = output
hard_amp = (amp > 0.5).float()
dmp = dmp * hard_amp
else:
dmp = output
est_count = dmp.sum().item()/factor
            if verbose or it < 10:
                print('gt:{:.1f}, est:{:.1f}'.format(count.item(), est_count))
mae += abs(est_count - count.item())
rmse += (est_count - count.item())**2
if verbose:
mse = torch.mean((dmp - target)**2).float()
psnr += 10 * math.log(1.0/mse, 10)
ssim += cal_ssim(dmp, target)
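                # Downsampling shrinks each spatial axis by `downsample`; scaling by
                # downsample**2 compensates so the summed count stays roughly unchanged
                # (each retained pixel stands in for downsample**2 original pixels).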
dmp_d = F.interpolate(dmp, [target.shape[2]//downsample, target.shape[3]//downsample]) * downsample**2
target_d = F.interpolate(target, [target.shape[2]//downsample, target.shape[3]//downsample]) * downsample**2
mse_d = torch.mean((dmp_d - target_d)**2).float()
psnr_d += 10 * math.log(1.0/mse_d, 10)
ssim_d += cal_ssim(dmp_d, target_d)
mae /= len(test_loader)
rmse /= len(test_loader)
rmse = rmse**0.5
psnr /= len(test_loader)
ssim /= len(test_loader)
psnr_d /= len(test_loader)
ssim_d /= len(test_loader)
if verbose:
        print('psnr:{:.2f}, ssim:{:.4f}, psnr of 1/8 size:{:.2f}, ssim of 1/8 size:{:.4f}'.format(psnr, ssim, psnr_d, ssim_d))
return mae, rmse
def test_ssim():
img1 = (torch.rand(1, 1, 16, 16))
img2 = (torch.rand(1, 1, 16, 16))
if torch.cuda.is_available():
img1 = img1.cuda()
img2 = img2.cuda()
print(torch.max(img1))
print(torch.max(img2))
print(max(torch.max(img1),torch.max(img2)))
print(cal_ssim(img1, img2).float())
# validate on bayes dataloader
def val_bayes(model, test_loader, factor=1.0, verbose=False):
print('validation on bayes loader!')
model.eval()
epoch_res=[]
for it,(inputs, count, name) in enumerate(test_loader):
inputs = inputs.cuda()
# inputs are images with different sizes
assert inputs.size(0) == 1, 'the batch size should equal to 1 in validation mode'
with torch.set_grad_enabled(False):
outputs = model(inputs)
est = torch.sum(outputs).item()/factor
res = count[0].item() - est
            if verbose or it < 10:
                print('gt:{:.1f}, est:{:.1f}'.format(count[0].item(), torch.sum(outputs).item()))
epoch_res.append(res)
epoch_res = np.array(epoch_res)
rmse = np.sqrt(np.mean(np.square(epoch_res)))
mae = np.mean(np.abs(epoch_res))
return mae, rmse
# validate with 4 non-overlapping patches
def val_patch(model, test_loader, factor=1.0, verbose=False):
    print('validation on 4 quarters!')
model.eval()
mae, rmse = 0.0, 0.0
with torch.no_grad():
for it, data in enumerate(test_loader):
img, _, count = data[0:3]
h,w = img.shape[2:]
h_d = h//2
w_d = w//2
img_1 = (img[:,:,:h_d,:w_d].cuda())
img_2 = (img[:,:,:h_d,w_d:].cuda())
img_3 = (img[:,:,h_d:,:w_d].cuda())
img_4 = (img[:,:,h_d:,w_d:].cuda())
img_patches = [img_1, img_2, img_3, img_4]
est_count = 0
for img_p in img_patches:
output = model(img_p)
if isinstance(output, tuple):
dmp, amp = output
dmp = dmp *amp
else:
dmp = output
est_count += dmp.sum().item()/factor
            if verbose or it < 10:
                print('gt:{:.1f}, est:{:.1f}'.format(count.item(), est_count))
mae += abs(est_count - count.item())
rmse += (est_count - count.item())**2
mae /= len(test_loader)
rmse /= len(test_loader)
rmse = rmse**0.5
return mae, rmse
| {"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]} |
| 16,199 | rongliangzi/CrowdCountingBaseline | refs/heads/master | /modeling/sanet.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
def ConvINReLU(cfg, in_planes):
    layers = []
    for c in cfg:
        if len(c) == 2:
            plane, k = c
            layers += [nn.Conv2d(in_planes, plane, k, padding=(k-1)//2), nn.InstanceNorm2d(plane, affine=True), nn.ReLU(True)]
            in_planes = plane
        elif len(c) == 1:
            # a single-element entry (e.g. ['T']) marks a 2x transposed-conv upsampling step
            layers += [nn.ConvTranspose2d(in_planes, in_planes, 2, stride=2), nn.InstanceNorm2d(in_planes, affine=True), nn.ReLU(True)]
    return nn.Sequential(*layers)
class SAModule(nn.Module):
    def __init__(self, reduc, in_planes, out_planes):
        super(SAModule, self).__init__()
        # whether each branch starts with a 1x1 channel-reduction conv
        self.reduc = reduc
        sub_planes = out_planes // 4
        self.branch1 = nn.Sequential(nn.Conv2d(in_planes, sub_planes, 1), nn.ReLU(True))
        if self.reduc:
            self.branch2 = ConvINReLU(((in_planes // 2, 1), (sub_planes, 3)), in_planes)
            self.branch3 = ConvINReLU(((in_planes // 2, 1), (sub_planes, 5)), in_planes)
            self.branch4 = ConvINReLU(((in_planes // 2, 1), (sub_planes, 7)), in_planes)
        else:
            self.branch2 = ConvINReLU(((sub_planes, 3),), in_planes)
            self.branch3 = ConvINReLU(((sub_planes, 5),), in_planes)
            self.branch4 = ConvINReLU(((sub_planes, 7),), in_planes)
def forward(self, x):
out1 = self.branch1(x)
out2 = self.branch2(x)
out3 = self.branch3(x)
out4 = self.branch4(x)
out = torch.cat([out1, out2, out3, out4], 1)
return out
class SANet(nn.Module):
    def __init__(self):
        super(SANet, self).__init__()
        # encoder: scale-aggregation modules interleaved with 2x max pooling (8x downsample)
        self.encoder = nn.Sequential(SAModule(False, 3, 64), nn.MaxPool2d(kernel_size=2, stride=2),
                                     SAModule(True, 64, 128), nn.MaxPool2d(kernel_size=2, stride=2),
                                     SAModule(True, 128, 128), nn.MaxPool2d(kernel_size=2, stride=2),
                                     SAModule(True, 128, 64))
        # single-element 'T' entries trigger the transposed-conv upsampling branch in ConvINReLU
        self.decoder_cfg = ([64, 9], ['T'], [32, 7], ['T'], [16, 5], ['T'], [16, 3], [16, 5], [1, 1])
        self.decoder = ConvINReLU(cfg=self.decoder_cfg, in_planes=64)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
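# Quick smoke test (illustrative sketch):
# net = SANet()
# out = net(torch.randn(1, 3, 128, 128))   # encoder downsamples 8x; the three 'T'
# print(out.shape)                         # stages upsample back -> (1, 1, 128, 128)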
| {"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]} |
| 16,200 | rongliangzi/CrowdCountingBaseline | refs/heads/master | /modeling/Res50_C3.py |
import torch.nn as nn
import torch
from torchvision import models
from .utils import *
import torch.nn.functional as F
def initialize_weights(modules):
    # parameter renamed from `models` to avoid shadowing the torchvision.models import above
    for m in modules:
        real_init_weights(m)
def real_init_weights(m):
if isinstance(m, list):
for mini_m in m:
real_init_weights(mini_m)
else:
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, std=0.01)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m,nn.Module):
for mini_m in m.children():
real_init_weights(mini_m)
else:
            print(m)
class Res50(nn.Module):
def __init__(self, pretrained=True, bn=True):
super(Res50, self).__init__()
self.bk1 = make_layers([512,512,512], in_channels=1024, dilation=True, batch_norm=False)
self.bk2 = conv_act(512, 256, 3, same_padding=True, NL='relu', dilation=2, bn=False)
self.bk3 = conv_act(256, 128, 3, same_padding=True, NL='relu', dilation=2, bn=False)
self.output_layer = conv_act(128, 1, 1, same_padding=True, NL='relu', bn=False)
'''
self.de_pred = nn.Sequential(conv_act(1024, 128, 1, same_padding=True, NL='relu'),
conv_act(128, 1, 1, same_padding=True, NL='relu'))
'''
initialize_weights(self.modules())
        res = models.resnet50(pretrained=pretrained)  # honor the constructor flag
#res.load_state_dict(torch.load("/home/datamining/Models/resnet50-19c8e357.pth"))
self.frontend = nn.Sequential(
res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2
)
self.own_reslayer_3 = make_res_layer(Bottleneck, 256, 6, stride=1)
self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
def forward(self,x_in):
x = self.frontend(x_in)
x = self.own_reslayer_3(x) #1/8
x = F.interpolate(x,scale_factor=2)
#x = self.de_pred(x)
x = self.bk1(x) #1/4
x = F.interpolate(x,scale_factor=2)
x = self.bk2(x) #1/2
x = F.interpolate(x,size=x_in.shape[2:]) # input size
x = self.bk3(x)
x = self.output_layer(x)
return x
def make_res_layer(block, planes, blocks, stride=1):
downsample = None
inplanes=512
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
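# Sketch of how make_res_layer mirrors torchvision's layer3 so the pretrained
# weights can be copied in (see __init__ above); illustrative only:
# layer3 = make_res_layer(Bottleneck, 256, 6, stride=1)
# x = torch.randn(1, 512, 32, 32)      # output of res.layer2 for a 256x256 input
# print(layer3(x).shape)               # -> torch.Size([1, 1024, 32, 32])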
| {"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]} |
| 16,201 | rongliangzi/CrowdCountingBaseline | refs/heads/master | /modeling/u_vgg.py |
import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
from .utils import *
class U_VGG(nn.Module):
def __init__(self, load_model='', downsample=1, bn=False, NL='swish', objective='dmp', sp=False, se=True, pyramid=''):
super(U_VGG, self).__init__()
self.downsample = downsample
self.bn = bn
self.NL = NL
self.objective = objective
self.pyramid = pyramid
self.front0 = make_layers([64, 64], in_channels=3, batch_norm=bn, NL=self.NL)
self.front1 = make_layers(['M', 128, 128], in_channels=64, batch_norm=bn, NL=self.NL)
self.front2 = make_layers(['M', 256, 256, 256], in_channels=128, batch_norm=bn, NL=self.NL)
self.front3 = make_layers(['M', 512, 512, 512], in_channels=256, batch_norm=bn, NL=self.NL)
self.sp = sp
if sp:
print('use sp module')
self.sp_module = SPModule(512)
# basic cfg for backend is [512, 512, 256, 128, 64, 64]
if not self.pyramid:
self.backconv0 = make_layers([512, 512, 256], in_channels=512, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv1 = make_layers([128], in_channels=512, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv2 = make_layers([64], in_channels=256, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv3 = make_layers([64], in_channels=128, dilation=True, batch_norm=bn, NL=self.NL, se=se)
elif self.pyramid == 'dilation':
print('use dilation pyramid in backend')
self.backconv0 = nn.Sequential(DilationPyramid(512, 128), DilationPyramid(512, 128), DilationPyramid(512, 128))
self.backconv1 = nn.Sequential(DilationPyramid(512, 64))
self.backconv2 = nn.Sequential(DilationPyramid(256, 32))
self.backconv3 = nn.Sequential(DilationPyramid(128, 16))
elif self.pyramid == 'size':
print('use size pyramid in backend')
self.backconv0 = nn.Sequential(SizePyramid(512, 128), SizePyramid(512, 128), SizePyramid(512, 128))
self.backconv1 = nn.Sequential(SizePyramid(512, 64))
self.backconv2 = nn.Sequential(SizePyramid(256, 32))
self.backconv3 = nn.Sequential(SizePyramid(128, 16))
elif self.pyramid == 'depth':
print('use depth pyramid in backend')
self.backconv0 = nn.Sequential(DepthPyramid(512, 128), DepthPyramid(512, 128), DepthPyramid(512, 128))
self.backconv1 = nn.Sequential(DepthPyramid(512, 64))
self.backconv2 = nn.Sequential(DepthPyramid(256, 32))
self.backconv3 = nn.Sequential(DepthPyramid(128, 16))
# objective is density map(dmp) and (binary) attention map(amp)
if self.objective == 'dmp+amp':
print('objective dmp+amp!')
self.amp_process = make_layers([64,64], in_channels=64, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.amp_layer = nn.Conv2d(64, 1, kernel_size=1)
self.sgm = nn.Sigmoid()
elif self.objective == 'dmp':
print('objective dmp')
else:
raise Exception('objective must in [dmp, dmp+amp]')
self.output_layer = nn.Conv2d(64, 1, kernel_size=1)
self.load_model = load_model
#self._init_weights()
self._random_init_weights()
def forward(self, x_in):
x1 = self.front0(x_in)#1 size, 64
x2 = self.front1(x1)#1/2 size, 128
x3 = self.front2(x2)#1/4 size, 256
x4 = self.front3(x3)#1/8 size, 512
if self.sp:
x4 = self.sp_module(x4)
x = self.backconv0(x4) #1/8 size, 512
x = F.interpolate(x, size=[s//4 for s in x_in.shape[2:]]) #1/4 size, 256
x = torch.cat([x3, x], dim=1) #1/4 size,
x = self.backconv1(x) #1/4 size,
x = F.interpolate(x, size=[s//2 for s in x_in.shape[2:]]) #1/2 size,
x = torch.cat([x2, x], dim=1) #1/2 size,
x = self.backconv2(x) #1/2 size,
x = F.interpolate(x, size=x_in.shape[2:]) #1 size,
x = torch.cat([x1, x], dim=1) #1 size,
x = self.backconv3(x) #1 size, 64
if self.objective == 'dmp+amp':
dmp = self.output_layer(x)
amp = self.amp_layer(self.amp_process(x))
amp = self.sgm(amp)
dmp = amp * dmp
del x
dmp = torch.abs(dmp)
return dmp, amp
else:
x = self.output_layer(x)
x = torch.abs(x)
return x
def _random_init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _init_weights(self):
if not self.load_model:
pretrained_dict = dict()
model_dict = self.state_dict()
path = "/home/datamining/Models/vgg16_bn-6c64b313.pth" if self.bn else '/home/datamining/Models/vgg16-397923af.pth'
pretrained_model = torch.load(path)
self._random_init_weights()
# load the pretrained vgg16 parameters
for i, (k, v) in enumerate(pretrained_model.items()):
#print(i, k)
if i < 4:
layer_id = 0
module_id = k.split('.')[-2]
elif i < 8:
layer_id = 1
module_id = int(k.split('.')[-2]) - 4
elif i < 14:
layer_id = 2
module_id = int(k.split('.')[-2]) - 9
elif i < 20:
layer_id = 3
module_id = int(k.split('.')[-2]) - 16
else:
break
k = 'front' + str(layer_id) + '.' + str(module_id) + '.' + k.split('.')[-1]
if k in model_dict and model_dict[k].size() == v.size():
print(k, ' parameters loaded!')
pretrained_dict[k] = v
print(path, 'weights loaded!')
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
else:
self.load_state_dict(torch.load(self.load_model))
print(self.load_model,' loaded!')
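# Shape walk-through for the U-shaped decoder (illustrative sketch; the default
# _random_init_weights makes instantiation work without pretrained weight files):
# net = U_VGG()
# out = net(torch.randn(1, 3, 64, 64))  # fronts give 1, 1/2, 1/4, 1/8 features; the
# print(out.shape)                      # skip-concats upsample back -> (1, 1, 64, 64)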
| {"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]} |
| 16,202 | rongliangzi/CrowdCountingBaseline | refs/heads/master | /train_generic.py |
import torch
import torchvision
import torch.nn as nn
import os
import glob
from modeling import *
import torchvision.transforms as transforms
from torch.optim import lr_scheduler
from dataset import *
import torch.nn.functional as F
from utils.functions import *
from utils import pytorch_ssim
import argparse
from tqdm import tqdm
from datasets.crowd import Crowd
from losses.bay_loss import Bay_Loss
from losses.post_prob import Post_Prob
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
def get_loader(train_path, test_path, downsample_ratio, args):
train_img_paths = []
for img_path in glob.glob(os.path.join(train_path, '*.jpg')):
train_img_paths.append(img_path)
bg_img_paths = []
for bg_img_path in glob.glob(os.path.join('/home/datamining/Datasets/CrowdCounting/bg/', '*.jpg')):
bg_img_paths.append(bg_img_path)
if args.use_bg:
train_img_paths += bg_img_paths
test_img_paths = []
for img_path in glob.glob(os.path.join(test_path, '*.jpg')):
test_img_paths.append(img_path)
if args.loss=='bayes':
bayes_dataset = Crowd(train_path, args.crop_size, downsample_ratio, False, 'train')
train_loader = torch.utils.data.DataLoader(bayes_dataset, collate_fn=bayes_collate, batch_size=args.bs, shuffle=True, num_workers=8, pin_memory=True)
test_loader = torch.utils.data.DataLoader(Crowd(test_path, args.crop_size, downsample_ratio, False, 'val'),batch_size=1, num_workers=8, pin_memory=True)
elif args.bn>0:
bn_dataset=PatchSet(train_img_paths, transform, c_size=(args.crop_size,args.crop_size), crop_n=args.random_crop_n)
train_loader = torch.utils.data.DataLoader(bn_dataset, collate_fn=my_collate_fn, shuffle=True, batch_size=args.bs, num_workers=8, pin_memory=True)
test_loader = torch.utils.data.DataLoader(RawDataset(test_img_paths, transform, mode='one', downsample_ratio=downsample_ratio, test=True), shuffle=False, batch_size=1, pin_memory=True)
else:
single_dataset=RawDataset(train_img_paths, transform, args.crop_mode, downsample_ratio, args.crop_scale)
train_loader = torch.utils.data.DataLoader(single_dataset, shuffle=True, batch_size=1, num_workers=8, pin_memory=True)
test_loader = torch.utils.data.DataLoader(RawDataset(test_img_paths, transform, mode='one', downsample_ratio=downsample_ratio, test=True), shuffle=False, batch_size=1, pin_memory=True)
return train_loader, test_loader, train_img_paths, test_img_paths
def main(args):
# use gpu
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
cur_device=torch.device('cuda:{}'.format(args.gpu))
if args.loss=='bayes':
root = '/home/datamining/Datasets/CrowdCounting/sha_bayes_512/'
train_path = root+'train/'
test_path = root+'test/'
elif args.bn:
root = '/home/datamining/Datasets/CrowdCounting/sha_512_a/'
train_path = root+'train/'
test_path = root+'test/'
else:
if args.dataset=='sha':
root = '/home/datamining/Datasets/CrowdCounting/shanghaitech/part_A_final/'
train_path = root+'train_data/images'
test_path = root+'test_data/images/'
elif args.dataset=='shb':
root = '/home/datamining/Datasets/CrowdCounting/shb_1024_f15/'
train_path = root+'train/'
test_path = root+'test/'
elif args.dataset =='qnrf':
root = '/home/datamining/Datasets/CrowdCounting/qnrf_1024_a/'
train_path = root+'train/'
test_path = root+'test/'
downsample_ratio = args.downsample
train_loader, test_loader, train_img_paths, test_img_paths = get_loader(train_path, test_path, downsample_ratio, args)
model_dict = {'VGG16_13': M_CSRNet, 'DefCcNet':DefCcNet, 'Res50_back3':Res50, 'InceptionV3':Inception3CC, 'CAN':CANNet}
model_name = args.model
dataset_name = args.dataset
net = model_dict[model_name](downsample=args.downsample, bn=args.bn>0, objective=args.objective, sp=(args.sp>0),se=(args.se>0),NL=args.nl)
net.cuda()
if args.bn>0:
        save_name = '{}_{}_bn{}_ps{}_{}'.format(model_name, dataset_name, str(int(args.bn)), str(args.crop_size), args.loss)
else:
save_name = '{}_d{}{}{}{}{}_{}_{}_cr{}_{}{}{}{}{}{}'.format(model_name, str(args.downsample), '_sp' if args.sp else '', '_se' if args.se else '', '_'+args.nl if args.nl!='relu' else '', '_vp' if args.val_patch else '', dataset_name, args.crop_mode, str(args.crop_scale), args.loss, '_wu' if args.warm_up else '', '_cl' if args.curriculum=='W' else '', '_v'+str(int(args.value_factor)) if args.value_factor!=1 else '', '_amp'+str(args.amp_k) if args.objective=='dmp+amp' else '', '_bg' if args.use_bg else '')
save_path = "/home/datamining/Models/CrowdCounting/"+save_name+".pth"
logger = get_logger('logs/'+save_name+'.txt')
for k, v in args.__dict__.items(): # save args
logger.info("{}: {}".format(k, v))
if os.path.exists(save_path) and args.resume:
net.load_state_dict(torch.load(save_path))
print('{} loaded!'.format(save_path))
value_factor=args.value_factor
freq = 100
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.decay)
    elif args.optimizer == 'SGD':
        # note: SGD did not converge in these experiments
        optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.95, weight_decay=args.decay)
if args.loss=='bayes':
bayes_criterion=Bay_Loss(True, cur_device)
post_prob=Post_Prob(sigma=8.0,c_size=args.crop_size,stride=1,background_ratio=0.15,use_background=True,device=cur_device)
else:
mse_criterion = nn.MSELoss().cuda()
if args.scheduler == 'plt':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.9,patience=10, verbose=True)
elif args.scheduler == 'cos':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer,T_max=50,eta_min=0)
elif args.scheduler == 'step':
scheduler = lr_scheduler.StepLR(optimizer,step_size=100, gamma=0.8)
elif args.scheduler == 'exp':
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
elif args.scheduler == 'cyclic' and args.optimizer == 'SGD':
scheduler = lr_scheduler.CyclicLR(optimizer, base_lr=args.lr*0.01, max_lr=args.lr, step_size_up=25,)
elif args.scheduler == 'None':
scheduler = None
else:
print('scheduler name error!')
if args.val_patch:
best_mae, best_rmse = val_patch(net, test_loader, value_factor)
elif args.loss=='bayes':
best_mae, best_rmse = val_bayes(net, test_loader, value_factor)
else:
best_mae, best_rmse = val(net, test_loader, value_factor)
if args.scheduler=='plt':
scheduler.step(best_mae)
ssim_loss = pytorch_ssim.SSIM(window_size=11)
for epoch in range(args.epochs):
if args.crop_mode == 'curriculum':
# every 20%, change the dataset
if (epoch+1) % (args.epochs//5) == 0:
print('change dataset')
single_dataset = RawDataset(train_img_paths, transform, args.crop_mode, downsample_ratio, args.crop_scale, (epoch+1.0+args.epochs//5)/args.epochs)
train_loader = torch.utils.data.DataLoader(single_dataset, shuffle=True, batch_size=1, num_workers=8)
train_loss = 0.0
if args.loss=='bayes':
epoch_mae = AverageMeter()
epoch_mse = AverageMeter()
net.train()
if args.warm_up and epoch < args.warm_up_steps:
linear_warm_up_lr(optimizer, epoch, args.warm_up_steps,args.lr)
for it, data in enumerate(train_loader):
if args.loss == 'bayes':
inputs, points, targets, st_sizes=data
img = inputs.to(cur_device)
st_sizes = st_sizes.to(cur_device)
gd_count = np.array([len(p) for p in points], dtype=np.float32)
points = [p.to(cur_device) for p in points]
targets = [t.to(cur_device) for t in targets]
else:
img, target, _, amp_gt = data
img = img.cuda()
target = value_factor*target.float().unsqueeze(1).cuda()
amp_gt = amp_gt.cuda()
#print(img.shape)
optimizer.zero_grad()
#print(target.shape)
if args.objective == 'dmp+amp':
output, amp = net(img)
output = output * amp
else:
output = net(img)
if args.curriculum == 'W':
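                # Curriculum weighting (sketch of the idea): T rises linearly with the
                # epoch, and W = T / max(T, output) equals 1 where the prediction is
                # below T but shrinks where it overshoots, so dense/hard pixels are
                # down-weighted early and gradually phased in as T grows.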
delta = (output - target)**2
k_w = 2e-3 * args.value_factor * args.downsample**2
b_w = 5e-3 * args.value_factor * args.downsample**2
T = torch.ones_like(target,dtype=torch.float32) * epoch * k_w + b_w
W = T / torch.max(T,output)
delta = delta * W
mse_loss = torch.mean(delta)
else:
mse_loss = mse_criterion(output, target)
if args.loss == 'mse+lc':
loss = mse_loss + 1e2 * cal_lc_loss(output, target) * args.downsample
elif args.loss == 'ssim':
loss = 1 - ssim_loss(output, target)
elif args.loss == 'mse+ssim':
loss = 100 * mse_loss + 1e-2*(1-ssim_loss(output,target))
elif args.loss == 'mse+la':
loss = mse_loss + cal_spatial_abstraction_loss(output, target)
elif args.loss == 'la':
loss = cal_spatial_abstraction_loss(output, target)
elif args.loss == 'ms-ssim':
#to do
pass
            elif args.loss == 'adversarial':
# to do
pass
elif args.loss == 'bayes':
prob_list = post_prob(points, st_sizes)
loss = bayes_criterion(prob_list, targets, output)
else:
loss = mse_loss
# add the cross entropy loss for attention map
if args.objective == 'dmp+amp':
cross_entropy = (amp_gt * torch.log(amp) + (1 - amp_gt) * torch.log(1 - amp)) * -1
cross_entropy_loss = torch.mean(cross_entropy)
loss = loss + cross_entropy_loss * args.amp_k
loss.backward()
optimizer.step()
data_loss = loss.item()
train_loss += data_loss
if args.loss=='bayes':
N = inputs.size(0)
pre_count = torch.sum(output.view(N, -1), dim=1).detach().cpu().numpy()
res = pre_count - gd_count
epoch_mse.update(np.mean(res * res), N)
epoch_mae.update(np.mean(abs(res)), N)
if args.loss!='bayes' and it%freq==0:
print('[ep:{}], [it:{}], [loss:{:.8f}], [output:{:.2f}, target:{:.2f}]'.format(epoch+1, it, data_loss, output[0].sum().item(), target[0].sum().item()))
if args.val_patch:
mae, rmse = val_patch(net, test_loader, value_factor)
elif args.loss=='bayes':
mae, rmse = val_bayes(net, test_loader, value_factor)
else:
mae, rmse = val(net, test_loader, value_factor)
if not (args.warm_up and epoch<args.warm_up_steps):
if args.scheduler == 'plt':
                scheduler.step(mae)  # the plateau scheduler should track the current epoch's MAE
elif args.scheduler != 'None':
scheduler.step()
if mae + 0.1 * rmse < best_mae + 0.1 * best_rmse:
best_mae, best_rmse = mae, rmse
torch.save(net.state_dict(), save_path)
if args.loss=='bayes':
logger.info('{} Epoch {}/{} Loss:{:.8f},MAE:{:.2f},RMSE:{:.2f} lr:{:.8f}, [CUR]:{mae:.1f}, {rmse:.1f}, [Best]:{b_mae:.1f}, {b_rmse:.1f}'.format(model_name, epoch+1, args.epochs, train_loss/len(train_loader),epoch_mae.get_avg(), np.sqrt(epoch_mse.get_avg()),optimizer.param_groups[0]['lr'], mae=mae, rmse=rmse, b_mae=best_mae, b_rmse=best_rmse))
else:
logger.info('{} Epoch {}/{} Loss:{:.8f}, lr:{:.8f}, [CUR]:{mae:.1f}, {rmse:.1f}, [Best]:{b_mae:.1f}, {b_rmse:.1f}'.format(model_name, epoch+1, args.epochs, train_loss/len(train_loader), optimizer.param_groups[0]['lr'], mae=mae, rmse=rmse, b_mae=best_mae, b_rmse=best_rmse))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Crowd Counting')
parser.add_argument('--model', metavar='model name', default='VGG16_13', choices=['VGG16_13', 'DefCcNet', 'InceptionV3', 'CAN', 'Res50_back3'], type=str)
parser.add_argument('--downsample', metavar='downsample ratio', default=1, choices=[1, 2, 4, 8], type=int)
parser.add_argument('--dataset', metavar='dataset name', default='sha', choices=['sha','shb','qnrf'], type=str)
    parser.add_argument('--resume', default=0, help='resume from the saved model if it exists', type=int)
parser.add_argument('--lr', type=float, default=1e-5, help='the initial learning rate')
parser.add_argument('--gpu', default='0', help='assign device')
parser.add_argument('--scheduler', default='plt', help='lr scheduler', choices=['plt', 'cos', 'step', 'cyclic', 'exp', 'None'], type=str)
parser.add_argument('--optimizer', default='Adam', help='optimizer', choices=['Adam','SGD'], type=str)
parser.add_argument('--decay', default=1e-4, help='weight decay', type=float)
parser.add_argument('--epochs', default=200, type=int)
    parser.add_argument('--loss', default='mse', choices=['mse','mse+lc','ssim','mse+ssim','mse+la','la','ms-ssim','bayes','adversarial'])
    parser.add_argument('--val_patch', help='validate on 4 non-overlapping patches if set to 1', default=0, choices=[0,1], type=int)
    parser.add_argument('--crop_mode', default='random', choices=['random', 'one', 'fixed+random', 'fixed', 'mixed', 'curriculum'], type=str)
    parser.add_argument('--crop_scale', help='patch scale; used when training without batch norm or bayes', default=0.5, type=float)
    parser.add_argument('--crop_size', default=256, help='size of crops taken from the original images; used with batch norm or bayes', type=int)
    parser.add_argument('--warm_up', default=0, help='warm up from 0.1*lr to lr over warm_up_steps', type=int)
    parser.add_argument('--warm_up_steps', default=10, help='warm up steps', type=int)
    parser.add_argument('--curriculum', default='None', help='curriculum learning', choices=['None','W'])
    parser.add_argument('--value_factor', default=1.0, help='multiply the ground truth by this factor', type=float)
    parser.add_argument('--objective', default='dmp', choices=['dmp', 'dmp+amp'], type=str)
    parser.add_argument('--amp_k', default=0.1, help="only used when objective is 'dmp+amp': loss = loss + k * cross_entropy_loss", type=float)
    parser.add_argument('--use_bg', default=0, help='use background images (without any person) in training', choices=[0,1], type=int)
    parser.add_argument('--bn', default=0, help='use batch normalization', type=int)
    parser.add_argument('--bs', default=4, help='batch size when using bn', type=int)
    parser.add_argument('--random_crop_n', default=4, help='number of random crops per image; only used with bn', type=int)
parser.add_argument('--sp', default=0, help='spatial pyramid module', type=int)
parser.add_argument('--se', default=0, help='squeeze excitation module', type=int)
parser.add_argument('--nl', default='relu', help='non-linear layer', choices=['relu', 'prelu', 'swish'], type=str)
args = parser.parse_args()
main(args)
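# Example invocation (illustrative; dataset roots and model paths are hard-coded above):
# python train_generic.py --model VGG16_13 --dataset sha --lr 1e-5 --epochs 200 \
#     --loss mse --objective dmp+amp --scheduler plt --gpu 0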
| {"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]} |
| 16,203 | rongliangzi/CrowdCountingBaseline | refs/heads/master | /modeling/m_vgg.py |
import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
from .utils import *
class M_VGG(nn.Module):
def __init__(self, load_model='', downsample=1, bn=False, NL='swish', objective='dmp', sp=False, se=True, pyramid=''):
super(M_VGG, self).__init__()
self.downsample = downsample
self.bn = bn
self.NL = NL
self.objective = objective
self.pyramid = pyramid
self.features_cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
self.features = make_layers(self.features_cfg, batch_norm=bn, NL=self.NL)
self.sp = False
if sp:
print('use sp module')
self.sp = True
self.sp_module = SPModule(512)
# basic cfg for backend is [512, 512, 512, 256, 128, 64]
if not self.pyramid:
self.backconv0 = make_layers([512, 512, 512], in_channels=512, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv1 = make_layers([256], in_channels=512, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv2 = make_layers([128], in_channels=256, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv3 = make_layers([64], in_channels=128, dilation=True, batch_norm=bn, NL=self.NL, se=se)
elif self.pyramid == 'dilation':
print('use dilation pyramid in backend')
self.backconv0 = nn.Sequential(DilationPyramid(512, 128), DilationPyramid(512, 128))
self.backconv1 = make_layers([256], in_channels=512, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv2 = make_layers([128], in_channels=256, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv3 = make_layers([64], in_channels=128, dilation=True, batch_norm=bn, NL=self.NL, se=se)
elif self.pyramid == 'size':
print('use size pyramid in backend')
self.backconv0 = nn.Sequential(SizePyramid(512, 128), SizePyramid(512, 128))
self.backconv1 = make_layers([256], in_channels=512, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv2 = make_layers([128], in_channels=256, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv3 = make_layers([64], in_channels=128, dilation=True, batch_norm=bn, NL=self.NL, se=se)
elif self.pyramid == 'depth':
print('use depth pyramid in backend')
self.backconv0 = nn.Sequential(DepthPyramid(512, 128), DepthPyramid(512, 128))
self.backconv1 = make_layers([256], in_channels=512, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv2 = make_layers([128], in_channels=256, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.backconv3 = make_layers([64], in_channels=128, dilation=True, batch_norm=bn, NL=self.NL, se=se)
# objective is density map(dmp) and (binary) attention map(amp)
if self.objective == 'dmp+amp':
print('objective dmp+amp!')
self.amp_process = make_layers([64,64], in_channels=64, dilation=True, batch_norm=bn, NL=self.NL, se=se)
self.amp_layer = nn.Conv2d(64, 1, kernel_size=1)
self.sgm = nn.Sigmoid()
elif self.objective == 'dmp':
print('objective dmp')
else:
raise Exception('objective must in [dmp, dmp+amp]')
self.output_layer = nn.Conv2d(64, 1, kernel_size=1)
self.load_model = load_model
self._init_weights()
def forward(self, x_in):
x = self.features(x_in)
if self.sp:
x = self.sp_module(x)
x = self.backconv0(x)
if self.downsample == 8:
x = F.interpolate(x, size=[s//8 for s in x_in.shape[2:]])
elif self.downsample<8:
x = F.interpolate(x, scale_factor=2)
#
x = self.backconv1(x)
if self.downsample == 4:
x = F.interpolate(x, size=[s//4 for s in x_in.shape[2:]])
elif self.downsample<4:
x = F.interpolate(x, scale_factor=2)
#
x = self.backconv2(x)
if self.downsample == 2:
x = F.interpolate(x, size=[s//2 for s in x_in.shape[2:]])
elif self.downsample<2:
x = F.interpolate(x, scale_factor=2)
x = self.backconv3(x)
if self.downsample == 1:
x = F.interpolate(x, size=x_in.shape[2:])
if self.objective == 'dmp+amp':
dmp = self.output_layer(x)
amp = self.amp_layer(self.amp_process(x))
amp = self.sgm(amp)
dmp = amp * dmp
del x
dmp = torch.abs(dmp)
return dmp, amp
else:
x = self.output_layer(x)
x = torch.abs(x)
return x
def _random_init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _init_weights(self):
if not self.load_model:
pretrained_dict = dict()
model_dict = self.state_dict()
path = "/home/datamining/Models/vgg16_bn-6c64b313.pth" if self.bn else '/home/datamining/Models/vgg16-397923af.pth'
pretrained_model = torch.load(path)
self._random_init_weights()
# load the pretrained vgg16 parameters
for k, v in pretrained_model.items():
if k in model_dict and model_dict[k].size() == v.size():
pretrained_dict[k] = v
print(k, ' parameters loaded!')
print(path, 'weights loaded!')
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
else:
self.load_state_dict(torch.load(self.load_model))
print(self.load_model,' loaded!')
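# Minimal shape check (illustrative sketch; note __init__ loads the VGG16 weights
# from the hard-coded path above, so that file must exist):
# net = M_VGG(downsample=8)
# out = net(torch.randn(1, 3, 256, 256))
# print(out.shape)   # density map at 1/8 resolution: torch.Size([1, 1, 32, 32])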
| {"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]} |
| 16,215 | rdarekar/selenium-python-framework | refs/heads/master | /tests/conftest.py |
import pytest
from selenium import webdriver
from base.webdriverfactory import WebDriverFactory
from pages.home.login_page import LoginPage
@pytest.fixture()
def setUp():
print("Running method level setUp")
yield
print("Running method level tearDown")
@pytest.fixture(scope="class")
def oneTimeSetUp(request, browser, osType):
#print("Running conftest demo one time setUp")
print("Running one time setUp")
wdf = WebDriverFactory(browser)
driver = wdf.getWebDriverInstance()
lp = LoginPage(driver)
lp.login("test@email.com", "abcabc")
# if browser == "firefox":
# baseURL = "https://letskodeit.teachable.com/"
# driver = webdriver.Firefox()
# driver.maximize_window()
# driver.implicitly_wait(3)
# driver.get(baseURL)
# print("Running tests on FF")
# else:
# baseURL = "https://letskodeit.teachable.com/"
# driver = webdriver.Chrome()
# driver.maximize_window()
# driver.implicitly_wait(3)
# driver.get(baseURL)
# print("Running tests on Chrome")
    # Attach the driver to the requesting test class via the "request" fixture, so it
    # becomes a class attribute (self.driver) available to every test method in that class
if request.cls is not None:
request.cls.driver = driver
    # yield hands the driver to whichever test class consumes this fixture
yield driver
#driver.quit()
#print("Running conftest demo one time tearDown")
print("Running one time tearDown")
def pytest_addoption(parser):
parser.addoption("--browser")
parser.addoption("--osType", help="Type of Operating System")
@pytest.fixture(scope="session")
def browser(request):
return request.config.getoption("--browser")
@pytest.fixture(scope="session")
def osType(request):
return request.config.getoption("--osType")
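# Usage sketch (illustrative): a test class consuming the fixtures above gets the
# driver injected as a class attribute and picks up --browser/--osType from the CLI.
#
# @pytest.mark.usefixtures("oneTimeSetUp", "setUp")
# class TestHomePage:
#     def test_title(self):
#         assert self.driver.title is not None
#
# Run with: pytest -q --browser chrome --osType windows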
| {"/tests/courses/register_courses_csv_data.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_multiple_data_set.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_tests.py": ["/pages/courses/register_courses_page.py"]} |
| 16,216 | rdarekar/selenium-python-framework | refs/heads/master | /tests/courses/register_courses_csv_data.py |
from pages.home.login_page import LoginPage
from pages.courses.register_courses_page import RegisterCoursesPage
from utilities.teststatus import TestStatus
import unittest
import pytest
from ddt import ddt, data, unpack
from utilities.read_data import getCSVData
from pages.home.navigation_page import NavigationPage
@pytest.mark.usefixtures("oneTimeSetUp", "setUp")
@ddt
class RegisterCoursesCSVDataTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def classSetUp(self, oneTimeSetUp):
# self.lp = LoginPage(self.driver)
self.courses = RegisterCoursesPage(self.driver)
self.ts = TestStatus(self.driver)
self.nav = NavigationPage(self.driver)
def setUp(self):
self.driver.find_element_by_xpath("//a[@class='navbar-brand header-logo']").click()
# self.driver.get("https://learn.letskodeit.com/courses")
self.nav.navigateToAllCourses()
@pytest.mark.run(order=1)
@data(*getCSVData("D:\\sachin_thakare\\python_programs\\letskodeit\\framework_pom_screenshot\\testdata.csv"))
@unpack # This decorator will unpack all the tuple / list elements into multiple arguments
def test_invalidEnrollment(self, courseName, ccNum, ccExp, ccCVV, zip):
# self.lp.login("test@email.com", "abcabc")
self.courses.clickSearchBox()
self.courses.enterCourseName(courseName)
# self.courses.selectCourseToEnroll()
self.courses.selectCourseToEnroll(courseName)
self.courses.clickEnrollButton()
self.courses.enrollCourse(num=ccNum, exp=ccExp, cvv=ccCVV, zip=zip)
result = self.courses.verifyEnrollFailed()
self.ts.markFinal("test_invalidEnrollment", result, "Enrollment Failed Verification")
# self.driver.find_element_by_link_text("All Courses").click()
# Commenting the below lines, as we are using the same lines under setUp()
# self.driver.find_element_by_xpath("//a[@class='navbar-brand header-logo']").click()
# self.driver.get("https://learn.letskodeit.com/courses")
| {"/tests/courses/register_courses_csv_data.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_multiple_data_set.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_tests.py": ["/pages/courses/register_courses_page.py"]} |
| 16,217 | rdarekar/selenium-python-framework | refs/heads/master | /tests/courses/register_courses_multiple_data_set.py |
from pages.home.login_page import LoginPage
from pages.courses.register_courses_page import RegisterCoursesPage
from utilities.teststatus import TestStatus
import unittest
import pytest
from ddt import ddt, data, unpack
@pytest.mark.usefixtures("oneTimeSetUp", "setUp")
@ddt
class RegisterMultipleCoursesTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def classSetUp(self, oneTimeSetUp):
# self.lp = LoginPage(self.driver)
self.courses = RegisterCoursesPage(self.driver)
self.ts = TestStatus(self.driver)
@pytest.mark.run(order=1)
@data(("JavaScript for beginners", "41811 5705 8102 5900", "10/21", "665", "411018"),
("Learn Python 3 from scratch", "41811 5705 8102 5900", "10/21", "665", "411018"))
@unpack # This decorator will unpack all the tuple / list elements into multiple arguments
def test_invalidEnrollment(self, courseName, ccNum, ccExp, ccCVV, zip):
# self.lp.login("test@email.com", "abcabc")
self.courses.clickSearchBox()
self.courses.enterCourseName(courseName)
# self.courses.selectCourseToEnroll()
self.courses.selectCourseToEnroll(courseName)
self.courses.clickEnrollButton()
self.courses.enrollCourse(num=ccNum, exp=ccExp, cvv=ccCVV, zip=zip)
result = self.courses.verifyEnrollFailed()
self.ts.markFinal("test_invalidEnrollment", result, "Enrollment Failed Verification")
# self.driver.find_element_by_link_text("All Courses").click()
self.driver.find_element_by_xpath("//a[@class='navbar-brand header-logo']").click()
# self.driver.get("https://learn.letskodeit.com/courses")
| {"/tests/courses/register_courses_csv_data.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_multiple_data_set.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_tests.py": ["/pages/courses/register_courses_page.py"]} |
| 16,218 | rdarekar/selenium-python-framework | refs/heads/master | /pages/courses/register_courses_page.py |
import utilities.custom_logger as cl
import logging
from base.basepage import BasePage
import time
class RegisterCoursesPage(BasePage):
log = cl.customLogger(logging.DEBUG)
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
# Locators
_search_box = "//input[@id='search-courses']"
_search_icon = "//button[@id='search-course-button']"
# _course = "//div[@data-course-id='56740']//div[contains(text(), 'JavaScript for beginners')]"
# _course = "//div[contains(@class,'course-listing-title') and contains(text(), 'JavaScript for beginners')]"
_course = "//div[contains(@class,'course-listing-title') and contains(text(), '{0}')]"
_all_courses = "//div[@class='course-listing-title']"
_enroll_button = "//button[@id='enroll-button-top']"
_cc_num = "//input[@aria-label='Credit or debit card number']"
_exp_date = "//input[@name='exp-date']"
_cc_cvv = "//input[@name='cvc']"
_zip = "//input[@name='postal']"
_terms_and_policies = "//input[@id='agreed_to_terms_checkbox']"
# _submit_enroll = "//button[@class='btn btn-primary spc__button is-disabled']"
_submit_enroll = "//button[@id='confirm-purchase']"
# Perform Actions
# Enter course name
def clickSearchBox(self):
self.elementClick(self._search_box, "xpath")
def enterCourseName(self, name):
self.sendKeys(name, self._search_box, "xpath")
self.elementClick(self._search_icon, "xpath")
# def selectCourseToEnroll(self):
# self.elementClick(self._course, "xpath")
def selectCourseToEnroll(self, fullCourseName):
self.elementClick(locator=self._course.format(fullCourseName), locatorType="xpath")
def clickEnrollButton(self):
self.elementClick(self._enroll_button, "xpath")
def enterCardNum(self, num):
# self.switchToFrame(name="__privateStripeFrame16")
# self.sendKeys(num, self._cc_num, "xpath")
time.sleep(7)
self.switchFrameByIndex(self._cc_num, locatorType="xpath")
self.sendKeysWhenReady(num, locator=self._cc_num, locatorType="xpath")
# self.sendKeysCustom(num, self._cc_num, "xpath")
self.switchToDefaultContent()
def enterCardExp(self, exp):
# self.switchToFrame(name="__privateStripeFrame17")
self.switchFrameByIndex(self._exp_date, locatorType="xpath")
self.sendKeys(exp, self._exp_date, "xpath")
self.switchToDefaultContent()
def enterCardCVV(self, cvv):
# self.switchToFrame(name="__privateStripeFrame18")
self.switchFrameByIndex(self._cc_cvv, locatorType="xpath")
self.sendKeys(cvv, self._cc_cvv, "xpath")
self.switchToDefaultContent()
def enterZip(self, zip):
# self.switchToFrame(name="__privateStripeFrame19")
self.switchFrameByIndex(self._zip, locatorType="xpath")
self.sendKeys(zip, self._zip, "xpath")
self.switchToDefaultContent()
def checkTermsAndPolicy(self):
self.elementClick(self._terms_and_policies, "xpath")
def clickEnrollSubmitButton(self):
self.elementClick(self._submit_enroll, "xpath")
def enterCreditCardInformation(self, num, exp, cvv, zip):
self.enterCardNum(num)
self.enterCardExp(exp)
self.enterCardCVV(cvv)
self.enterZip(zip)
self.checkTermsAndPolicy()
def enrollCourse(self, num="", exp="", cvv="", zip=""):
self.clickEnrollButton()
self.webScroll("down")
self.enterCreditCardInformation(num, exp, cvv, zip)
# This step of clicking on Enroll Submit button is not included in the framework by tutor
# self.clickEnrollSubmitButton()
def verifyEnrollFailed(self):
result = self.isEnabled(locator=self._submit_enroll, locatorType="xpath", info="Enroll Button")
return not result
| {"/tests/courses/register_courses_csv_data.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_multiple_data_set.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_tests.py": ["/pages/courses/register_courses_page.py"]} |
| 16,219 | rdarekar/selenium-python-framework | refs/heads/master | /tests/courses/register_courses_tests.py |
from pages.home.login_page import LoginPage
from pages.courses.register_courses_page import RegisterCoursesPage
from utilities.teststatus import TestStatus
import unittest
import pytest
@pytest.mark.usefixtures("oneTimeSetUp", "setUp")
class RegisterCoursesTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def classSetUp(self, oneTimeSetUp):
self.lp = LoginPage(self.driver)
self.courses = RegisterCoursesPage(self.driver)
self.ts = TestStatus(self.driver)
def test_invalidEnrollment(self):
self.lp.login("test@email.com", "abcabc")
self.courses.clickSearchBox()
self.courses.enterCourseName("JavaScript")
# self.courses.selectCourseToEnroll()
self.courses.selectCourseToEnroll("JavaScript for beginners")
self.courses.clickEnrollButton()
self.courses.enrollCourse(num="4181 5705 8102 5900", exp="10/21", cvv="665", zip="411018")
result = self.courses.verifyEnrollFailed()
self.ts.markFinal("test_invalidEnrollment", result, "Enrollment Failed Verification")
| {"/tests/courses/register_courses_csv_data.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_multiple_data_set.py": ["/pages/courses/register_courses_page.py"], "/tests/courses/register_courses_tests.py": ["/pages/courses/register_courses_page.py"]} |
| 16,221 | sukhpreetn/QuizApp | refs/heads/master | /AIP/migrations/0007_auto_20200403_2152.py |
# Generated by Django 3.0.3 on 2020-04-03 21:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('AIP', '0006_trainee_attendance'),
]
operations = [
migrations.RenameField(
model_name='attendance',
old_name='register_time',
new_name='expire_time',
),
]
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,222 | sukhpreetn/QuizApp | refs/heads/master | /AIP/migrations/0004_result_c_email.py |
# Generated by Django 3.0.3 on 2020-03-25 17:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AIP', '0003_auto_20200325_1353'),
]
operations = [
migrations.AddField(
model_name='result',
name='c_email',
field=models.CharField(default='', max_length=100),
),
]
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,223 | sukhpreetn/QuizApp | refs/heads/master | /AIP/migrations/0003_auto_20200325_1353.py |
# Generated by Django 3.0.3 on 2020-03-25 13:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AIP', '0002_result_c_quiz_name'),
]
operations = [
migrations.AddField(
model_name='result',
name='c_total_ans_correct',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='result',
name='c_total_ans_incorrect',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='result',
name='c_total_q_asked',
field=models.IntegerField(default=0),
),
]
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,224 | sukhpreetn/QuizApp | refs/heads/master | /AIP/forms.py |
from django import forms
from .models import Question
class QuestionForm(forms.ModelForm):
class Meta:
model = Question
labels = {
'q_subject' : 'Subject',
'q_cat' : 'Category',
'q_rank' : 'Rank',
'q_text' : 'Question',
'q_option1' : 'Answer Option1',
'q_option2' : 'Answer Option2',
'q_option3' : 'Answer Option3',
'q_option4' : 'Answer Option4',
'q_answer' : 'Answer',
}
fields = ('q_subject','q_cat','q_rank','q_text','q_option1','q_option2','q_option3','q_option4','q_answer')
#fields = '__all__'
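# Usage sketch in a view (illustrative):
# form = QuestionForm(request.POST or None)
# if form.is_valid():
#     form.save()   # creates a Question row with the labelled fields above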
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,225 | sukhpreetn/QuizApp | refs/heads/master | /AIP/urls.py |
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.urls import path , include
from . import views
from django.views.generic.base import TemplateView # new
app_name = 'AIP'
urlpatterns = [
path('', views.index, name='index'),
path('pickskill/', views.pickskill, name='pickskill'),
path('begin/', views.begin, name='begin'),
path('quiz/', views.quiz, name='quiz'),
path('quizsimple/', views.quizsimple, name='quizsimple'),
path('upload/', views.upload, name='upload'),
path('comment/', views.comment, name='comment'),
path('question/', views.question, name='question'),
path('logout/', views.logout, name='logout'),
path('export/', views.export, name='export'),
path('add/', views.add, name='add'),
path('questionupload/', views.questionupload, name='questionupload'),
path('scores/', views.scores, name='scores'),
path('quizzes/', views.quizzes,name='quizzes'),
path('addquiz/', views.addquiz,name='addquiz'),
path('addquestion/', views.addquestion, name='addquestion'),
path('addquestion1/', views.addquestion1, name='addquestion1'),
path('quizlist/', views.quizbucket, name='quizbucket'),
path('quiz/<int:pk>/', views.takequiz, name='takequiz'),
path('searchquiz/<int:pk>/', views.searchquiz, name='searchquiz'),
path('scores/<int:pk>/', views.scores, name='scores'),
path('review/', views.review, name='review'),
path('review/<str:pk>/', views.reviewquiz, name='reviewquiz'),
path('markattendance/<int:pk>/', views.markattendance, name='markattendance'),
path('showattendance/<int:pk>/', views.showattendance, name='showattendance'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,226 | sukhpreetn/QuizApp | refs/heads/master | /AIP/admin.py |
from django.contrib import admin
from .models import Question, Answer, Result, Quiz, Attendance, Trainee_Attendance
# Register your models here.
admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Result)
admin.site.register(Quiz)
admin.site.register(Attendance)
admin.site.register(Trainee_Attendance)
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,227 | sukhpreetn/QuizApp | refs/heads/master | /AIP/migrations/0006_trainee_attendance.py |
# Generated by Django 3.0.3 on 2020-04-03 21:07
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AIP', '0005_attendance'),
]
operations = [
migrations.CreateModel(
name='Trainee_Attendance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trainee_email', models.CharField(default='', max_length=100)),
('login_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,228 | sukhpreetn/QuizApp | refs/heads/master | /AIP/models.py |
from django.db import models
from datetime import datetime
# Create your models here.
class Attendance(models.Model):
trainer_name = models.CharField(max_length=200,default='')
trainee_emails = models.TextField(null=True)
expire_time = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self):
return self.trainer_name
class Trainee_Attendance(models.Model):
trainee_email = models.CharField(max_length=100,default='')
login_time = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self):
return self.trainee_email
class Quiz(models.Model):
quiz_name = models.CharField(max_length=100,default='')
quiz_OrgIdentifier = models.CharField(max_length=40,default='')
quiz_questions = models.TextField(null=True)
quiz_noofquest = models.IntegerField(default=0)
def __str__(self):
return self.quiz_name + "_" + self.quiz_OrgIdentifier
class Question(models.Model):
q_subject = models.CharField(max_length=40)
q_cat = models.CharField(max_length=40)
q_rank = models.CharField(max_length=20)
q_text = models.TextField(null=True,default='')
q_option1 = models.CharField(max_length=200,default='')
q_option2 = models.CharField(max_length=200,default='')
q_option3 = models.CharField(max_length=200,default='')
q_option4 = models.CharField(max_length=200,default='')
q_answer = models.CharField(max_length=20)
q_ask_time = models.DateTimeField(default=datetime.now, blank=True)
no_times_ques_served = models.IntegerField(default=0)
no_times_anwered_correctly = models.IntegerField(default=0)
no_times_anwered_incorrectly = models.IntegerField(default=0)
difficulty_score = models.DecimalField(default=0,max_digits = 5, decimal_places = 2)
def __str__(self):
return self.q_text
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
ans_option = models.CharField(max_length=20)
is_correct = models.BooleanField(default=False)
ans_time = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self):
return self.ans_option
class Result(models.Model):
c_user = models.CharField(max_length=100)
c_email = models.CharField(max_length=100,default='')
c_quiz_name = models.CharField(max_length=100,default='')
c_tot_score = models.IntegerField(default=0)
c_cat_scores = models.TextField(null=True,default=0)
c_comment = models.TextField(null=True,default='')
c_new_quest = models.TextField(null=True,default='')
c_attempt_date = models.DateTimeField(default=datetime.now, blank=True)
c_total_q_asked = models.IntegerField(default=0)
c_total_ans_correct = models.IntegerField(default=0)
c_total_ans_incorrect = models.IntegerField(default=0)
def __str__(self):
return self.c_user
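# Query sketch (illustrative): per-question difficulty could be refreshed from the
# served/answered counters kept on Question, e.g.
# for q in Question.objects.filter(no_times_ques_served__gt=0):
#     q.difficulty_score = q.no_times_anwered_incorrectly / q.no_times_ques_served
#     q.save()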
| {"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]} |
| 16,229 | sukhpreetn/QuizApp | refs/heads/master | /AIP/views.py |
import datetime
from datetime import timedelta
import os
import pprint
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
from django.db.models import Count
import pandas as pd
from django.conf import settings
from django.http import HttpResponse, request, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, get_list_or_404, redirect, resolve_url
from django.utils.datastructures import MultiValueDictKeyError
from django.core.files.storage import FileSystemStorage
from django.views import generic
from django.urls import reverse
from .models import Question, Answer, Result,Quiz,Attendance,Trainee_Attendance
from django.http import HttpResponse
import json
import random
import csv, io
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from .forms import QuestionForm
from django.contrib.auth.models import User
from django.views.generic import (CreateView, DeleteView, DetailView, ListView,UpdateView)
def index(request):
quizname = request.session.get('quizname',None)
#quizattend = request.session['fromattend']
#return HttpResponse(quizname)
if quizname is None:
#settings.LOGIN_REDIRECT_URL = '/pickskill'
#return render(request, 'AIP/index.html')
settings.LOGIN_REDIRECT_URL = '/'
return render(request, 'AIP/success.html')
else:
return redirect('AIP:takequiz', pk=quizname)
def pickskill(request):
request.session['user'] = request.user.get_full_name()
request.session['email'] = request.user.email
user = request.session['user']
email = request.session['email']
Result.objects.create(c_user=user,c_email=email)
category_count = Question.objects.values('q_rank').order_by('q_rank').annotate(count=Count('q_rank'))
cat_count = list(category_count)
categories = []
for item in cat_count:
cnt = str(item['count'])
line = item['q_rank'] + ' (' + cnt + ')'
categories.append(line)
context = {'user':user,'categories':categories}
return render(request, 'AIP/pickskill.html',context)
def begin(request):
if request.method == 'POST':
subject = request.POST['skill']
rank = request.POST['proficiency']
context = {'subject': subject, 'rank': rank}
request.session['skill'] = subject
request.session['proficiency'] = rank
request.session['curr_difficulty_score'] = 1
request.session['total_q_asked'] = 1
request.session['total_q_ans_correct'] = 0
request.session['counter'] = 0
cat_dict = {'Introduction': 0, 'Syntax': 0, 'OOPS': 0, 'NativeDataTypes': 0, 'FileAndExceptionHandling': 0,
'Function': 0, 'Advanced': 0,'All': 0}
request.session['cat_dict'] = cat_dict
request.session['score'] = 0
if rank == 'Adaptive':
return render(request, 'AIP/begin.html', context)
else:
return render(request, 'AIP/beginsimple.html', context)
def quizsimple(request):
subject = request.session['skill']
rank = request.session['proficiency']
total_q_asked = request.session['total_q_asked']
total_q_ans_correct = request.session['total_q_ans_correct']
counter = request.session['counter']
score = request.session['score']
cat_dict = request.session['cat_dict']
user = request.session['user']
questions = Question.objects.filter(q_subject=subject, q_rank=rank)
    total = questions.count()
    # random.randint is inclusive on both ends, so pick an index in 0..total-1
    ind = random.randint(0, total - 1)
question = questions[ind]
#question = questions[0]
context = {'total_q_asked': total_q_asked, 'question': question}
if request.method == 'POST':
option = request.POST.get('options')
q = Question(question.pk)
ans = Answer()
ans.question = q
question.no_times_ques_served += 1
total_q_asked += 1
if question.q_answer == option:
ans.ans_option = option
ans.is_correct = True
question.no_times_anwered_correctly += 1
total_q_ans_correct += 1
cat_dict[question.q_cat] += 1
ans.save()
else:
ans.ans_option = option
ans.is_correct = False
question.no_times_anwered_incorrectly += 1
ans.save()
Question.objects.filter(pk=q.pk).update(no_times_ques_served=question.no_times_ques_served,
no_times_anwered_correctly=question.no_times_anwered_correctly,
no_times_anwered_incorrectly=question.no_times_anwered_incorrectly)
        if counter == (total - 1) or request.POST.get('END') == 'STOP':
score1 = (total_q_ans_correct / (total_q_asked - 1)) * 100
score = round(score1)
cat_scores = json.dumps(cat_dict)
total_ans_incorrect = ((total_q_asked - 1) - total_q_ans_correct)
Result.objects.filter(c_user=user).update(c_tot_score=score)
Result.objects.filter(c_user=user).update(c_cat_scores=cat_scores,c_total_q_asked=(total_q_asked-1),c_total_ans_correct=total_q_ans_correct,c_total_ans_incorrect=total_ans_incorrect)
score_context = {'score': score, 'cat_dict': cat_dict, 'total_q_asked': total_q_asked - 1,
'total_q_ans_correct': total_q_ans_correct}
return render(request, 'AIP/report.html', score_context)
counter += 1
request.session['score'] = score
request.session['counter'] = counter
request.session['total_q_asked'] = total_q_asked
request.session['total_q_ans_correct'] = total_q_ans_correct
request.session['cat_dict'] = cat_dict
        questions = Question.objects.filter(q_subject=subject, q_rank=rank)
        total = questions.count()
        ind = random.randint(0, total - 1)
        question = questions[ind]
context = {'total_q_asked': total_q_asked, 'question': question}
return render(request, 'AIP/quizsimple.html', context)
else:
# this is GET flow of 1st question
return render(request, 'AIP/quizsimple.html', context)
def quiz(request):
subject = request.session['skill']
rank = request.session['proficiency']
curr_difficulty_score = request.session['curr_difficulty_score']
total_q_asked = request.session['total_q_asked']
total_q_ans_correct = request.session['total_q_ans_correct']
score = request.session['score']
cat_dict = request.session['cat_dict']
user = request.session['user']
counter = request.session['counter']
questions = Question.objects.filter(q_subject=subject, q_rank=rank).filter(difficulty_score__gt=curr_difficulty_score).order_by('difficulty_score')
question = questions[0]
context = {'total_q_asked': total_q_asked, 'question': question}
if request.method == 'POST':
option = request.POST.get('options')
q = Question(question.pk)
ans = Answer()
ans.question = q
question.no_times_ques_served += 1
total_q_asked += 1
if question.q_answer == option:
ans.ans_option = option
ans.is_correct = True
question.no_times_anwered_correctly += 1
total_q_ans_correct += 1
cat_dict[question.q_cat] += 1
ans.save()
else:
ans.ans_option = option
ans.is_correct = False
question.no_times_anwered_incorrectly += 1
ans.save()
Question.objects.filter(pk=q.pk).update(no_times_ques_served=question.no_times_ques_served,
no_times_anwered_correctly=question.no_times_anwered_correctly,
no_times_anwered_incorrectly=question.no_times_anwered_incorrectly,
difficulty_score=curr_difficulty_score)
if counter == 4 or request.POST.get('END') == 'STOP':
score1 = (total_q_ans_correct / (total_q_asked - 1)) * 100
score = round(score1)
cat_scores = json.dumps(cat_dict)
total_ans_incorrect = ((total_q_asked - 1) - total_q_ans_correct)
Result.objects.filter(c_user=user).update(c_tot_score=score)
Result.objects.filter(c_user=user).update(c_cat_scores=cat_scores, c_total_q_asked=(total_q_asked-1),
c_total_ans_correct=total_q_ans_correct,
c_total_ans_incorrect=total_ans_incorrect)
score_context = {'score': score, 'cat_dict': cat_dict, 'total_q_asked': total_q_asked - 1,
'total_q_ans_correct': total_q_ans_correct}
return render(request, 'AIP/report.html', score_context)
counter += 1
request.session['counter'] = counter
request.session['score'] = score
request.session['total_q_asked'] = total_q_asked
request.session['total_q_ans_correct'] = total_q_ans_correct
request.session['curr_difficulty_score'] = curr_difficulty_score
request.session['cat_dict'] = cat_dict
        # alternative formula: incorrect / (incorrect + correct) - note the
        # parentheses are required, otherwise the division would bind first
        # update the running difficulty as the fraction of incorrect answers
        curr_difficulty_score = question.no_times_anwered_incorrectly / question.no_times_ques_served
questions = Question.objects.filter(q_subject=subject, q_rank=rank).filter(
difficulty_score__gt=curr_difficulty_score).order_by('difficulty_score')
question = questions[0]
context = {'total_q_asked': total_q_asked, 'question': question}
return render(request, 'AIP/quiz.html', context)
else:
return render(request, 'AIP/quiz.html', context)
def comment(request):
context = {}
user = request.session['user']
if request.method == 'POST':
comment = request.POST.get('comment')
Result.objects.filter(c_user=user).update(c_comment=comment)
        context['commsuccess'] = "Comment added. Thank you!"
return render(request, 'AIP/report.html', context)
def question(request):
context = {}
user = request.session['user']
if request.method == 'POST':
question = request.POST.get('question')
Result.objects.filter(c_user=user).update(c_new_quest=question)
        context['quessuccess'] = "Question added. Thank you!"
return render(request, 'AIP/report.html', context)
def upload(request):
context = {}
if request.method == 'POST':
try:
uploaded_file = request.FILES['document']
except MultiValueDictKeyError:
return HttpResponse("Please upload a file")
fs = FileSystemStorage()
name = fs.save(uploaded_file.name, uploaded_file)
context['url'] = fs.url(name)
return render(request, 'AIP/report.html', context)
def logout(request):
try:
del request.session['user']
except KeyError:
pass
return render(request, 'AIP/index.html')
@permission_required('admin.can_add_log_entry')
def export(request):
response = HttpResponse(content_type='text/csv')
writer = csv.writer(response)
writer.writerow(
['q_subject', 'q_cat', 'q_rank', 'q_text', 'q_option1', 'q_option2', 'q_option3', 'q_option4', 'q_answer',
'q_ask_time', 'no_times_ques_served', 'no_times_anwered_correctly', 'no_times_anwered_incorrectly',
'difficulty_score'])
for data in Question.objects.all().values_list('q_subject', 'q_cat', 'q_rank', 'q_text', 'q_option1', 'q_option2',
'q_option3', 'q_option4', 'q_answer', 'q_ask_time',
'no_times_ques_served', 'no_times_anwered_correctly',
'no_times_anwered_incorrectly', 'difficulty_score'):
writer.writerow(data)
response['Content-Disposition'] = 'attachment; filename="questions.csv"'
return response
@permission_required('admin.can_add_log_entry')
def questionupload(request):
# template = question_upload.html
    prompt = {
        'order': 'File must be pipe-delimited with columns: q_subject|q_cat|q_rank|q_text|q_option1|q_option2|q_option3|q_option4|q_answer'
    }
if request.method == "GET":
return render(request, 'AIP/question_upload.html', prompt)
csv_file = request.FILES['file']
    if not csv_file.name.endswith('.csv'):
        messages.error(request, 'This is not a CSV file')
        return render(request, 'AIP/question_upload.html', prompt)
data_set = csv_file.read().decode('UTF-8')
io_string = io.StringIO(data_set)
next(io_string)
for column in csv.reader(io_string, delimiter='|'):
_, created = Question.objects.update_or_create(
q_subject=column[0],
q_cat=column[1],
q_rank=column[2],
q_text=column[3],
q_option1=column[4],
q_option2=column[5],
q_option3=column[6],
q_option4=column[7],
q_answer=column[8]
)
context = {}
return render(request, 'AIP/question_upload.html', context)
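# An illustrative upload row (hypothetical data): one pipe-delimited line per
# question, after a header line that next(io_string) skips, e.g.
#   Python|Syntax|Beginner|What does len('ab') return?|1|2|3|4|q_option2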
@permission_required('admin.can_add_log_entry')
def scores(request,pk):
quiz = get_object_or_404(Quiz, pk=pk)
quiznm = quiz.quiz_OrgIdentifier
results = Result.objects.filter(c_quiz_name=pk).order_by('-c_attempt_date')
if not results:
context = {'results': results, 'quiz': 'Quiz Not Found'}
else:
context = {'results':results,'quiznm':quiznm}
return render(request, 'AIP/scores.html', context)
def searchquiz(request,pk):
results = Result.objects.filter(c_quiz_name = pk).order_by('-c_attempt_date')
if not results:
context = {'results': results, 'quiz': 'Quiz Not Found'}
else:
context = {'results':results,'quiz':pk}
return render(request, 'AIP/scores.html', context)
@permission_required('admin.can_add_log_entry')
def quizzes(request):
quizzes = list(Quiz.objects.all())
context = {'quizzes': quizzes}
return render(request, 'AIP/quizzes.html',context)
def addquiz(request):
return render(request, 'AIP/quizadd.html')
def add(request):
subject = request.session['subject']
category = request.session['category']
count = request.session['count']
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
question = form.save()
question.save()
if count == 2:
questions = list(Question.objects.all())
context = {'subject': subject, 'category': category, 'questions': questions}
return render(request, 'AIP/addquestion.html', context)
count += 1
request.session['count'] = count
form = QuestionForm()
return render(request, 'AIP/add.html', {'form': form})
else:
form = QuestionForm()
return render(request, 'AIP/add.html', {'form': form})
def addquestion(request):
subject = request.POST['Subject']
quizname = request.POST['quizname']
request.session['subject'] = subject
request.session['quizname'] = quizname
request.session['questionlist'] = []
request.session['count'] = 1
request.session['countdrop'] = 1
questions = list(Question.objects.all())
context = {'subject': subject, 'questions': questions}
return render(request, 'AIP/addquestion.html', context)
def addquestion1(request):
selectedquestion = []
subject = request.session['subject']
quizname = request.session['quizname']
if request.method == 'POST':
for question in request.POST.getlist('questionchecked'):
selectedquestion.append(int(question))
q = Quiz()
q.quiz_name = subject
q.quiz_OrgIdentifier = quizname
q.quiz_questions = selectedquestion
q.quiz_noofquest = len(selectedquestion)
q.save()
quizzes = Quiz.objects.all()
context = {'quizzes': quizzes}
return render(request, 'AIP/quizzes.html', context)
def quizbucket(request):
if request.user.is_authenticated:
quizzes = Quiz.objects.all()
context = {'quizzes': quizzes}
request.session['user'] = request.user.get_full_name()
user = request.session['user']
Result.objects.create(c_user=user)
request.session['q_no'] = 0
request.session['total_q_asked'] = 1
request.session['total_q_ans_correct'] = 0
request.session['counter'] = 0
cat_dict = {'Introduction': 0, 'Syntax': 0, 'OOPS': 0, 'NativeDataTypes': 0, 'FileAndExceptionHandling': 0,'Function': 0, 'Advanced': 0,'All':0}
request.session['cat_dict'] = cat_dict
request.session['score'] = 0
return render(request, 'AIP/quizbucket.html',context)
else:
return render(request, 'AIP/index.html')
def check_expiry():
    datetimeFormat = '%Y-%m-%d %H:%M:%S'
    date1 = datetime.datetime.now().strftime(datetimeFormat)
    queryset = Attendance.objects.all()
    reg_time = [p.expire_time for p in queryset]
    date2 = reg_time[0].strftime(datetimeFormat)
    diff = datetime.datetime.strptime(date2, datetimeFormat) \
        - datetime.datetime.strptime(date1, datetimeFormat)
    print("expire_time :", date2)
    print("login time", date1)
    print("difference", diff)
    print(diff.total_seconds())
    # the quiz link counts as expired once expire_time is under 5 seconds away
    return diff.total_seconds() < 5
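# A minimal alternative sketch of the same check without the string
# round-trips; it assumes naive datetimes (USE_TZ off) and uses the most
# recent Attendance row, and is not wired into any view:
def check_expiry_direct():
    latest = Attendance.objects.latest('expire_time')
    remaining = latest.expire_time - datetime.datetime.now()
    return remaining.total_seconds() < 5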
def takequiz(request, pk):
if request.user.is_authenticated:
expired = check_expiry()
if expired == True:
return render(request, 'AIP/notfound.html')
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
request.session['user'] = request.user.get_full_name()
user = request.session['user']
request.session['email'] = request.user.email
email = request.session['email']
Trainee_Attendance.objects.create(trainee_email=email,login_time=now)
quiz = get_object_or_404(Quiz, pk=pk)
quiz_str = json.loads(quiz.quiz_questions)
total = len(quiz_str)
if request.method == 'POST':
q_no = request.session['q_no']
total_q_asked = request.session['total_q_asked']
total_q_ans_correct = request.session['total_q_ans_correct']
counter = request.session['counter']
score = request.session['score']
cat_dict = request.session['cat_dict']
user = request.session['user']
question = get_object_or_404(Question, pk=quiz_str[q_no])
context = {'total_q_asked': total_q_asked, 'question': question}
option = request.POST.get('options')
q = Question(question.pk)
ans = Answer()
ans.question = q
question.no_times_ques_served += 1
total_q_asked += 1
if question.q_answer == option:
ans.ans_option = option
ans.is_correct = True
question.no_times_anwered_correctly += 1
total_q_ans_correct += 1
cat_dict[question.q_cat] += 1
ans.save()
else:
ans.ans_option = option
ans.is_correct = False
question.no_times_anwered_incorrectly += 1
ans.save()
Question.objects.filter(pk=q.pk).update(no_times_ques_served=question.no_times_ques_served,
no_times_anwered_correctly=question.no_times_anwered_correctly,
no_times_anwered_incorrectly=question.no_times_anwered_incorrectly)
q_no += 1
if q_no == total or request.POST.get('END') == 'STOP':
score1 = (total_q_ans_correct / (total_q_asked - 1)) * 100
total_ans_incorrect = ((total_q_asked-1) - total_q_ans_correct)
score = round(score1)
cat_scores = json.dumps(cat_dict)
Result.objects.create(c_user=user,c_email=email, c_quiz_name=pk,c_tot_score=score,c_cat_scores=cat_scores,
c_total_q_asked=(total_q_asked-1),c_total_ans_correct=total_q_ans_correct,
c_total_ans_incorrect=total_ans_incorrect)
score_context = {'score': score, 'cat_dict': cat_dict, 'total_q_asked': total_q_asked - 1,
'total_q_ans_correct': total_q_ans_correct}
send_mail(email,score,cat_dict)
return render(request, 'AIP/report.html', score_context)
request.session['q_no'] = q_no
request.session['score'] = score
request.session['total_q_asked'] = total_q_asked
request.session['total_q_ans_correct'] = total_q_ans_correct
request.session['cat_dict'] = cat_dict
question = get_object_or_404(Question, pk=quiz_str[q_no])
quizname = request.session['quizname']
context = {'total_q_asked': total_q_asked, 'question': question,'quizname':quizname}
return render(request, 'AIP/quizsimple.html', context)
else:
request.session['q_no'] = 0
request.session['total_q_asked'] = 1
request.session['total_q_ans_correct'] = 0
request.session['counter'] = 0
cat_dict = {'Introduction': 0, 'Syntax': 0, 'OOPS': 0, 'NativeDataTypes': 0, 'FileAndExceptionHandling': 0,
'Function': 0, 'Advanced': 0, 'All': 0}
request.session['cat_dict'] = cat_dict
request.session['score'] = 0
q_no = request.session['q_no']
total_q_asked = request.session['total_q_asked']
question = get_object_or_404(Question, pk=quiz_str[q_no])
quizname = request.session['quizname']
context = {'total_q_asked': total_q_asked, 'question': question,'quizname':quizname}
return render(request, 'AIP/quizsimple.html', context)
else:
request.session['quizname'] = '{}'.format(pk)
quizname = request.session['quizname']
context = {'quizname':quizname}
settings.LOGIN_REDIRECT_URL = '/'
return render(request, 'AIP/index.html',context)
def send_mail(email,score,cat_dict):
print(email)
score_data= pd.DataFrame(
{
'category score':cat_dict,
}
)
#recipients = [email,'sukhpreetn@gmail.com','ravi92teja@gmail.com']
recipients = [email, 'sukhpreetn@gmail.com']
    emaillist = [elem.strip() for elem in recipients]  # sendmail expects a flat list of addresses
msg = MIMEMultipart()
msg['Subject'] = "Quiz Hop Scores"
msg['From'] = 'ravi92teja@gmail.com'
html = """\
<html>
<head></head>
<body>
Total Score is : {0}
<p></p>
Category wise split of Scores :
{1}
</body>
</html>
""".format(score,score_data.to_html())
part1 = MIMEText(html, 'html')
msg.attach(part1)
server = smtplib.SMTP('smtp.gmail.com',587)
server.ehlo()
server.starttls()
server.ehlo()
server.login('pyvebdev@gmail.com', 'snwylifpbqtszbxe')
server.sendmail(msg['From'], emaillist , msg.as_string())
@permission_required('admin.can_add_log_entry')
def review(request):
questions = Question.objects.all()
total = len(questions)
context = {'questions': questions, 'total': total}
return render(request, 'AIP/compare.html', context)
@permission_required('admin.can_add_log_entry')
def reviewquiz(request,pk):
quiz = get_object_or_404(Quiz, pk=pk)
quiz_str = json.loads(quiz.quiz_questions)
total = len(quiz_str)
questions = []
for qid in quiz_str:
ques = get_object_or_404(Question,pk=qid)
questions.append(ques)
context = {'questions': questions,'total':total,'subject':questions[0].q_subject,'category':questions[0].q_cat}
return render(request, 'AIP/compare.html',context)
def markattendance(request, pk):
if request.method == 'POST':
trainer = request.POST.get('trainer')
trainees = request.POST.get('trainees')
expires = request.POST.get('expires')
Attendance.objects.create(trainer_name=trainer, trainee_emails=trainees, expire_time=expires)
trainee_list = trainees.split("\r\n")
send_mail_attendance(trainee_list,pk)
#request.session['fromattend'] = 'attend'
return redirect('AIP:showattendance', pk=pk)
else:
return render(request, 'AIP/markattendance.html')
def showattendance(request, pk):
results = Trainee_Attendance.objects.all()
context = {'results':results,'pk':pk}
return render(request, 'AIP/showattendance.html',context)
def send_mail_attendance(trainee_list,pk):
q_name = str(pk)
#url = "http://sukhpreetn.pythonanywhere.com/"
url = "http://127.0.0.1:8000/"
recipients = trainee_list
    emaillist = [elem.strip() for elem in recipients]  # sendmail expects a flat list of addresses
msg = MIMEMultipart()
msg['Subject'] = "Quiz Hop : Mark your attendance"
msg['From'] = 'ravi92teja@gmail.com'
html = """\
<html>
<head></head>
<body>
Login <a href= {0}> here </a>
</body>
</html>
""".format(url)
part1 = MIMEText(html, 'html')
msg.attach(part1)
server = smtplib.SMTP('smtp.gmail.com',587)
server.ehlo()
server.starttls()
server.ehlo()
server.login('pyvebdev@gmail.com', 'snwylifpbqtszbxe')
server.sendmail(msg['From'], emaillist , msg.as_string())
|
{"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]}
|
16,230
|
sukhpreetn/QuizApp
|
refs/heads/master
|
/AIP/migrations/0005_attendance.py
|
# Generated by Django 3.0.3 on 2020-04-03 18:25
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AIP', '0004_result_c_email'),
]
operations = [
migrations.CreateModel(
name='Attendance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('trainer_name', models.CharField(default='', max_length=200)),
('trainee_emails', models.TextField(null=True)),
('register_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
|
{"/AIP/forms.py": ["/AIP/models.py"], "/AIP/admin.py": ["/AIP/models.py"], "/AIP/views.py": ["/AIP/models.py", "/AIP/forms.py"]}
|
16,241
|
jsharples/usgs_nwis
|
refs/heads/master
|
/usgs_nwis/usgs_nwis.py
|
__all__ = ['SitesQuery', 'BaseQuery', 'DataBySites']
from urllib import parse, request
import json
from datetime import datetime, timedelta
import gzip
import io
class pyUSGSError(Exception):
pass
class BaseQuery(object):
"""
The basic query class to access the USGS water data service
Parameters
----------
major_filter: dict
Single key value pair, values can be lists, keys must be one of:
        * sites - for a list of specific site ids
        * stateCd - for a state abbreviation e.g. "ny"
        * huc - for a list of Hydrologic Unit Codes
        * bBox - specifying a lat/long bounding box
* countyCd - for a list of county numbers
Each query to the USGS NWIS must include one, and only one, of these filters.
service: str
The service to query, 'dv' for daily values, 'iv' for instantaneous
data_format: str
        The format in which to get the data. Default is `json`; if changed, the `get_data` function will not work.
"""
def __init__(self, major_filter, service='dv', data_format = 'json'):
self._format = {'format': data_format}
self.allowed_filters = ["sites","stateCd","huc","bBox","countyCd"]
if list(major_filter.keys())[0] in self.allowed_filters:
self.major_filter = major_filter
else:
raise ValueError("major_filter must be one of: {}".format(', '.join(self.allowed_filters)))
self.base_url = "https://waterservices.usgs.gov/nwis/{}/?".format(service)
self.data = None
self.raw_data = None
def get_data(self, **kwargs):
"""
        Get data from the USGS webservice and parse it to a python dictionary.
Parameters
----------
**kwargs : dict
A dictionary specifying the search and filter items for this query.
Returns
----------
dict
A dictionary of the requested data
"""
if not self.raw_data:
self._get_raw_data(**kwargs)
self.data = json.loads(self.raw_data)
return self.data
def _make_request_url(self, **kwargs):
"""
Make the request URL from kwargs
"""
kwargs.update(self.major_filter)
kwargs.update(self._format)
        for arg in kwargs.keys():
            # multiple values must be separated by a comma
            if not isinstance(kwargs[arg], str):
                try:
                    kwargs[arg] = ','.join(map(str, kwargs[arg]))
                except TypeError:
                    # non-iterable scalar value, leave as-is
                    pass
return self.base_url + parse.urlencode(kwargs, doseq=True)
def _get_raw_data(self, **kwargs):
"""
Get the raw data response
"""
self.request_url = self._make_request_url(**kwargs)
#the USGS requests that users use gzip where possible
data_request = request.Request(
self.request_url,
headers={"Accept-Encoding": "gzip"})
data_response = request.urlopen(data_request)
if data_response.info().get('Content-Encoding') == 'gzip':
result = gzip.decompress(data_response.read())
else:
result = data_response.read()
self.raw_data = result.decode(data_response.info().get_content_charset('utf-8'))
return self.raw_data
@staticmethod
def _date_parse(str_date):
"""
Function for parsing dates.
        Note that the USGS uses ISO 8601 for date formats, including a ':' in the timezone.
There does not appear to be a simple way to parse this to a datetime object.
"""
if len(str_date) == 29 and str_date[-3]==':':
str_date = str_date[:-3]+str_date[-2:]
return datetime.strptime(str_date,'%Y-%m-%dT%H:%M:%S.%f%z')
if len(str_date) == 23:
return datetime.strptime(str_date,'%Y-%m-%dT%H:%M:%S.%f')
if len(str_date) == 19:
return datetime.strptime(str_date,'%Y-%m-%dT%H:%M:%S')
if len(str_date) == 16:
return datetime.strptime(str_date,'%Y-%m-%dT%H:%M')
if len(str_date) == 10:
return datetime.strptime(str_date,'%Y-%m-%d')
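        # example: '2019-06-01T12:30:00.000-05:00' (29 chars, colon in the
        # offset) has that colon stripped and then parses with %z; strings of
        # any other length fall through and the method returns None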
class SitesQuery(BaseQuery):
"""
Class to access the Site Service
Parameters
----------
major_filter: dict
Single key value pair, values can be lists, keys must be one of:
        * sites - for a list of specific site ids
        * stateCd - for a state abbreviation e.g. "ny"
        * huc - for a list of Hydrologic Unit Codes
        * bBox - specifying a lat/long bounding box
* countyCd - for a list of county numbers
Each query to the USGS NWIS must include one, and only one, of these filters.
    Notes
    ----------
    The service is fixed to 'site' and the data format to 'rdb', since the
    Site Service does not offer JSON.
"""
def __init__(self, major_filter):
super().__init__(major_filter = major_filter, service = 'site', data_format = 'rdb')
self.sites = None
#we cannot use BaseQuery.get_data because the Site Service does not offer JSON
def get_data(self, **kwargs):
"""
        Get data from the USGS Site Service and parse it to a python dictionary.
Parameters
----------
**kwargs : dict
A dictionary specifying the search and filter items for this query.
Returns
----------
dict
A dictionary of the requested site data
"""
if not self.raw_data:
self.raw_data = self._get_raw_data(**kwargs)
info = ''
header = []
data = []
n=0
for l in self.raw_data.split('\n'):
if len(l) > 0:
if l[0] == '#':
info += l + '\n'
else:
if n <3:
header.append(l.split('\t'))
n += 1
else:
data.append(l.split('\t'))
data = [{header[0][x]: y[x] for x in range(len(header[0]))} for y in data]
self.data = {'data':data, 'info':info}
return self.data
def get_site_ids(self, **kwargs):
"""
Create a list of the sites found by this query.
Parameters
----------
**kwargs : dict
A dictionary specifying the search and filter items for this query.
Returns
----------
list
A list of site IDs matching the search and filters for this query.
"""
if not self.data:
self.get_data(**kwargs)
self.sites = [s['site_no'] for s in self.data['data']]
return self.sites
class DataBySites(BaseQuery):
"""
    Class to access data based on a list of sites.
Parameters
----------
sites: list
A list of sites IDs to query for data
service: str
The service to query, 'dv' for daily values, 'iv' for instantaneous
**kwargs : dict
A dictionary specifying the search and filter items for this query.
"""
def __init__(self, sites, service='dv', **kwargs):
super().__init__(major_filter = {"sites":sites}, service=service)
self.data = self.get_data(**kwargs)
self.core_data = None
def make_core_data(self):
"""
Make a simplified version of the data containing only 'core' data fields.
Parameters
----------
none
Returns
----------
dict
A simplified dictionary of the requested site data
"""
core_data = []
for ts in self.data['value']['timeSeries']:
core_data.append(dict(
location = ts['sourceInfo']['geoLocation']['geogLocation'],
name = ts['sourceInfo']['siteName'],
site = ts['sourceInfo']['siteCode'][0]['value'],
unit = ts['variable']['unit']['unitCode'],
description = ts['variable']['variableDescription'],
qual_codes = {x['qualifierCode']: x['qualifierDescription'] for x in ts['values'][0]['qualifier']},
data = ts['values'][0]['value'],
time_zone = ts['sourceInfo']['timeZoneInfo']['defaultTimeZone']['zoneOffset']
))
self.core_data = core_data
return self.core_data
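# A minimal usage sketch (hedged): look up sites for a state, then pull a week
# of daily discharge values for the first few. 'parameterCd' and 'period' are
# standard NWIS query parameters; the specific values here are illustrative.
if __name__ == '__main__':
    site_ids = SitesQuery({'stateCd': 'ny'}).get_site_ids(parameterCd='00060')
    dv = DataBySites(site_ids[:5], service='dv', parameterCd='00060', period='P7D')
    for ts in dv.make_core_data():
        print(ts['name'], ts['unit'], len(ts['data']))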
|
{"/usgs_nwis/__init__.py": ["/usgs_nwis/usgs_nwis.py"]}
|
16,242
|
jsharples/usgs_nwis
|
refs/heads/master
|
/usgs_nwis/__init__.py
|
from .usgs_nwis import *
|
{"/usgs_nwis/__init__.py": ["/usgs_nwis/usgs_nwis.py"]}
|
16,243
|
Anarchid/uberserver
|
refs/heads/master
|
/server.py
|
#!/usr/bin/env python
# coding=utf-8
try:
import thread
except:
# thread was renamed to _thread in python 3
import _thread
import traceback, signal, socket, sys
try:
from urllib2 import urlopen
except:
# The urllib2 module has been split across several modules in Python 3.0
from urllib.request import urlopen
sys.path.append("protocol")
sys.path.append(".")
from DataHandler import DataHandler
from Client import Client
from NATServer import NATServer
from Dispatcher import Dispatcher
from XmlRpcServer import XmlRpcServer
import ip2country # just to make sure it's downloaded
import ChanServ
# uncomment for debugging deadlocks, creates a stacktrace at the given interval to stdout
#import stacktracer
#stacktracer.trace_start("trace.html",interval=5,auto=True) # Set auto flag to always update file!
_root = DataHandler()
_root.parseArgv(sys.argv)
try:
signal.SIGHUP
def sighup(sig, frame):
_root.console_write('Received SIGHUP.')
if _root.sighup:
_root.reload()
signal.signal(signal.SIGHUP, sighup)
except AttributeError:
pass
_root.console_write('-'*40)
_root.console_write('Starting uberserver...\n')
host = ''
port = _root.port
natport = _root.natport
backlog = 100
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR,
server.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 )
# fixes TIME_WAIT :D
server.bind((host,port))
server.listen(backlog)
try:
natserver = NATServer(natport)
try:
thread.start_new_thread(natserver.start,())
except NameError:
_thread.start_new_thread(natserver.start,())
natserver.bind(_root)
except socket.error:
print('Error: Could not start NAT server - hole punching will be unavailable.')
_root.console_write()
_root.console_write('Detecting local IP:')
try: local_addr = socket.gethostbyname(socket.gethostname())
except: local_addr = '127.0.0.1'
_root.console_write(local_addr)
_root.console_write('Detecting online IP:')
try:
timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(5)
web_addr = urlopen('http://springrts.com/lobby/getip.php').read()
socket.setdefaulttimeout(timeout)
_root.console_write(web_addr)
except:
web_addr = local_addr
_root.console_write('not online')
_root.console_write()
_root.local_ip = local_addr
_root.online_ip = web_addr
_root.console_write('Using %i client handling thread(s).'%_root.max_threads)
dispatcher = Dispatcher(_root, server)
_root.dispatcher = dispatcher
chanserv = True
if chanserv:
address = ((web_addr or local_addr), 0)
chanserv = ChanServ.ChanServClient(_root, address, _root.session_id)
dispatcher.addClient(chanserv)
_root.chanserv = chanserv
try:
xmlrpcserver = XmlRpcServer(_root, _root.xmlhost, _root.xmlport)
try:
thread.start_new_thread(xmlrpcserver.start,())
except NameError:
_thread.start_new_thread(xmlrpcserver.start,())
_root.console_write('Listening for XMLRPC clients on %s:%d' % (_root.xmlhost, _root.xmlport))
except socket.error:
print('Error: Could not start XmlRpcServer.')
try:
dispatcher.pump()
except KeyboardInterrupt:
_root.console_write()
_root.console_write('Server killed by keyboard interrupt.')
except:
_root.error(traceback.format_exc())
_root.console_write('Deep error, exiting...')
_root.console_print_step() # try to flush output buffer to log file
# _root.console_write('Killing handlers.')
# for handler in _root.clienthandlers:
# handler.running = False
_root.console_write('Killing clients.')
for client in dict(_root.clients):
try:
conn = _root.clients[client].conn
if conn: conn.close()
except: pass # for good measure
server.close()
_root.running = False
_root.console_print_step()
memdebug = False
if memdebug:
recursion = []
names = {}
def dump(obj, tabs=''):
if obj in recursion: return str(obj)
else: recursion.append(obj)
try:
			if type(obj) in (list, set):
return [dump(var) for var in obj]
elif type(obj) in (str, unicode, int, float):
return obj
elif type(obj) == dict:
output = {}
for key in obj:
output[key] = dump(obj[key], tabs+'\t')
else:
output = {}
ovars = vars(obj)
for key in ovars:
if key in names: names[key] += 1
else: names[key] = 1
output[key] = dump(ovars[key], tabs+'\t')
return '\n'.join(['%s%s:\n%s\t%s' % (tabs, key, tabs, output[key]) for key in output]) if output else {}
except: return 'no __dict__'
print('Dumping memleak info.')
f = open('dump.txt', 'w')
f.write(dump(_root))
f.close()
counts = {}
for name in names:
count = names[name]
if count in counts:
counts[count].append(name)
else:
counts[count] = [name]
f = open('counts.txt', 'w')
for key in reversed(sorted(counts)):
f.write('%s: %s\n' % (key, counts[key]))
f.close()
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,244
|
Anarchid/uberserver
|
refs/heads/master
|
/SayHooks.py
|
import inspect, sys, os, types, time, string
_permissionlist = ['admin', 'adminchan', 'mod', 'modchan', 'chanowner', 'chanadmin', 'chanpublic', 'public', 'battlehost', 'battlepublic']
_permissiondocs = {
'admin':'Admin Commands',
'adminchan':'Admin Commands (channel)',
'mod':'Moderator Commands',
'modchan':'Moderator Commands (channel)',
'chanowner':'Channel Owner Commands (channel)',
'chanadmin':'Channel Admin Commands (channel)',
'chanpublic':'Public Commands (channel)',
'public':'Public Commands',
'battlepublic':'Public Commands (battle)',
'battlehost':'Battle Host Commands',
}
def _erase():
l = dict(globals())
for iter in l:
if not iter == '_erase':
del globals()[iter]
global bad_word_dict
global bad_site_list
bad_word_dict = {}
bad_site_list = []
def _update_lists():
try:
f = open('bad_words.txt', 'r')
for line in f.readlines():
if line.count(' ') < 1:
bad_word_dict[line.strip()] = '***'
else:
sline = line.strip().split(' ', 1)
bad_word_dict[sline[0]] = ' '.join(sline[1:])
f.close()
except Exception as e:
print('Error parsing profanity list: %s' %(e))
try:
f = open('bad_sites.txt', 'r')
for line in f.readlines():
line = line.strip()
if line and not line in bad_site_list: bad_site_list.append(line)
f.close()
except Exception as e:
print('Error parsing shock site list: %s' %(e))
def _clear_lists():
global bad_word_dict
global bad_site_list
bad_word_dict = {}
bad_site_list = []
_update_lists()
chars = string.ascii_letters + string.digits
def _process_word(word):
if word == word.upper(): uppercase = True
else: uppercase = False
lword = word.lower()
if lword in bad_word_dict:
word = bad_word_dict[lword]
if uppercase: word = word.upper()
return word
def _nasty_word_censor(msg):
msg = msg.lower()
for word in bad_word_dict.keys():
if word.lower() in msg: return False
return True
def _word_censor(msg):
words = []
word = ''
letters = True
for letter in msg:
if bool(letter in chars) == bool(letters): word += letter
else:
letters = not bool(letters)
words.append(word)
word = letter
words.append(word)
newmsg = []
for word in words:
newmsg.append(_process_word(word))
return ''.join(newmsg)
def _site_censor(msg):
testmsg1 = ''
testmsg2 = ''
for letter in msg:
if not letter: continue
if letter.isalnum():
testmsg1 += letter
testmsg2 += letter
elif letter in './%':
testmsg2 += letter
for site in bad_site_list:
if site in msg or site in testmsg1 or site in testmsg2:
return # 'I think I can post shock sites, but I am wrong.'
return msg
def _spam_enum(client, chan):
now = time.time()
bonus = 0
already = []
times = [now]
for when in dict(client.lastsaid[chan]):
t = float(when)
if t > now-5: # check the last five seconds # can check a longer period of time if old bonus decay is included, good for 2-3 second spam, which is still spam.
for message in client.lastsaid[chan][when]:
times.append(t)
if message in already:
bonus += 2 * already.count(message) # repeated message
if len(message) > 50:
bonus += min(len(message), 200) * 0.01 # long message: 0-2 bonus points based linearly on length 0-200
bonus += 1 # something was said
already.append(message)
else: del client.lastsaid[chan][when]
times.sort()
last_time = None
for t in times:
if last_time:
diff = t - last_time
if diff < 1:
bonus += (1 - diff) * 1.5
last_time = t
if bonus > 7: return True
else: return False
def _spam_rec(client, chan, msg):
now = str(time.time())
if not chan in client.lastsaid: client.lastsaid[chan] = {}
if not now in client.lastsaid[chan]:
client.lastsaid[chan][now] = [msg]
else:
client.lastsaid[chan][now].append(msg)
def _chan_msg_filter(self, client, chan, msg):
username = client.username
channel = self._root.channels[chan]
if channel.isMuted(client): return msg # client is muted, no use doing anything else
if channel.antispam and not channel.isOp(client): # don't apply antispam to ops
_spam_rec(client, chan, msg)
if _spam_enum(client, chan):
channel.muteUser(self._root.chanserv, client, 15, ip=True, quiet=True)
# this next line is necessary, because users aren't always muted i.e. you can't mute channel founders or moderators
if channel.isMuted(client):
channel.channelMessage('%s was muted for spamming.' % username)
#if quiet: # maybe make quiet a channel-wide setting, so mute/kick/op/etc would be silent
# client.Send('CHANNELMESAGE %s You were quietly muted for spamming.'%chan)
return ''
if channel.censor:
msg = _word_censor(msg)
if channel.antishock:
msg = _site_censor(msg)
return msg
def hook_SAY(self, client, chan, msg):
user = client.username
channel = self._root.channels[chan]
msg = _chan_msg_filter(self, client, chan, msg)
return msg
def hook_SAYEX(self, client, chan, msg):
msg = _chan_msg_filter(self, client, chan, msg)
return msg
def hook_SAYPRIVATE(self, client, target, msg):
return _site_censor(msg)
def hook_SAYBATTLE(self, client, battle_id, msg):
return msg # no way to respond in battles atm
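# An illustrative sketch (hypothetical entries) of how the filters combine:
# _word_censor swaps whole words via bad_word_dict while preserving case, and
# _site_censor suppresses the entire message when a listed site appears.
#
#   bad_word_dict['frak'] = 'frell'
#   _word_censor('FRAK that') -> 'FRELL that'
#   bad_site_list.append('shock.example')
#   _site_censor('see shock.example') -> None (message dropped)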
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,245
|
Anarchid/uberserver
|
refs/heads/master
|
/ip2country.py
|
from pygeoip import pygeoip
import traceback
dbfile = 'GeoIP.dat'
def update():
	gzipfile = dbfile + ".gz"
	# the downloaded database is binary, so both files must be opened in 'wb'
	f = open(gzipfile, 'wb')
	dburl = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz'
	import urllib2
	import gzip
	print("Downloading %s ..." %(dburl))
	response = urllib2.urlopen(dburl)
	f.write(response.read())
	f.close()
	print("done!")
	f = gzip.open(gzipfile)
	db = open(dbfile, 'wb')
	db.write(f.read())
	f.close()
	db.close()
try:
f=open(dbfile,'r')
f.close()
except:
print("%s doesn't exist, downloading..." % (dbfile))
update()
def loaddb():
global geoip
try:
geoip = pygeoip.Database(dbfile)
return True
except Exception as e:
print("Couldn't load %s: %s" % (dbfile, str(e)))
print(traceback.format_exc())
return False
working = loaddb()
def lookup(ip):
if not working: return '??'
addrinfo = geoip.lookup(ip)
if not addrinfo.country: return '??'
return addrinfo.country
def reloaddb():
	global working
	if not working: return
	# record the result so a failed reload is reflected in lookup()
	working = loaddb()
"""
print lookup("37.187.59.77")
print lookup("77.64.139.108")
print lookup("8.8.8.8")
print lookup("0.0.0.0")
import csv
with open('/tmp/test.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='"')
for row in reader:
ip = row[0]
print("%s %s" %(ip, lookup(row[0])))
"""
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,246
|
Anarchid/uberserver
|
refs/heads/master
|
/protocol/Channel.py
|
from AutoDict import AutoDict
import time
class Channel(AutoDict):
	def __init__(self, root, name, id = 0, users=None, admins=None,
				ban=None, allow=None, autokick='ban', chanserv=False,
				owner='', mutelist=None, antispam=False,
				censor=False, antishock=False, topic=None,
				key=None, history=False, **kwargs):
		self.id = id
		self._root = root
		self.name = name
		# use None defaults to avoid the shared-mutable-default pitfall: a
		# default list/dict in the signature would be reused by every Channel
		self.users = users if users is not None else []
		self.admins = admins if admins is not None else []
		self.ban = ban if ban is not None else {}
		self.allow = allow if allow is not None else []
		self.autokick = autokick
		self.chanserv = chanserv
		self.owner = owner
		self.mutelist = mutelist if mutelist is not None else {}
		self.antispam = antispam
		self.censor = censor
		self.antishock = antishock
		self.topic = topic
		self.key = key
		self.history = history
		self.__AutoDictInit__()
if self._root and chanserv and self._root.chanserv and not name in self._root.channels:
self._root.chanserv.Send('JOIN %s' % self.name)
def broadcast(self, message):
self._root.broadcast(message, self.name)
def channelMessage(self, message):
self.broadcast('CHANNELMESSAGE %s %s' % (self.name, message))
def register(self, client, owner):
self.owner = owner.db_id
def addUser(self, client):
username = client.username
if not username in self.users:
self.users.append(username)
self.broadcast('JOINED %s %s' % (self.name, username))
def removeUser(self, client, reason=None):
chan = self.name
username = client.username
if username in self.users:
self.users.remove(username)
if self.name in client.channels:
client.channels.remove(chan)
if reason and len(reason) > 0:
self._root.broadcast('LEFT %s %s %s' % (chan, username, reason), chan)
else:
self._root.broadcast('LEFT %s %s' % (chan, username), chan)
def isAdmin(self, client):
return client and ('admin' in client.accesslevels)
def isMod(self, client):
return client and (('mod' in client.accesslevels) or self.isAdmin(client))
def isFounder(self, client):
return client and ((client.db_id == self.owner) or self.isMod(client))
def isOp(self, client):
return client and ((client.db_id in self.admins) or self.isFounder(client))
def getAccess(self, client): # return client's security clearance
return 'mod' if self.isMod(client) else\
('founder' if self.isFounder(client) else\
('op' if self.isOp(client) else\
'normal'))
def isMuted(self, client):
return client.db_id in self.mutelist
def getMuteMessage(self, client):
if self.isMuted(client):
m = self.mutelist[client.db_id]
if m['expires'] == 0:
return 'muted forever'
else:
# TODO: move format_time, bin2dec, etc to a utilities class or module
return 'muted for the next %s.' % (client._protocol._time_until(m['expires']))
else:
return 'not muted'
def isAllowed(self, client):
if self.autokick == 'allow':
return (self.isOp(client) or (client.db_id in self.allow)) or 'not allowed here'
elif self.autokick == 'ban':
return (self.isOp(client) or (client.db_id not in self.ban)) or self.ban[client.db_id]
	def setTopic(self, client, topic):
		# check the previous topic before overwriting it, so 'Topic disabled.'
		# is only announced when there actually was one
		had_topic = bool(self.topic)
		if topic in ('*', None):
			if had_topic:
				self.channelMessage('Topic disabled.')
			topicdict = {}
		else:
			self.channelMessage('Topic changed.')
			topicdict = {'user':client.username, 'text':topic, 'time':time.time()}
			self.broadcast('CHANNELTOPIC %s %s %s %s'%(self.name, client.username, topicdict['time'], topic))
		self.topic = topicdict
def setKey(self, client, key):
if key in ('*', None):
if self.key:
self.key = None
self.channelMessage('<%s> unlocked this channel' % client.username)
else:
self.key = key
self.channelMessage('<%s> locked this channel with a password' % client.username)
def setFounder(self, client, target):
if not target: return
self.owner = target.db_id
self.channelMessage("<%s> has just been set as this channel's founder by <%s>" % (target.username, client.username))
def opUser(self, client, target):
if target and not target.db_id in self.admins:
self.admins.append(target.db_id)
self.channelMessage("<%s> has just been added to this channel's operator list by <%s>" % (target.username, client.username))
def deopUser(self, client, target):
if target and target.db_id in self.admins:
self.admins.remove(target.db_id)
self.channelMessage("<%s> has just been removed from this channel's operator list by <%s>" % (target.username, client.username))
def kickUser(self, client, target, reason=''):
if self.isFounder(target): return
if target and target.username in self.users:
target.Send('FORCELEAVECHANNEL %s %s %s' % (self.name, client.username, reason))
self.channelMessage('<%s> has kicked <%s> from the channel%s' % (client.username, target.username, (' (reason: %s)'%reason if reason else '')))
self.removeUser(target, 'kicked from channel%s' % (' (reason: %s)'%reason if reason else ''))
def banUser(self, client, target, reason=''):
if self.isFounder(target): return
if target and not target.db_id in self.ban:
self.ban[target.db_id] = reason
self.kickUser(client, target, reason)
self.channelMessage('<%s> has been banned from this channel by <%s>' % (target.username, client.username))
def unbanUser(self, client, target):
if target and target.db_id in self.ban:
del self.ban[target.db_id]
self.channelMessage('<%s> has been unbanned from this channel by <%s>' % (target.username, client.username))
	def allowUser(self, client, target):
		# the allow list must track the target, not the acting client
		if target and not target.db_id in self.allow:
			self.allow.append(target.db_id)
			self.channelMessage('<%s> has been allowed in this channel by <%s>' % (target.username, client.username))
	def disallowUser(self, client, target):
		if target and target.db_id in self.allow:
			self.allow.remove(target.db_id)
			self.channelMessage('<%s> has been disallowed in this channel by <%s>' % (target.username, client.username))
def muteUser(self, client, target, duration=0, ip=False, quiet=False):
if self.isFounder(target): return
		if target and not target.db_id in self.mutelist: # check the target, not the acting client
if not quiet:
self.channelMessage('<%s> has muted <%s>' % (client.username, target.username))
try:
duration = float(duration)*60
if duration < 1:
duration = 0
else:
duration = time.time() + duration
except: duration = 0
self.mutelist[target.db_id] = {'expires':duration, 'ip':ip, 'quiet':quiet}
def unmuteUser(self, client, target):
if target and target.db_id in self.mutelist:
del self.mutelist[target.db_id]
self.channelMessage('<%s> has unmuted <%s>' % (client.username, target.username))
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,247
|
Anarchid/uberserver
|
refs/heads/master
|
/protocol/Battle.py
|
from AutoDict import AutoDict
class Battle(AutoDict):
	def __init__(self, root, id, type, natType, password, port, maxplayers,
				hashcode, rank, maphash, map, title, modname,
				passworded, host, users, spectators=0,
				startrects=None, disabled_units=None, pending_users=None,
				authed_users=None, bots=None, script_tags=None,
				replay_script=None, replay=False,
				sending_replay_script=False, locked=False,
				engine=None, version=None):
		self._root = root
		self.id = id
		self.type = type
		self.natType = natType
		self.password = password
		self.port = port
		self.maxplayers = maxplayers
		self.spectators = spectators
		self.hashcode = hashcode
		self.rank = rank
		self.maphash = maphash
		self.map = map
		self.title = title
		self.modname = modname
		self.passworded = passworded
		self.users = users
		self.host = host
		# use None defaults to avoid the shared-mutable-default pitfall: a
		# default container in the signature would be reused by every Battle
		self.startrects = startrects if startrects is not None else {}
		self.disabled_units = disabled_units if disabled_units is not None else []
		self.pending_users = pending_users if pending_users is not None else set()
		self.authed_users = authed_users if authed_users is not None else set()
		self.engine = (engine or 'spring').lower()
		self.version = version or root.latestspringversion
		self.bots = bots if bots is not None else {}
		self.script_tags = script_tags if script_tags is not None else {}
		self.replay_script = replay_script if replay_script is not None else {}
		self.replay = replay
		self.sending_replay_script = sending_replay_script
		self.locked = locked
		self.__AutoDictInit__()
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,248
|
Anarchid/uberserver
|
refs/heads/master
|
/XmlRpcServer.py
|
#
# xmlrpc class for auth of replays.springrts.com
#
# TODO:
# - remove dependency to Protocol.py
# - move SQLAlchemy calls to SQLUsers.py
# -> remove _FakeClient
import BaseHTTPServer
from SimpleXMLRPCServer import SimpleXMLRPCServer
from base64 import b64encode
import os.path
import logging
from logging.handlers import TimedRotatingFileHandler
from protocol import Protocol
from CryptoHandler import MD5LEG_HASH_FUNC as LEGACY_HASH_FUNC
from SQLUsers import User
# logging
xmlrpc_logfile = os.path.join(os.path.dirname(__file__), "xmlrpc.log")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = TimedRotatingFileHandler(xmlrpc_logfile, when="midnight", backupCount=6)
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-5s %(module)s.%(funcName)s:%(lineno)d %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
def _xmlrpclog(self, format, *args):
logger.debug("%s - %s" , self.client_address[0], format%args)
# overwrite default logger, because it will otherwise spam main server log
BaseHTTPServer.BaseHTTPRequestHandler.log_message = _xmlrpclog
class XmlRpcServer(object):
"""
XMLRPC service, exported functions are in class _RpcFuncs
"""
def __init__(self, root, host, port):
self._root = root
self.host = host
self.port = port
self._server = SimpleXMLRPCServer((self.host, self.port))
self._server.register_introspection_functions()
self._server.register_instance(_RpcFuncs(self._root))
def start(self):
logger.info('Listening for XMLRPC clients on %s:%d', self.host, self.port)
self._server.serve_forever()
def shutdown(self):
self._server.shutdown()
class _RpcFuncs(object):
"""
All methods of this class will be exposed via XMLRPC.
"""
def __init__(self, root):
self._root = root
self._proto = Protocol.Protocol(self._root)
def get_account_info(self, username, password):
password_enc = unicode(b64encode(LEGACY_HASH_FUNC(password).digest()))
client = _FakeClient(self._root)
self._proto.in_TESTLOGIN(client, unicode(username), password_enc) # FIXME: don't use Protocol.py
logger.debug("client.reply: %s", client.reply)
if client.reply.startswith("TESTLOGINACCEPT %s" % username):
session = self._root.userdb.sessionmaker() # FIXME: move to SQLUsers.py
db_user = session.query(User).filter(User.username == username).first()
renames = list()
for rename in db_user.renames:
renames.append(rename.original)
if db_user.renames:
renames.append(db_user.renames[-1].new)
renames = set(renames)
result = {"status": 0, "accountid": int(db_user.id), "username": str(db_user.username),
"ingame_time": int(db_user.ingame_time), "email": str(db_user.email),
"aliases": list(renames)}
			try:
				result["country"] = db_user.logins[-1].country
			except (IndexError, AttributeError):
				# the user has no recorded logins
				result["country"] = ""
return result
else:
return {"status": 1}
class _FakeClient(object):
"""
Protocol.Protocol uses this object for communication.
"""
def __init__(self, root):
self._root = root
self.reply = ""
def Send(self, reply):
self.reply = reply
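# A minimal client-side sketch (Python 2, matching this module's imports);
# host and port are illustrative and come from _root.xmlhost/_root.xmlport:
#
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://127.0.0.1:8300/')
#   info = proxy.get_account_info('someuser', 'somepass')
#   print info['status']  # 0 on success, 1 on failed auth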
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,249
|
Anarchid/uberserver
|
refs/heads/master
|
/pygeoip/__init__.py
|
# pygeoip from https://code.google.com/p/python-geoip/
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,250
|
Anarchid/uberserver
|
refs/heads/master
|
/Multiplexer.py
|
import time, errno
from select import * # eww hack but saves the other hack of selectively importing constants
class EpollMultiplexer:
def __init__(self):
self.filenoToSocket = {}
self.socketToFileno = {}
self.sockets = set([])
self.output = set([])
self.inMask = EPOLLIN | EPOLLPRI
self.outMask = EPOLLOUT
self.errMask = EPOLLERR | EPOLLHUP
self.poller = epoll()
def register(self, s):
s.setblocking(0)
fileno = s.fileno()
self.filenoToSocket[fileno] = s
self.socketToFileno[s] = fileno # gotta maintain this because fileno() lookups aren't possible on closed sockets
self.sockets.add(s)
self.poller.register(fileno, self.inMask | self.errMask)
def unregister(self, s):
if s in self.sockets:
self.sockets.remove(s)
if s in self.output:
self.output.remove(s)
if s in self.socketToFileno:
fileno = self.socketToFileno[s]
self.poller.unregister(fileno)
del self.socketToFileno[s]
del self.filenoToSocket[fileno]
def setoutput(self, s, ready):
# this if structure means it only scans output once.
if not ready and s in self.output:
self.output.remove(s)
elif not ready:
return
elif ready and s in self.sockets:
self.output.add(s)
if not s in self.socketToFileno: return
eventmask = self.inMask | self.errMask | (self.outMask if ready else 0)
self.poller.modify(s, eventmask) # not valid for select.poll before python 2.6, might need to replace with register() in this context
def pump(self, callback):
while True:
inputs, outputs, errors = self.poll()
callback(inputs, outputs, errors)
def poll(self):
results = []
		try:
			results = self.poller.poll(10)
		except IOError as e:
			# e[0] only works on python 2; e.errno works on both
			if e.errno == errno.EINTR:
				# interrupted system call - this happens when any signal is triggered
				pass
			else:
				raise
inputs = []
outputs = []
errors = []
for fd, mask in results:
try:
s = self.filenoToSocket[fd]
except: # FIXME: socket was already deleted, shouldn't happen, but does!
continue
if mask & self.inMask: inputs.append(s)
if mask & self.outMask: outputs.append(s)
if mask & self.errMask: errors.append(s)
return inputs, outputs, errors
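# A minimal usage sketch (names are illustrative): sockets are registered for
# read events, setoutput() toggles write interest, and pump() loops poll()
# results into a callback forever.
#
#   mux = EpollMultiplexer()
#   mux.register(server_socket)
#   def on_events(inputs, outputs, errors):
#       for s in inputs:
#           pass  # accept new connections / recv data here
#   mux.pump(on_events)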
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,251
|
Anarchid/uberserver
|
refs/heads/master
|
/Client.py
|
import socket, time, sys, thread, ip2country, errno
from collections import defaultdict
from BaseClient import BaseClient
import CryptoHandler
from CryptoHandler import encrypt_sign_message
from CryptoHandler import decrypt_auth_message
from CryptoHandler import int32_to_str
from CryptoHandler import str_to_int32
from CryptoHandler import DATA_MARKER_BYTE
from CryptoHandler import DATA_PARTIT_BYTE
from CryptoHandler import UNICODE_ENCODING
class Client(BaseClient):
'this object represents one server-side connected client'
def __init__(self, root, connection, address, session_id):
'initial setup for the connected client'
self._root = root
self.conn = connection
# detects if the connection is from this computer
if address[0].startswith('127.'):
if root.online_ip:
address = (root.online_ip, address[1])
elif root.local_ip:
address = (root.local_ip, address[1])
self.ip_address = address[0]
self.local_ip = address[0]
self.port = address[1]
self.setFlagByIP(self.ip_address)
self.session_id = session_id
self.db_id = session_id
self.handler = None
self.static = False
self._protocol = None
self.removing = False
self.sendError = False
self.msg_id = ''
self.msg_sendbuffer = []
self.enc_sendbuffer = []
self.sendingmessage = ''
## time-stamps for encrypted data
self.incoming_msg_ctr = 0
self.outgoing_msg_ctr = 1
## note: this NEVER becomes false after LOGIN!
self.logged_in = False
self.status = '12'
self.is_ingame = False
self.cpu = 0
self.access = 'fresh'
self.accesslevels = ['fresh','everyone']
self.channels = []
self.battle_bots = {}
self.current_battle = None
self.battle_bans = []
self.ingame_time = 0
self.went_ingame = 0
self.spectator = False
self.battlestatus = {'ready':'0', 'id':'0000', 'ally':'0000', 'mode':'0', 'sync':'00', 'side':'00', 'handicap':'0000000'}
self.teamcolor = '0'
## copies of the DB User values, set on successful LOGIN
self.set_user_pwrd_salt("", ("", ""))
self.email = ''
self.hostport = None
self.udpport = 0
self.bot = 0
self.floodlimit = {
'fresh':{'msglength':1024*32, 'bytespersecond':1024*32, 'seconds':2},
'user':{'msglength':1024*32, 'bytespersecond':1024*32, 'seconds':10},
'bot':{'msglength':1024, 'bytespersecond':10000, 'seconds':5},
'mod':{'msglength':10000, 'bytespersecond':10000, 'seconds':10},
'admin':{'msglength':10000, 'bytespersecond':100000, 'seconds':10},
}
self.msg_length_history = {}
self.lastsaid = {}
self.current_channel = ''
self.debug = False
self.data = ''
# holds compatibility flags - will be set by Protocol as necessary
self.compat = defaultdict(lambda: False)
self.scriptPassword = None
now = time.time()
self.last_login = now
self.failed_logins = 0
self.register_date = now
self.lastdata = now
self.last_id = 0
self.users = set([]) # session_id
self.battles = set([]) # [battle_id] = [user1, user2, user3, etc]
self.ignored = {}
self._root.console_write('Client connected from %s:%s, session ID %s.' % (self.ip_address, self.port, session_id))
## AES cipher used for encrypted protocol communication
## with this client; starts with a NULL session-key and
## becomes active when client sends SETSHAREDKEY
self.set_aes_cipher_obj(CryptoHandler.aes_cipher(""))
self.set_session_key("")
self.set_session_key_received_ack(False)
def set_aes_cipher_obj(self, obj): self.aes_cipher_obj = obj
def get_aes_cipher_obj(self): return self.aes_cipher_obj
def set_session_key_received_ack(self, b): self.session_key_received_ack = b
def get_session_key_received_ack(self): return self.session_key_received_ack
def set_session_key(self, key): self.aes_cipher_obj.set_key(key)
def get_session_key(self): return (self.aes_cipher_obj.get_key())
def use_secure_session(self): return (len(self.get_session_key()) != 0)
def use_msg_auth_codes(self): return (self._root.use_message_authent_codes)
def set_msg_id(self, msg):
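		## clients may prefix a command with "#<number> "; remember the tag so
		## replies sent from this handler carry the same prefix (see Send),
		## and strip it from the command before dispatching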
self.msg_id = ""
if (not msg.startswith('#')):
return msg
test = msg.split(' ')[0][1:]
if (not test.isdigit()):
return msg
self.msg_id = '#%s ' % test
return (' '.join(msg.split(' ')[1:]))
def setFlagByIP(self, ip, force=True):
cc = ip2country.lookup(ip)
if force or cc != '??':
self.country_code = cc
def Bind(self, handler=None, protocol=None):
if handler: self.handler = handler
if protocol:
if not self._protocol:
protocol._new(self)
self._protocol = protocol
##
## handle data from client
##
def Handle(self, data):
if (self.access in self.floodlimit):
msg_limits = self.floodlimit[self.access]
else:
msg_limits = self.floodlimit['user']
now = int(time.time())
self.lastdata = now # data received, store time to detect disconnects
bytespersecond = msg_limits['bytespersecond']
seconds = msg_limits['seconds']
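		## sliding-window flood check: accumulate bytes received per second,
		## then compare the total over the last <seconds> seconds against the
		## bandwidth allowance for this access level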
if (now in self.msg_length_history):
self.msg_length_history[now] += len(data)
else:
self.msg_length_history[now] = len(data)
total = 0
		for timestamp in dict(self.msg_length_history):
			if (timestamp < now - (seconds - 1)):
				del self.msg_length_history[timestamp]
			else:
				total += self.msg_length_history[timestamp]
if total > (bytespersecond * seconds):
if not self.access in ('admin', 'mod'):
if (self.bot != 1):
					# FIXME: no flood limit for these atm, need to do server-side shaping/bandwidth limiting
self.Send('SERVERMSG No flooding (over %s per second for %s seconds)' % (bytespersecond, seconds))
self.Remove('Kicked for flooding (%s)' % (self.access))
return
## keep appending until we see at least one newline
self.data += data
## if too much data has accumulated without a newline, clear
if (len(self.data) > (msg_limits['msglength'] * 32)):
			self.data = ""
			return
if (self.data.count('\n') == 0):
return
self.HandleProtocolCommands(self.data.split(DATA_PARTIT_BYTE), msg_limits)
def HandleProtocolCommands(self, split_data, msg_limits):
assert(type(split_data) == list)
assert(type(split_data[-1]) == str)
msg_length_limit = msg_limits['msglength']
check_msg_limits = (not ('disabled' in msg_limits))
## either a list of commands, or a list of encrypted data
## blobs which may contain embedded (post-decryption) NLs
##
## note: will be empty if len(split_data) == 1
raw_data_blobs = split_data[: len(split_data) - 1]
## will be a single newline in most cases, or an incomplete
## command which should be saved for a later time when more
## data is in buffer
self.data = split_data[-1]
commands_buffer = []
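		## replay protection for encrypted traffic: every message carries a
		## strictly increasing 32-bit counter, so stale or duplicated counters
		## can be dropped below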
def check_message_timestamp(msg):
ctr = str_to_int32(msg)
if (ctr <= self.incoming_msg_ctr):
return False
self.incoming_msg_ctr = ctr
return True
for raw_data_blob in raw_data_blobs:
if (len(raw_data_blob) == 0):
continue
if (self.use_secure_session()):
dec_data_blob = decrypt_auth_message(self.aes_cipher_obj, raw_data_blob, self.use_msg_auth_codes())
## can only happen in case of an invalid MAC or missing timestamp
if (len(dec_data_blob) < 4):
continue
## handle an encrypted client command, using the AES session key
## previously exchanged between client and server by SETSHAREDKEY
## (this includes LOGIN and REGISTER, key can be set before login)
##
## this assumes (!) a client message to be of the form
## ENCODE(ENCRYPT_AES("CMD ARG1 ARG2 ...", AES_KEY))
## where ENCODE is the standard base64 encoding scheme
##
## if this is not the case (e.g. if a command was sent UNENCRYPTED
## by client after session-key exchange) the decryption will yield
## garbage and command will be rejected
##
## NOTE:
## blocks of encrypted data are always base64-encoded and will be
## separated by newlines, but after decryption might contain more
## embedded newlines themselves (e.g. if encryption was performed
## over *batches* of plaintext commands)
##
## client --> C=ENCODE(ENCRYPT("CMD1 ARG11 ARG12 ...\nCMD2 ARG21 ...\n"))
## server --> DECRYPT(DECODE(C))="CMD1 ARG11 ARG12 ...\nCMD2 ARG21 ...\n"
##
## ignore any replayed messages
if (not check_message_timestamp(dec_data_blob[0: 4])):
continue
split_commands = dec_data_blob[4: ].split(DATA_PARTIT_BYTE)
strip_commands = [(cmd.rstrip('\r')).lstrip(' ') for cmd in split_commands]
else:
if (raw_data_blob[0] == DATA_MARKER_BYTE):
continue
## strips leading spaces and trailing carriage returns
strip_commands = [(raw_data_blob.rstrip('\r')).lstrip(' ')]
commands_buffer += strip_commands
for command in commands_buffer:
if (check_msg_limits and (len(command) > msg_length_limit)):
self.Send('SERVERMSG message-length limit (%d) exceeded: command \"%s...\" dropped.' % (msg_length_limit, command[0: 8]))
else:
self.HandleProtocolCommand(command)
def HandleProtocolCommand(self, cmd):
## probably caused by trailing newline ("abc\n".split("\n") == ["abc", ""])
if (len(cmd) <= 1):
return
self._protocol._handle(self, cmd)
def Remove(self, reason='Quit'):
while self.msg_sendbuffer:
self.FlushBuffer()
self.handler.finishRemove(self, reason)
##
## send data to client
##
def Send(self, data, batch = True):
## don't append new data to buffer when client gets removed
if ((not data) or self.removing):
return
if (self.handler.thread == thread.get_ident()):
data = self.msg_id + data
## this *must* always succeed (protocol operates on
## unicode internally, but is otherwise fully ASCII
## and will never send raw binary data)
if (type(data) == unicode):
data = data.encode(UNICODE_ENCODING)
assert(type(data) == str)
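		## prepend the outgoing 32-bit counter (the peer checks it the same
		## way check_message_timestamp does here), then encrypt and, if
		## enabled, append a message authentication code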
def wrap_encrypt_sign_message(raw_msg):
raw_msg = int32_to_str(self.outgoing_msg_ctr) + raw_msg
enc_msg = encrypt_sign_message(self.aes_cipher_obj, raw_msg, self.use_msg_auth_codes())
self.outgoing_msg_ctr += 1
return enc_msg
buf = ""
if (self.use_secure_session()):
## buffer encrypted data until we get client ACK
## (the most recent message will be at the back)
##
## note: should not normally contain anything of
## value, server has little to send before LOGIN
self.enc_sendbuffer.append(data)
if (self.get_session_key_received_ack()):
self.enc_sendbuffer.reverse()
## encrypt everything in the queue
## message order in reversed queue is newest to
## oldest, but we pop() from the back so client
## receives in proper order
if (batch):
while (len(self.enc_sendbuffer) > 0):
buf += (self.enc_sendbuffer.pop() + DATA_PARTIT_BYTE)
## batch-encrypt into one blob (more efficient)
buf = wrap_encrypt_sign_message(buf)
else:
while (len(self.enc_sendbuffer) > 0):
buf += wrap_encrypt_sign_message(self.enc_sendbuffer.pop() + DATA_PARTIT_BYTE)
else:
buf = data + DATA_PARTIT_BYTE
if (len(buf) == 0):
return
self.msg_sendbuffer.append(buf)
self.handler.poller.setoutput(self.conn, True)
def FlushBuffer(self):
# client gets removed, delete buffers
if self.removing:
self.msg_sendbuffer = []
self.sendingmessage = None
return
if not self.sendingmessage:
message = ''
while not message:
if not self.msg_sendbuffer: # just in case, since it returns before going to the end...
self.handler.poller.setoutput(self.conn, False)
return
message = self.msg_sendbuffer.pop(0)
self.sendingmessage = message
senddata = self.sendingmessage# [:64] # smaller chunks interpolate better, maybe base this off of number of clients?
try:
sent = self.conn.send(senddata)
self.sendingmessage = self.sendingmessage[sent:] # only removes the number of bytes sent
except UnicodeDecodeError:
self.sendingmessage = None
self._root.console_write('Error sending unicode string, message dropped.')
except socket.error, e:
			if e.errno == errno.EAGAIN:
return
self.msg_sendbuffer = []
self.sendingmessage = None
self.handler.poller.setoutput(self.conn, bool(self.msg_sendbuffer or self.sendingmessage))
# Queuing
def AddUser(self, user):
if type(user) in (str, unicode):
try: user = self._root.usernames[user]
except: return
session_id = user.session_id
if session_id in self.users: return
self.users.add(session_id)
self._protocol.client_AddUser(self, user)
def RemoveUser(self, user):
if type(user) in (str, unicode):
try: user = self._root.usernames[user]
except: return
session_id = user.session_id
if session_id in self.users:
self.users.remove(session_id)
self._protocol.client_RemoveUser(self, user)
def SendUser(self, user, data):
if type(user) in (str, unicode):
try: user = self._root.usernames[user]
except: return
session_id = user.session_id
if session_id in self.users:
self.Send(data)
def AddBattle(self, battle):
battle_id = battle.id
if battle_id in self.battles: return
self.battles.add(battle_id)
self._protocol.client_AddBattle(self, battle)
def RemoveBattle(self, battle):
battle_id = battle.id
if battle_id in self.battles:
self.battles.remove(battle_id)
self._protocol.client_RemoveBattle(self, battle)
def SendBattle(self, battle, data):
battle_id = battle.id
if battle_id in self.battles:
self.Send(data)
def isAdmin(self):
return ('admin' in self.accesslevels)
def isMod(self):
return self.isAdmin() or ('mod' in self.accesslevels) # maybe cache these
|
{"/server.py": ["/Client.py", "/XmlRpcServer.py", "/ip2country.py"], "/ip2country.py": ["/pygeoip/__init__.py"]}
|
16,262
|
IvannLovich/todoList-API
|
refs/heads/master
|
/todo/tests.py
|
from django.test import TestCase
from django.test import Client
from .models import Folder
client = Client()
class MainEndpointsTestCase(TestCase):
def test_folder_endpoint(self):
response = client.get('/api/todo/folders/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
def test_task_endpoint(self):
response = client.get('/api/todo/tasks/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,263
|
IvannLovich/todoList-API
|
refs/heads/master
|
/users/urls.py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import UserViewSet
router = DefaultRouter()
router.register('', UserViewSet, basename='users')
urlpatterns = [
    path('', include(router.urls)),
]
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,264
|
IvannLovich/todoList-API
|
refs/heads/master
|
/users/tests.py
|
from django.test import TestCase
from django.test import Client
from .models import User
client = Client()
class UserEndpointTestCase(TestCase):
def test_user_endpoint(self):
response = client.get('/api/users/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,265
|
IvannLovich/todoList-API
|
refs/heads/master
|
/todo/views.py
|
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets
from .serializers import TaskSerializer, FolderSerializer
from .models import Task, Folder
class FolderViewSet(viewsets.ModelViewSet):
serializer_class = FolderSerializer
queryset = Folder.objects.all()
class TaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
queryset = Task.objects.all()
filter_backends = (DjangoFilterBackend, )
filterset_fields = ('folder', )
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,266
|
IvannLovich/todoList-API
|
refs/heads/master
|
/todo/serializers.py
|
from rest_framework import serializers
from .models import Task, Folder
from users.models import User
from users.serializers import UserSerializer
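# Pattern used below: each nested object is exposed read-only for responses,
# while writes accept a bare primary key through the corresponding *_id field,
# mapped back onto the model relation via source=.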
class FolderSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
user_id = serializers.PrimaryKeyRelatedField(
write_only=True, queryset=User.objects.all(), source='user')
class Meta:
model = Folder
fields = '__all__'
class TaskSerializer(serializers.ModelSerializer):
folder = FolderSerializer(read_only=True)
folder_id = serializers.PrimaryKeyRelatedField(
write_only=True, queryset=Folder.objects.all(), source='folder')
class Meta:
model = Task
fields = ('id', 'title', 'completed', 'folder', 'folder_id')
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,267
|
IvannLovich/todoList-API
|
refs/heads/master
|
/todo/urls.py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import TaskViewSet, FolderViewSet
router = DefaultRouter()
router.register('folders', FolderViewSet, basename='folders')
router.register('tasks', TaskViewSet, basename='tasks')
urlpatterns = [
    path('', include(router.urls)),
]
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,268
|
IvannLovich/todoList-API
|
refs/heads/master
|
/users/models.py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
email = models.EmailField(
'email address',
unique=True,
error_messages={
            'unique': 'A user with that email already exists.'
}
)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
def __str__(self):
return self.username
def get_short_name(self):
return self.username
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,269
|
IvannLovich/todoList-API
|
refs/heads/master
|
/todo/models.py
|
from django.db import models
class Folder(models.Model):
    name = models.CharField(max_length=100)  # CharField enforces max_length; TextField ignores it
user = models.ForeignKey('users.User', related_name='folders', on_delete=models.CASCADE)
def __repr__(self):
return self.name
class Task(models.Model):
title = models.CharField(max_length=100)
completed = models.BooleanField(default=False)
folder = models.ForeignKey(
'Folder', related_name='tasks', on_delete=models.CASCADE)
def __repr__(self):
return self.title
|
{"/todo/tests.py": ["/todo/models.py"], "/users/tests.py": ["/users/models.py"], "/todo/views.py": ["/todo/serializers.py", "/todo/models.py"], "/todo/serializers.py": ["/todo/models.py", "/users/models.py"], "/todo/urls.py": ["/todo/views.py"]}
|
16,275
|
ATOM1Z3R/Mp3Player
|
refs/heads/master
|
/Models.py
|
try:
from sqlalchemy import Integer, String, Column, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker, backref
from sqlalchemy import create_engine
except ImportError:
print("sqlalchemy module is required")
exit()
Base = declarative_base()
class PlayList(Base):
__tablename__ = 'playlists'
id = Column(Integer, primary_key=True)
name = Column(String(50))
class PLDetails(Base):
__tablename__ = 'pldetails'
id = Column(Integer, primary_key=True)
element_path = Column(String(250))
playlist_id = Column(Integer, ForeignKey('playlists.id'))
playlist = relationship("PlayList", backref=backref('pldetails'))
class Settings(Base):
__tablename__ = 'settings'
id = Column(Integer, primary_key=True)
volume = Column(Integer)
def dbconnect():
engine = create_engine('sqlite:///atomdb.db')
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)
return session
|
{"/audiolib.py": ["/settingslib.py"], "/settingslib.py": ["/Models.py"], "/playlistlib.py": ["/Models.py"], "/AtomPlayer.py": ["/Models.py", "/playlistlib.py", "/audiolib.py", "/settingslib.py"]}
|
16,276
|
ATOM1Z3R/Mp3Player
|
refs/heads/master
|
/audiolib.py
|
try:
from pydub import AudioSegment
from pydub.playback import play
except ImportError:
print("pydub and simpleaudio module is required")
print("ffmpeg app is required")
exit()
try:
from tqdm import tqdm
except ImportError:
print("tqdm module is required")
exit()
try:
import keyboard
except ImportError:
print("keyboard module is required")
exit()
try:
import psutil
except ImportError:
print("psutil module is required")
exit()
from multiprocessing import Process, Queue
from settingslib import getVolume
from os import getpid, kill, path
import time
def createAudioSegment(file_path):
file_format = "".join(file_path.split('.')[-1:])
audio = AudioSegment.from_file(file_path, file_format)
return audio
def audioInfo(audio_segment, file_path):
duration = int(audio_segment.duration_seconds)
file_name = "".join((path.basename(file_path)).split('.')[:1])
print(f"Now Playing: {file_name}")
for _ in tqdm(range(duration), ncols=85):
time.sleep(1)
print("\033[A\033[A \033[A")
def audioStart(audio_segment):
audio = audio_segment + getVolume()
play(audio)
def audioController(audio_proc, status_proc, queue):
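    # pause/resume must suspend the playback process and the progress-bar
    # process together (via psutil), otherwise the tqdm timer drifts away
    # from the audio; the queue reports back whether the user terminated
    # the whole playlist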
paused = False
terminate = False
procs = (psutil.Process(audio_proc), psutil.Process(status_proc))
while True:
time.sleep(0.1)
if keyboard.is_pressed('alt+\\'):
if paused == False:
paused = True
for p in procs:
p.suspend()
time.sleep(0.3)
else:
paused = False
for p in procs:
p.resume()
time.sleep(0.3)
elif keyboard.is_pressed('alt+;'):
for p in procs:
kill(p.pid, 9)
print('\nTerminated by User')
terminate = True
break
elif keyboard.is_pressed('alt+]'):
print("\033[A \033[A")
for p in procs:
kill(p.pid, 9)
break
queue.put(terminate)
def startPlayList(audioList):
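    # for every supported file, three processes are spawned: one plays the
    # audio, one renders the progress bar, and one watches the keyboard;
    # the Queue carries back whether the user asked to stop the playlist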
for i in audioList:
if "".join(i.split('.')[-1:]) in ['wav', 'mp3', 'mp4', 'm4a', 'flac', 'ogg']:
try:
terminate = Queue()
play = Process(target=audioStart, args=(createAudioSegment(i),))
info = Process(target=audioInfo, args=(createAudioSegment(i),i))
play.start()
info.start()
                ac = Process(target=audioController, args=(play.pid, info.pid, terminate))
ac.start()
play.join()
info.join()
ac.terminate()
if terminate.get():
break
except FileNotFoundError:
continue
|
{"/audiolib.py": ["/settingslib.py"], "/settingslib.py": ["/Models.py"], "/playlistlib.py": ["/Models.py"], "/AtomPlayer.py": ["/Models.py", "/playlistlib.py", "/audiolib.py", "/settingslib.py"]}
|
16,277
|
ATOM1Z3R/Mp3Player
|
refs/heads/master
|
/settingslib.py
|
from Models import *
session = dbconnect()
def getVolume():
volume = session.query(Settings).first()
return volume.volume
def setVolume(value):
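    # pydub applies gain as a dB offset (audio_segment + n in audiolib), so
    # the user-facing 1-100 scale is stored relative to 50 (50 -> 0 dB)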
if value < 101 and value > 0:
if value == 50:
vol = 0
else:
vol = value - 50
volume = session.query(Settings).first()
volume.volume = vol
session.commit()
return f"Volume set to {value}"
else:
return "ERROR | Type value between 1 and 100"
|
{"/audiolib.py": ["/settingslib.py"], "/settingslib.py": ["/Models.py"], "/playlistlib.py": ["/Models.py"], "/AtomPlayer.py": ["/Models.py", "/playlistlib.py", "/audiolib.py", "/settingslib.py"]}
|
16,278
|
ATOM1Z3R/Mp3Player
|
refs/heads/master
|
/playlistlib.py
|
from Models import *
from os.path import isfile, join
from os import listdir
session = dbconnect()
def listPlayList():
list_pl = session.query(PlayList).all()
return list_pl
def createPlayList(plname):
playList = PlayList(name=plname)
session.add(playList)
session.commit()
added = session.query(PlayList).filter(PlayList.name==plname).order_by(PlayList.id.desc()).first()
return f"CREATED // ID: {added.id} | Name: {added.name}"
def removePlayList(id_playlist):
del_obj = session.query(PlayList).get(id_playlist)
    if del_obj is None:
        return "Playlist does not exist"
session.delete(del_obj)
session.commit()
return "Playlist Deleted"
def addToPlayList(folder_path, playlist_id):
pldetails = PLDetails(element_path=folder_path, playlist_id=playlist_id)
session.add(pldetails)
session.commit()
def findPlayList(id):
count = session.query(PlayList).filter(PlayList.id==id).count()
if count == 1:
return True
else:
return False
def getPlayList(id):
list_pl = []
for record in session.query(PLDetails).filter(PLDetails.playlist_id == id):
path = record.element_path
for r in [i for i in listdir(path) if isfile(join(path, i))]:
list_pl.append(join(path,r))
return list_pl
|
{"/audiolib.py": ["/settingslib.py"], "/settingslib.py": ["/Models.py"], "/playlistlib.py": ["/Models.py"], "/AtomPlayer.py": ["/Models.py", "/playlistlib.py", "/audiolib.py", "/settingslib.py"]}
|
16,279
|
ATOM1Z3R/Mp3Player
|
refs/heads/master
|
/AtomPlayer.py
|
# simpleaudio and ffmpeg required
from Models import *
from playlistlib import *
from audiolib import *
from settingslib import setVolume
from os import listdir, system
from os.path import isdir
import sys
import getopt
def usage():
print("""AtomPlayer
Usage: atomplayer.py -p [playlist_id/catalog/audio_file]
-l --list - list playlists
-c --create [playlist_name] - create playlist with given name
-a --add [playlist_id] -f --folder [catalog_path] - add catalog to playlist
-r --remove [playlist_id] - remove playlist with given id
-v --volume [value] - set volume to given value
Controlls:
alt+\\ - pause/resume audio
alt+] - next audio
alt+; - quit program
Examples:
atomplayer.py -p audio.mp3
    atomplayer.py -a 4 -f c:\audio\playlist
atomplayer.py -r 9
""")
def getAudioList(catalog_path):
audioList = listdir(catalog_path)
return [catalog_path+"\\"+s for s in audioList]
def main():
folder_path = ""
clear = lambda:system("cls")
add_flag = False
if not len(sys.argv[1:]):
usage()
try:
        opts, args = getopt.getopt(sys.argv[1:], "hlc:r:a:f:p:v:",
            ["help", "list", "create=", "folder=", "add=", "remove=", "volume=", "play="])
except getopt.GetoptError as err:
print(str(err))
usage()
for o,a in opts:
if o in ("-h", "--help"):
usage()
elif o in ("-l", "--list"):
for item in listPlayList():
print(f"ID: {item.id} | {item.name}")
elif o in ("-c", "--create"):
print(createPlayList(a))
elif o in ("-r", "--remove"):
print(removePlayList(int(a)))
elif o in ("-v", "--volume"):
print(setVolume(int(a)))
elif o in ("-f", "--folder"):
folder_path = a
elif o in ("-a", "--add"):
add_flag = True
playlist_id = a
elif o in ("-p", "--play"):
try:
if a.isdigit() and int(a) >= 0:
startPlayList(getPlayList(int(a)))
elif isdir(a):
startPlayList(getAudioList(a))
else:
startPlayList([a])
except KeyboardInterrupt:
print("ERROR | Invalid argument. If file name contains spaces use quote: \"file name\".")
if add_flag == True and folder_path != "":
addToPlayList(folder_path, playlist_id)
print("Catalog has been added")
add_flag = False
if __name__=="__main__":
main()
|
{"/audiolib.py": ["/settingslib.py"], "/settingslib.py": ["/Models.py"], "/playlistlib.py": ["/Models.py"], "/AtomPlayer.py": ["/Models.py", "/playlistlib.py", "/audiolib.py", "/settingslib.py"]}
|
16,280
|
ckw1140/bert
|
refs/heads/main
|
/tests/test_bert.py
|
import torch
from model.bert import BERT, BERTPretrain
from model.config import Config
def test_bert():
config = Config.load("./tests/config.json")
batch_size = 8
inputs = torch.randint(config.vocab_size, (batch_size, config.sequence_length))
segments = torch.randint(2, (batch_size, config.sequence_length))
bert = BERT(config)
outputs, outputs_cls, attention_probs = bert(inputs, segments)
assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)
assert outputs_cls.size() == (batch_size, config.hidden_dim)
assert len(attention_probs) == config.num_layers
assert attention_probs[0].size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)
assert attention_probs[0].max() <= 1.0
def test_bert_pretrain():
config = Config.load("./tests/config.json")
batch_size = 8
inputs = torch.randint(config.vocab_size, (batch_size, config.sequence_length))
segments = torch.randint(2, (batch_size, config.sequence_length))
bert_pretrain = BERTPretrain(config)
logits_cls, logits_lm, attention_probs = bert_pretrain(inputs, segments)
assert logits_cls.size() == (batch_size, 2)
assert logits_lm.size() == (batch_size, config.sequence_length, config.vocab_size)
assert len(attention_probs) == config.num_layers
assert attention_probs[0].size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)
assert attention_probs[0].max() <= 1.0
|
{"/tests/test_bert.py": ["/model/bert.py"], "/model/bert.py": ["/model/layers.py"], "/model/layers.py": ["/model/utils.py"], "/tests/test_layers.py": ["/model/layers.py", "/model/utils.py"]}
|
16,281
|
ckw1140/bert
|
refs/heads/main
|
/model/bert.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.layers import Encoder
class BERT(nn.Module):
def __init__(self, config):
super(BERT, self).__init__()
self.config = config
self.encoder = Encoder(config)
self.linear = nn.Linear(config.hidden_dim, config.hidden_dim)
self.activation = torch.tanh
def forward(
self,
inputs: torch.Tensor,
segments: torch.Tensor,
):
outputs, attention_probs = self.encoder(inputs, segments)
# [batch_size, hidden_dim]
outputs_cls = outputs[:, 0].contiguous()
outputs_cls = self.linear(outputs_cls)
outputs_cls = self.activation(outputs_cls)
return outputs, outputs_cls, attention_probs
def save(self, epoch, loss, path):
torch.save(
{
"epoch": epoch,
"loss": loss,
"state_dict": self.state_dict()
},
path,
)
def load(self, path):
save = torch.load(path)
self.load_state_dict(save["state_dict"])
return save["epoch"], save["loss"]
class BERTPretrain(nn.Module):
def __init__(self, config):
super(BERTPretrain, self).__init__()
self.config = config
self.bert = BERT(config)
# Classifier
self.projection_cls = nn.Linear(config.hidden_dim, 2, bias=False)
# lm
self.projection_lm = nn.Linear(config.hidden_dim, config.vocab_size)
self.projection_lm.weight = self.bert.encoder.enc_emb.weight
def forward(
self,
inputs: torch.Tensor,
segments: torch.Tensor,
):
# [batch_size, sequence_length, hidden_dim]
# [batch_size, hidden_dim]
# [batch_size, num_heads, sequence_length, sequence_length] x num_layers
outputs, outputs_cls, attention_probs = self.bert(inputs, segments)
# [batch_size, 2]
logits_cls = self.projection_cls(outputs_cls)
# [batch_size, sequence_length, vocab_size]
logits_lm = self.projection_lm(outputs)
return logits_cls, logits_lm, attention_probs
|
{"/tests/test_bert.py": ["/model/bert.py"], "/model/bert.py": ["/model/layers.py"], "/model/layers.py": ["/model/utils.py"], "/tests/test_layers.py": ["/model/layers.py", "/model/utils.py"]}
|
16,282
|
ckw1140/bert
|
refs/heads/main
|
/model/layers.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.utils import gelu, get_attention_pad_mask
class ScaledDotProductAttention(nn.Module):
def __init__(self, config):
super(ScaledDotProductAttention, self).__init__()
self.config = config
self.dropout = nn.Dropout(config.dropout_prob)
self.scale = 1 / (config.head_dim ** 0.5)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor,
):
scores = torch.matmul(query, key.transpose(2, 3)) * self.scale
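        # masked (padded) key positions get a large negative score so that
        # softmax assigns them near-zero attention probability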
scores.masked_fill_(attention_mask, -1e9)
# [batch_size, num_heads, sequence_length, sequence_length]
attention_prob = nn.Softmax(dim=-1)(scores)
attention_prob = self.dropout(attention_prob)
# [batch_size, num_heads, sequence_length, head_dim]
context = torch.matmul(attention_prob, value)
return context, attention_prob
class MultiHeadAttention(nn.Module):
def __init__(self, config):
super(MultiHeadAttention, self).__init__()
self.config = config
self.W_q = nn.Linear(config.hidden_dim, config.num_heads * config.head_dim)
self.W_k = nn.Linear(config.hidden_dim, config.num_heads * config.head_dim)
self.W_v = nn.Linear(config.hidden_dim, config.num_heads * config.head_dim)
self.scaled_dot_product_attention = ScaledDotProductAttention(config)
self.linear = nn.Linear(config.head_dim * config.num_heads, config.hidden_dim)
self.dropout = nn.Dropout(config.dropout_prob)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor,
):
batch_size = query.size(0)
# [batch_size, num_heads, sequence_length, head_dim]
Q = self.W_q(query).view(batch_size, -1, self.config.num_heads, self.config.head_dim).transpose(1, 2)
K = self.W_k(key).view(batch_size, -1, self.config.num_heads, self.config.head_dim).transpose(1, 2)
V = self.W_v(value).view(batch_size, -1, self.config.num_heads, self.config.head_dim).transpose(1, 2)
# [batch_size, num_heads, sequence_length, sequence_length]
attention_mask = attention_mask.unsqueeze(1).repeat(1, self.config.num_heads, 1, 1)
# [batch_size, num_heads, sequence_length, head_dim]
# [batch_size, num_heads, sequence_length, sequence_length]
context, attention_prob = self.scaled_dot_product_attention(Q, K, V, attention_mask)
# [batch_size, sequence_length, num_heads * head_dim]
context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.config.num_heads * self.config.head_dim)
# [batch_size, sequence_length, hidden_dim]
outputs = self.linear(context)
outputs = self.dropout(outputs)
return outputs, attention_prob
class FeedForward(nn.Module):
def __init__(self, config):
super(FeedForward, self).__init__()
self.config = config
self.linear1 = nn.Linear(config.hidden_dim, config.feed_forward_dim)
self.linear2 = nn.Linear(config.feed_forward_dim, config.hidden_dim)
self.dropout = nn.Dropout(config.dropout_prob)
def forward(
self,
inputs: torch.Tensor,
):
outputs = self.linear1(inputs)
outputs = gelu(outputs)
outputs = self.linear2(outputs)
return self.dropout(outputs)
class EncoderLayer(nn.Module):
def __init__(self, config):
super(EncoderLayer, self).__init__()
self.config = config
self.self_attention = MultiHeadAttention(config)
self.layer_norm1 = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_epsilon)
self.feed_forward = FeedForward(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_epsilon)
def forward(
self,
inputs: torch.Tensor,
attention_mask: torch.Tensor,
):
residual = inputs
outputs, attention_prob = self.self_attention(
query=inputs,
key=inputs,
value=inputs,
attention_mask=attention_mask,
)
outputs = self.layer_norm1(outputs + residual)
residual = outputs
outputs = self.feed_forward(outputs)
outputs = self.layer_norm2(outputs + residual)
return outputs, attention_prob
class Encoder(nn.Module):
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
self.enc_emb = nn.Embedding(config.vocab_size, config.hidden_dim)
self.pos_emb = nn.Embedding(config.sequence_length + 1, config.hidden_dim)
self.seg_emb = nn.Embedding(config.num_segments, config.hidden_dim)
self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.num_layers)])
def forward(
self,
inputs: torch.Tensor,
segments: torch.Tensor,
):
batch_size = inputs.size(0)
sequence_length = inputs.size(1)
positions = torch.arange(
sequence_length,
device=inputs.device,
dtype=inputs.dtype
)
positions = positions.expand(batch_size, sequence_length)
positions = positions.contiguous() + 1
        # zero the position index wherever inputs equals pad_token
        # (index 0 of pos_emb is reserved for padding, hence the +1 shift above)
pos_mask = inputs.eq(self.config.pad_token)
positions.masked_fill_(pos_mask, 0)
# [batch_size, sequence_length, hidden_dim]
outputs = self.enc_emb(inputs) + self.pos_emb(positions) + self.seg_emb(segments)
# [batch_size, sequence_length, sequence_length]
attention_mask = get_attention_pad_mask(inputs, inputs, self.config.pad_token)
attention_probs = []
for layer in self.layers:
# outputs: [batch_size, sequence_length, hidden_dim]
# attention_probs: [batch_size, num_head, sequence_length, sequence_length]
outputs, attention_prob = layer(outputs, attention_mask)
attention_probs.append(attention_prob)
return outputs, attention_probs
|
{"/tests/test_bert.py": ["/model/bert.py"], "/model/bert.py": ["/model/layers.py"], "/model/layers.py": ["/model/utils.py"], "/tests/test_layers.py": ["/model/layers.py", "/model/utils.py"]}
|
16,283
|
ckw1140/bert
|
refs/heads/main
|
/tests/test_layers.py
|
import torch
from model.config import Config
from model.layers import (
ScaledDotProductAttention,
MultiHeadAttention,
FeedForward,
EncoderLayer,
Encoder,
)
from model.utils import get_attention_pad_mask
def test_scaled_dot_product_attention():
config = Config.load("./tests/config.json")
batch_size = 8
query = torch.rand([batch_size, config.num_heads, config.sequence_length, config.head_dim])
key = torch.rand([batch_size, config.num_heads, config.sequence_length, config.head_dim])
value = torch.rand([batch_size, config.num_heads, config.sequence_length, config.head_dim])
attention_mask = torch.zeros([batch_size, config.num_heads, config.sequence_length, config.sequence_length])
scaled_dot_product_attention = ScaledDotProductAttention(config)
context, attention_prob = scaled_dot_product_attention(query, key, value, attention_mask)
assert context.size() == (batch_size, config.num_heads, config.sequence_length, config.head_dim)
assert attention_prob.size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)
assert attention_prob.max() <= 1.0
def test_multi_head_attention():
config = Config.load("./tests/config.json")
batch_size = 8
query = torch.rand([batch_size, config.sequence_length, config.hidden_dim])
key = torch.rand([batch_size, config.sequence_length, config.hidden_dim])
value = torch.rand([batch_size, config.sequence_length, config.hidden_dim])
attention_mask = torch.zeros([batch_size, config.sequence_length, config.sequence_length])
multi_head_attention = MultiHeadAttention(config)
context, attention_prob = multi_head_attention(query, key, value, attention_mask)
assert context.size() == (batch_size, config.sequence_length, config.hidden_dim)
assert attention_prob.size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)
assert attention_prob.max() <= 1.0
def test_feed_forward():
config = Config.load("./tests/config.json")
batch_size = 8
inputs = torch.rand([batch_size, config.sequence_length, config.hidden_dim])
feed_forward = FeedForward(config)
outputs = feed_forward(inputs)
assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)
def test_encoder_layer():
config = Config.load("./tests/config.json")
batch_size = 8
inputs = torch.rand([batch_size, config.sequence_length, config.hidden_dim])
attention_mask = torch.zeros([batch_size, config.sequence_length, config.sequence_length])
encoder_layer = EncoderLayer(config)
outputs, attention_prob = encoder_layer(inputs, attention_mask)
assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)
assert attention_prob.size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)
def test_encoder():
config = Config.load("./tests/config.json")
batch_size = 8
inputs = torch.randint(config.vocab_size, (batch_size, config.sequence_length))
segments = torch.randint(2, (batch_size, config.sequence_length))
encoder = Encoder(config)
outputs, attention_probs = encoder(inputs, segments)
assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)
assert len(attention_probs) == config.num_layers
assert attention_probs[0].size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)
|
{"/tests/test_bert.py": ["/model/bert.py"], "/model/bert.py": ["/model/layers.py"], "/model/layers.py": ["/model/utils.py"], "/tests/test_layers.py": ["/model/layers.py", "/model/utils.py"]}
|
16,284
|
ckw1140/bert
|
refs/heads/main
|
/model/utils.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def gelu(inputs: torch.Tensor):
"""
https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_tf_gpt2.py
    was used as a reference for this implementation.
"""
cdf = 0.5 * (1.0 + torch.tanh((np.sqrt(2 / np.pi) * (inputs + 0.044715 * torch.pow(inputs, 3)))))
return inputs * cdf
def get_attention_pad_mask(
query: torch.Tensor,
key: torch.Tensor,
pad_token: int,
):
"""
    Returns a tensor that is True at attention positions that must be masked
    because the corresponding key position equals pad_token, and False at all
    other positions.
"""
batch_size, query_length = query.size()
batch_size, key_length = key.size()
# attention_pad_mask.requires_grad=False
# [batch_size, key_length]
attention_pad_mask = key.data.eq(pad_token)
# [batch_size, 1, key_length]
attention_pad_mask = attention_pad_mask.unsqueeze(1)
# [batch_size, query_length, key_length]
attention_pad_mask = attention_pad_mask.expand(batch_size, query_length, key_length)
return attention_pad_mask
|
{"/tests/test_bert.py": ["/model/bert.py"], "/model/bert.py": ["/model/layers.py"], "/model/layers.py": ["/model/utils.py"], "/tests/test_layers.py": ["/model/layers.py", "/model/utils.py"]}
|
16,285
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja/audio.py
|
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
player = None
playing = False
def on_tag(bus, msg):
taglist = msg.parse_tag()
print('on_tag:')
for key in taglist.keys():
print('\t%s = %s' % (key, taglist[key]))
def init_player():
global player
Gst.init([])
player = Gst.ElementFactory.make("playbin", "player")
bus = player.get_bus()
bus.enable_sync_message_emission()
bus.add_signal_watch()
bus.connect('message::tag', on_tag)
def set_uri(uri):
global player
player.set_property('uri', uri)
def set_playing(bool_playing):
    global playing
    playing = bool(bool_playing)
def is_playing():
return playing
def play():
global player
set_playing(1)
player.set_state(Gst.State.PLAYING)
def null():
global player
set_playing(0)
player.set_state(Gst.State.NULL)
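# Minimal usage sketch (assumes GStreamer's playbin is available and that a
# GLib main loop is pumping bus messages, which the tag callback needs):
#
#   init_player()
#   set_uri('http://example.com/stream.mp3')  # hypothetical URI
#   play()
#   ...
#   null()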
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,286
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja/helpers.py
|
# coding=utf-8
import pickle
from types import SimpleNamespace
import requests
XPATH_URL_OVERVIEW = '//ul/li/a[text()="Nachhören"]/@href'
XPATH_DATE_OVERVIEW = '//span[@class="date"]/text()'
XPATH_NAME_OVERVIEW = '//h3/text()'
def write_page_content_to_file(file, page_content):
with open(file, 'wb') as f:
pickle.dump(page_content, f)
def get_page_from_file(file):
    # mimic a requests response: only the .content attribute is used downstream
    page = SimpleNamespace()
    with open(file, 'rb') as f:
        page.content = pickle.load(f)
    return page
def xpath_query(html_tree, xpath_str):
"""Apply xpath query to html tree, return list of elements"""
return html_tree.xpath(xpath_str)
def xpath_query_single_element(html_tree, xpath_str):
query = xpath_query(html_tree, xpath_str)
query_result = None
if len(query):
query_result = query[0]
return query_result
def query_url_overview(subtree):
program_url = xpath_query_single_element(subtree, XPATH_URL_OVERVIEW)
return program_url
def query_date_overview(subtree):
program_date = xpath_query_single_element(subtree, XPATH_DATE_OVERVIEW)
return program_date
def query_name_overview(subtree):
program_name = xpath_query_single_element(subtree, XPATH_NAME_OVERVIEW)
return program_name
def request_page_content(url):
req = requests.get(url)
return req
def query_name_episode(html_tree):
return xpath_query_single_element(html_tree, '//div[2]/h3/a/span/text()')
def query_url_episode(html_tree):
return xpath_query_single_element(html_tree, '//div[2]/h3/a/@href')
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,287
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja/core.py
|
import os.path
from lxml import html, etree
from dlfninja.episode import Episode
from dlfninja.helpers import xpath_query, query_url_overview, query_date_overview, \
query_name_overview, get_page_from_file, request_page_content, write_page_content_to_file, \
query_name_episode, query_url_episode
from dlfninja.program import Program
XPATH_SUBTREE_PROGRAM = '//*[@id="content"]/div/section[1]/div[1]/article'
DLF_URL = 'http://www.deutschlandfunk.de/'
programs = []
def update_episode_list(program, html_tree):
"""Scraps episodes from DLF page 'Nachhoeren' for a specific program"""
program.clear_episodes()
episode_trees = xpath_query(html_tree, '//*[@id="content"]/div/section[1]/div[1]/ul/li')
for i, episode_tree in enumerate(episode_trees):
new_episode = Episode(id=i)
subtree = etree.ElementTree(episode_tree)
new_episode.set_name(query_name_episode(subtree))
new_episode.set_url(query_url_episode(subtree))
program.add_episode(new_episode)
def print_programs():
for program in programs:
print(program)
def get_page_tree(url):
"""Returns the html tree for a given url and caches the page"""
file_name = url.split('/')[-1]
if os.path.isfile('data/'+file_name):
page = get_page_from_file('data/'+file_name)
else:
page = request_page_content(url)
write_page_content_to_file('data/'+file_name, page.content)
html_tree = html.fromstring(page.content)
return html_tree
def update_programs_list(overview_tree):
"""Scraps programs from DLF page 'Alle Sendungen'"""
del programs[:]
program_trees = xpath_query(overview_tree, XPATH_SUBTREE_PROGRAM)
for i, program_tree in enumerate(program_trees):
new_program = Program(id=i)
subtree = etree.ElementTree(program_tree)
new_program.set_name(query_name_overview(subtree))
new_program.set_date(query_date_overview(subtree))
url = query_url_overview(subtree)
new_program.set_url(url)
if url is None:
new_program.set_disabled()
programs.append(new_program)
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,288
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja/program.py
|
class Program:
name = 'unknown'
url = None
date = None
details = None
id = None
disabled = False
    def __init__(self, name=None, id=None):
        if name is not None:
            self.name = name
        self.id = id
        # per-instance list; a class-level default would be shared by all programs
        self.episodes = []
def __str__(self):
return "<id=\"%d\" name=\"%s\" date=\"%s\" url=\"%s\">" % (self.id, self.name, self.date, self.url)
def set_name(self, name):
self.name = name
def set_url(self, url):
self.url = url
def set_date(self, date):
self.date = date
def set_details(self, details):
self.details = details
def add_episode(self, episode):
self.episodes.append(episode)
def clear_episodes(self):
del self.episodes[:]
def print_episodes(self):
for episode in self.episodes:
print(episode)
def set_disabled(self):
self.disabled = True
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,289
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja.py
|
import curses
import dlfninja.core as dlf
import dlfninja.curses as dlfcurses
import dlfninja.audio as audio
def main(stdscr):
stdscr.keypad(True)
curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
stdscr.clear()
scr_height = curses.LINES
scr_width = curses.COLS
dlfcurses.init_overview_menu(dlf.programs, scr_width, scr_height)
banner = dlfcurses.Banner()
stdscr.refresh()
dlfcurses.overview_menu.draw()
banner.draw()
while True:
c = stdscr.getch()
if c == ord('q'):
break # Exit Program
elif c == curses.KEY_UP or c == ord('k'):
dlfcurses.active_menu.scroll_up()
elif c == curses.KEY_DOWN or c == ord('j'):
dlfcurses.active_menu.scroll_down()
elif c == curses.KEY_RIGHT:
dlfcurses.active_menu.expand_element()
elif c == curses.KEY_LEFT:
dlfcurses.active_menu = dlfcurses.overview_menu
        elif c == ord('s'):
            # toggle playback: stop if playing, otherwise start again
            if audio.is_playing():
                audio.null()
            else:
                audio.play()
stdscr.refresh()
dlfcurses.active_menu.draw()
banner.draw()
if __name__ == '__main__':
overview_tree = dlf.get_page_tree('http://www.deutschlandfunk.de/sendungen-a-z.348.de.html')
dlf.update_programs_list(overview_tree)
audio.init_player()
curses.wrapper(main)
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,290
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja/curses.py
|
import curses
from dlfninja.core import get_page_tree, update_episode_list
import dlfninja.audio as audio
BANNER = """
______ _____ ________ ____ _____ _____ ____ _____ _____ _
|_ _ `.|_ _| |_ __ | |_ \|_ _||_ _||_ \|_ _| |_ _| / \
| | `. \ | | | |_ \_| | \ | | | | | \ | | | | / _ \
| | | | | | _ | _| | |\ \| | | | | |\ \| | _ | | / ___ \
_| |_.' /_| |__/ | _| |_ _| |_\ |_ _| |_ _| |_\ |_ | |__' | _/ / \ \_
|______.'|________||_____| |_____|\____||_____||_____|\____|`.____.'|____| |____|
v0.1.0"""
overview_menu = None
active_menu = None
episodes_menu = None
class Banner:
win = None
def __init__(self):
self.win = curses.newwin(10, 86, ((curses.LINES // 4) - 5), (curses.COLS - 86)//2 - 1)
def draw(self):
self.win.addstr(0, 0, BANNER, curses.color_pair(1))
self.win.refresh()
class Entry:
text = None
text_right = None
url = None
program = None
def set_text(self, text):
self.text = text
def set_text_right(self, text):
if text is not None:
self.text_right = text.strip().rstrip('.')
def set_url(self, url):
self.url = url
def set_program(self, program):
self.program = program
class Menu:
win = None
entries = None
selected = 0
title = None
subtext = None
width = 0
height = 0
pos_x = 0
pos_y = 0
max_lines = 0
type = None
def __init__(self, type):
self.entries = []
self.type = type
def set_title(self, title):
self.title = title
def set_subtext(self, subtext):
self.subtext = subtext
def scroll_down(self):
num_elements = len(self.entries)
if num_elements:
self.selected = (self.selected + 1) % num_elements
def scroll_up(self):
num_elements = len(self.entries)
if not num_elements:
return None
self.selected = self.selected - 1
if self.selected < 0:
self.selected = num_elements - 1
def init(self, x, y, w, h):
self.height = h
self.width = w
self.pos_x = x
self.pos_y = y
self.win = curses.newwin(h, w, y, x)
self.win.border(0)
self.max_lines = self.height - 2
def draw(self):
if self.win is None:
return None
self.win.clear()
self.win.border(0)
if self.title is not None:
self.win.addstr(0, 2, self.title, curses.color_pair(1))
if self.subtext is not None:
self.win.addstr(self.height - 1, 2, self.subtext, curses.color_pair(1))
for i in range(0, self.max_lines):
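            # page-wise scrolling: j is the index of the first entry on the
            # page that contains the currently selected entry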
j = self.max_lines * (self.selected // self.max_lines)
if j+i >= len(self.entries):
break
entry = self.entries[j+i]
if j+i == self.selected:
if entry.text is not None:
self.win.addstr(i+1, 1, "> " + entry.text, curses.color_pair(1))
if entry.text_right is not None:
self.win.addstr(i+1, self.width - len(entry.text_right) - 2, entry.text_right, curses.color_pair(1))
else:
color = curses.color_pair(0)
if entry.program and entry.program.disabled:
color = curses.color_pair(2)
if entry.text is not None:
self.win.addstr(i+1, 3, entry.text, color)
if entry.text_right is not None:
self.win.addstr(i+1, self.width - len(entry.text_right) - 2, entry.text_right, color)
self.win.refresh()
def add_entry(self, entry):
self.entries.append(entry)
def expand_element(self):
selected_entry = self.entries[self.selected]
if self.type == 'overview':
if selected_entry.url is not None:
init_episodes_menu(selected_entry, curses.COLS, curses.LINES)
if self.type == 'episodes':
if selected_entry.url is not None:
audio.null()
audio.set_uri(selected_entry.url)
audio.play()
def init_overview_menu(programs, scr_width, scr_height):
global overview_menu
global active_menu
overview_menu = Menu('overview')
active_menu = overview_menu
overview_menu.init(0, scr_height // 2, scr_width, scr_height - scr_height // 2)
overview_menu.set_title(" Alle Sendungen({}) ".format(len(programs)))
overview_menu.set_subtext(" quit(q) play/pause(space) ")
for i, prog in enumerate(programs):
entry = Entry()
entry.set_text(str(i+1)+"\t"+prog.name)
entry.set_text_right(prog.date)
entry.set_url(prog.url)
entry.set_program(prog)
overview_menu.add_entry(entry)
def init_episodes_menu(entry, scr_width, scr_height):
global episodes_menu
global active_menu
active_menu.win.clear()
episodes_menu = Menu('episodes')
active_menu = episodes_menu
episodes_tree = get_page_tree('http://www.deutschlandfunk.de/'+entry.url)
update_episode_list(entry.program, episodes_tree)
episodes_menu.init(0, scr_height // 2, scr_width, scr_height - scr_height // 2)
episodes_menu.set_title(" {}({}) ".format(entry.text, len(entry.program.episodes)))
episodes_menu.set_subtext(" zurück(left) play/pause(space) ")
episodes_menu.draw()
for i, episode in enumerate(entry.program.episodes):
ep = Entry()
ep.set_text(str(i+1)+"\t"+episode.name)
# ep.set_text_right(episode.date)
ep.set_url(episode.url)
# ep.set_program(episode)
episodes_menu.add_entry(ep)
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,291
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja/episode.py
|
class Episode:
name = None
date = None
url = None
id = None
length = None
author = None
available_until = None
program = None
def __init__(self, name=None, id=None):
if name is not None:
self.name = name
self.id = id
def __str__(self):
return "<name=\"%s\" date=\"%s\" url=\"%s\">" % (self.name, self.date, self.url)
def set_name(self, name):
self.name = name
def set_url(self, url):
self.url = url
def set_date(self, date):
self.date = date
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,292
|
Teszko/dlfninja
|
refs/heads/master
|
/dlfninja/input.py
|
def handle_input(c):
pass
|
{"/dlfninja/core.py": ["/dlfninja/episode.py", "/dlfninja/helpers.py", "/dlfninja/program.py"], "/dlfninja.py": ["/dlfninja/core.py", "/dlfninja/curses.py", "/dlfninja/audio.py"], "/dlfninja/curses.py": ["/dlfninja/core.py", "/dlfninja/audio.py"]}
|
16,303
|
filwaitman/jinja2-standalone-compiler
|
refs/heads/master
|
/jinja2_standalone_compiler/__init__.py
|
from __future__ import unicode_literals, print_function
import argparse
import fnmatch
import imp
import os
import re
import sys
from jinja2 import Environment, FileSystemLoader, StrictUndefined, defaults
try:
from colorama import init, Fore, Style
init(autoreset=True)
using_colorama = True
style_JINJA_FILE = Fore.MAGENTA
style_WARNING = Fore.YELLOW + Style.BRIGHT
style_SETTING = Fore.CYAN
style_RENDERED_FILE = Fore.CYAN
style_SUCCESS = Fore.GREEN
style_ALL_DONE = Fore.GREEN + Style.BRIGHT
except ImportError:
using_colorama = False
style_JINJA_FILE = ''
style_WARNING = ''
style_SETTING = ''
style_RENDERED_FILE = ''
style_SUCCESS = ''
style_ALL_DONE = ''
def print_log(msg, verbose_msg=False, verbose=False, silent=False):
if silent:
return
if not verbose and verbose_msg:
return
print(msg)
def render_template(jinja_template, extra_variables, output_options, jinja_environment, template_root):
environment = Environment(
loader=FileSystemLoader(template_root),
block_start_string=jinja_environment.get('BLOCK_START_STRING', defaults.BLOCK_START_STRING),
block_end_string=jinja_environment.get('BLOCK_END_STRING', defaults.BLOCK_END_STRING),
variable_start_string=jinja_environment.get('VARIABLE_START_STRING', defaults.VARIABLE_START_STRING),
variable_end_string=jinja_environment.get('VARIABLE_END_STRING', defaults.VARIABLE_END_STRING),
comment_start_string=jinja_environment.get('COMMENT_START_STRING', defaults.COMMENT_START_STRING),
comment_end_string=jinja_environment.get('COMMENT_END_STRING', defaults.COMMENT_END_STRING),
line_statement_prefix=jinja_environment.get('LINE_STATEMENT_PREFIX', defaults.LINE_STATEMENT_PREFIX),
line_comment_prefix=jinja_environment.get('LINE_COMMENT_PREFIX', defaults.LINE_COMMENT_PREFIX),
trim_blocks=jinja_environment.get('TRIM_BLOCKS', True),
lstrip_blocks=jinja_environment.get('LSTRIP_BLOCKS', True),
newline_sequence=jinja_environment.get('NEWLINE_SEQUENCE', defaults.NEWLINE_SEQUENCE),
keep_trailing_newline=jinja_environment.get('KEEP_TRAILING_NEWLINE', defaults.KEEP_TRAILING_NEWLINE)
)
environment.undefined = StrictUndefined
dirname = os.path.dirname(jinja_template)
relpath = os.path.relpath(dirname, template_root)
basename = os.path.basename(jinja_template)
filename = os.path.join(relpath, basename)
template = environment.get_template(filename)
return template.render(extra_variables)
def main(path, out_path=None, verbose=False, silent=False, settings=None):
extra_variables = {}
ignore_jinja_templates = []
output_options = {}
jinja_environment = {}
if settings:
extra_variables = getattr(settings, 'EXTRA_VARIABLES', {})
ignore_jinja_templates = getattr(settings, 'IGNORE_JINJA_TEMPLATES', [])
output_options = getattr(settings, 'OUTPUT_OPTIONS', {})
jinja_environment = getattr(settings, 'JINJA_ENVIRONMENT', {})
print_log('Additional context and options:', True, verbose, silent)
print_log(' EXTRA_VARIABLES : {}'.format(extra_variables), True, verbose, silent)
print_log(' OUTPUT_OPTIONS : {}'.format(output_options), True, verbose, silent)
print_log(' JINJA_ENVIRONMENT: {}'.format(jinja_environment), True, verbose, silent)
if os.path.isdir(path):
print_log('Looking for jinja templates in: {}{}'.format(style_JINJA_FILE, path), False, verbose, silent)
template_root = path
jinja_templates = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.jinja*'):
jinja_templates.append(os.path.join(root, filename))
else:
template_root = os.path.dirname(path)
jinja_templates = [path, ] # path is just a file, actually
print_log(' Jinja files found: {}{}'.format(style_JINJA_FILE, len(jinja_templates)), False, verbose, silent)
for jinja_template in jinja_templates:
print_log('Processing:' + style_JINJA_FILE + jinja_template, False, verbose, silent)
skip = False
for jinja_template_to_be_ignored in ignore_jinja_templates:
if re.match(jinja_template_to_be_ignored, jinja_template):
print_log(' Skipping: ' + style_WARNING + jinja_template, False, verbose, silent)
skip = True
break
if skip:
continue
if out_path:
rel_path = os.path.relpath(jinja_template, path)
if rel_path == '.':
rel_path = os.path.basename(path)
template_file = os.path.join(out_path, rel_path)
template_dir = os.path.dirname(template_file)
if not os.path.exists(template_dir):
try:
os.makedirs(template_dir)
                except OSError:
raise IOError('Cannot create sub output directory: {}'.format(template_dir))
template_file, _ = os.path.splitext(template_file)
else:
template_file, _ = os.path.splitext(jinja_template)
template_file = os.path.abspath(template_file)
if output_options.get('remove_double_extension', False):
template_file, _ = os.path.splitext(template_file)
template_file = '{}{}'.format(template_file, output_options.get('extension', '.html'))
print_log(' Creating: ' + style_RENDERED_FILE + template_file, False, verbose, silent)
if os.path.abspath(jinja_template) == os.path.abspath(template_file):
raise IOError("write target is also a source file, aborting to prevent blanking")
try:
with open(template_file, 'w') as f:
f.write(render_template(
jinja_template,
extra_variables=extra_variables,
output_options=output_options,
jinja_environment=jinja_environment,
template_root=template_root
))
except:
os.unlink(template_file)
raise
def _first_set(*values):
for value in values:
if value is not None:
return value
def main_command(path=None, settings=None, out=None, verbose=None, silent=None):
has_path = bool(path)
path_help = '{}Path to base files.'.format('' if has_path else '[REQUIRED] ')
parser = argparse.ArgumentParser(description='jinja2_standalone_compiler')
parser.add_argument('--path', dest='path', help=path_help, required=not(has_path))
parser.add_argument('--settings', '-s', dest='settings', action='append', nargs=1, help='Settings file(s) to use.')
parser.add_argument('--out', '-o', dest='out', help='Output path.')
parser.add_argument('--verbose', dest='verbose', help='Detailed output.', action='store_true', default=False)
parser.add_argument('--silent', dest='silent', help='Suppress output.', action='store_true', default=False)
args = parser.parse_args()
args_settings = None
if args.settings:
args_settings = [x[0] for x in args.settings]
path = _first_set(path, args.path)
settings = _first_set(settings, args_settings)
out = _first_set(out, args.out)
verbose = _first_set(verbose, args.verbose)
silent = _first_set(silent, args.silent)
current_dir = os.getcwd()
if not using_colorama and not silent:
print("<optional dependency 'colorama' not found, try 'pip install colorama==0.3.7' to see colored output>")
if out and not os.path.exists(out):
out = os.path.normpath(out)
try:
os.makedirs(out)
        except OSError:
            raise IOError('Cannot create output directory: {}'.format(out))
if settings:
if not silent:
print_log('{}Number of settings files: {}'.format(style_SUCCESS, len(settings)), False, verbose, silent)
for setting in settings:
settings_file = os.path.normpath(os.path.join(current_dir, setting))
if not os.path.exists(settings_file):
raise IOError('Settings file not found: {}'.format(settings_file))
else:
if not silent:
print_log('Using settings file: ' + style_SETTING + settings_file, False, verbose, silent)
sys.path.insert(0, '')
            setting = imp.load_source(current_dir, settings_file)  # load via the validated absolute path
work_dir = os.path.normpath(os.path.join(current_dir, path))
main(work_dir, out, verbose, silent, setting)
print_log(style_SUCCESS + 'Done.', False, verbose, silent)
else:
work_dir = os.path.join(current_dir, path)
main(work_dir, out, verbose, silent)
print_log(style_ALL_DONE + 'All done.', False, verbose, silent)
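# Example invocation of the console script defined in setup.py (a sketch; the
# paths and file names below are hypothetical):
#   jinja2_standalone_compiler --path ./templates --settings my_settings.py --out ./build --verbose
# '--settings' may be passed several times; main() then runs once per settings file.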
|
{"/tests/test_jinja_standalone_compiler.py": ["/jinja2_standalone_compiler/__init__.py"]}
|
16,304
|
filwaitman/jinja2-standalone-compiler
|
refs/heads/master
|
/tests/test_jinja_standalone_compiler.py
|
from __future__ import unicode_literals
from collections import namedtuple
import fnmatch
import os
import unittest
from jinja2 import UndefinedError
from jinja2_standalone_compiler import main
fixtures_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures')
class MainTestCase(unittest.TestCase):
def tearDown(self):
for root, dirnames, filenames in os.walk(fixtures_dir):
for filename in fnmatch.filter(filenames, '*.html'):
os.unlink(os.path.join(root, filename))
def test_extends(self):
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'child_base', 'child.html')))
main(os.path.join(fixtures_dir, 'child_base'))
self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'child_base', 'child.html')))
file_content = open(os.path.join(fixtures_dir, 'child_base', 'child.html')).read()
        self.assertEqual(file_content, 'begin parent\nparent content\n\nchild content\nend parent')
def test_extends_and_include(self):
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))
main(os.path.join(fixtures_dir, 'header_footer'))
self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))
file_content = open(os.path.join(fixtures_dir, 'header_footer', 'child.html')).read()
        self.assertEqual(file_content, 'header!\nbegin parent\nparent content\n\nchild content\nend parent\nfooter!')
def test_extends_and_include_and_custom_vars(self):
Settings = namedtuple('Settings', ['EXTRA_VARIABLES'])
settings = Settings(EXTRA_VARIABLES={'number': 42, 'triplicate': lambda x: x * 3})
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'custom_vars', 'child.html')))
main(os.path.join(fixtures_dir, 'custom_vars'), settings=settings)
self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'custom_vars', 'child.html')))
file_content = open(os.path.join(fixtures_dir, 'custom_vars', 'child.html')).read()
        self.assertEqual(file_content, 'header!\nbegin parent\nparent content\n\nchild content\nworks! works! works! '
                         '\n42 * 2 = 84\nend parent\nfooter!')
def test_ignore_jinja_templates(self):
Settings = namedtuple('Settings', ['IGNORE_JINJA_TEMPLATES'])
settings = Settings(IGNORE_JINJA_TEMPLATES=['.*base.jinja', ])
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'base.html')))
main(os.path.join(fixtures_dir, 'header_footer'), settings=settings)
self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'base.html')))
def test_undefined_vars_raises_errors(self):
Settings = namedtuple('Settings', ['EXTRA_VARIABLES'])
settings = Settings(EXTRA_VARIABLES={'name': 'Filipe Waitman'})
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'undefined_vars', 'child.html')))
self.assertRaises(UndefinedError, main, path=os.path.join(fixtures_dir, 'undefined_vars'))
self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'undefined_vars', 'child.html')))
main(os.path.join(fixtures_dir, 'undefined_vars'), settings=settings)
self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'undefined_vars', 'child.html')))
|
{"/tests/test_jinja_standalone_compiler.py": ["/jinja2_standalone_compiler/__init__.py"]}
|
16,305
|
filwaitman/jinja2-standalone-compiler
|
refs/heads/master
|
/setup.py
|
from setuptools import setup
VERSION = '1.3.1'
setup(
name='jinja2_standalone_compiler',
packages=['jinja2_standalone_compiler', ],
version=VERSION,
author='Filipe Waitman',
author_email='filwaitman@gmail.com',
install_requires=[x.strip() for x in open('requirements.txt').readlines()],
url='https://github.com/filwaitman/jinja2-standalone-compiler',
download_url='https://github.com/filwaitman/jinja2-standalone-compiler/tarball/{}'.format(VERSION),
test_suite='tests',
keywords=['Jinja2', 'Jinja', 'renderer', 'compiler', 'HTML'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Operating System :: OS Independent",
],
entry_points="""\
[console_scripts]
jinja2_standalone_compiler = jinja2_standalone_compiler:main_command
""",
)
|
{"/tests/test_jinja_standalone_compiler.py": ["/jinja2_standalone_compiler/__init__.py"]}
|
16,306
|
filwaitman/jinja2-standalone-compiler
|
refs/heads/master
|
/settings_example.py
|
# -*- coding: utf-8 -*-
# Which templates don't you want to generate? (You can use regular expressions here!)
# Use strings (with single or double quotes), and separate each template/regex in a line terminated with a comma.
IGNORE_JINJA_TEMPLATES = [
'.*base.jinja',
'.*tests/.*'
]
# Here you can override the default jinja environment setup
JINJA_ENVIRONMENT = {
# 'BLOCK_START_STRING': '{%',
# 'BLOCK_END_STRING': '%}',
# 'VARIABLE_START_STRING': '{{',
# 'VARIABLE_END_STRING': '}}',
# 'COMMENT_START_STRING': '{#',
# 'COMMENT_END_STRING': '#}',
# 'LINE_STATEMENT_PREFIX': None,
# 'LINE_COMMENT_PREFIX': None,
# 'TRIM_BLOCKS': True, # Jinja default is False
    # 'LSTRIP_BLOCKS': True, # Jinja default is False
# 'NEWLINE_SEQUENCE': '\n',
# 'KEEP_TRAILING_NEWLINE': False
}
# Do you have any additional variables to the templates? Put 'em here! (use dictionary ('key': value) format)
EXTRA_VARIABLES = {
'project_name': 'WaitCorp',
'current_year': 2042,
'debug': False,
'triplicate': lambda x: x * 3
}
OUTPUT_OPTIONS = {
'extension': '.html', # Including leading '.'
'remove_double_extension': False # If you use something like sample.jinja.html
}
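# Worked example of the two options above: 'sample.jinja.html' is first stripped
# to 'sample.jinja' by the compiler; with 'remove_double_extension': True it becomes
# 'sample', and appending 'extension' then yields 'sample.html'.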
|
{"/tests/test_jinja_standalone_compiler.py": ["/jinja2_standalone_compiler/__init__.py"]}
|
16,322
|
LuennyEvelly/Projeto_Flask
|
refs/heads/master
|
/venv/views.py
|
from flask import render_template, request, redirect, session, flash, url_for
from models import Carro
from venv.carro import app, db
from dao import CarroDao, UsuarioDao
carro_dao = CarroDao(db)
usuario_dao = UsuarioDao(db)
@app.route('/')
def index():
return render_template('lista.html',titulo='carro', carros=carro_dao.listar())
@app.route('/novo')
def novo():
    if 'usuario_logado' not in session or session['usuario_logado'] is None:
return redirect(url_for('login', proxima=url_for('novo')))
return render_template('novo.html', titulo='Novo Carro')
@app.route('/criar', methods=['POST',])
def criar():
id = request.form['id']
marca = request.form['marca']
modelo = request.form['modelo']
cor = request.form['cor']
combustivel = request.form['combustivel']
ano = request.form['ano']
carro = Carro(marca, modelo, cor, combustivel, ano)
carro = carro_dao.salvar(carro)
return redirect(url_for('index'))
@app.route('/editar/<int:id>')
def editar(id):
    if 'usuario_logado' not in session or session['usuario_logado'] is None:
return redirect(url_for('login', proxima=url_for('editar', id=id)))
carro = carro_dao.busca_por_id(id)
return render_template('editar.html', titulo='Editando Carro', carro=carro)
@app.route('/atualizar', methods=['POST',])
def atualizar():
id = request.form['id']
marca = request.form['marca']
modelo = request.form['modelo']
cor = request.form['cor']
combustivel = request.form['combustivel']
ano = request.form['ano']
carro = Carro(marca, modelo, cor, combustivel, ano, id)
carro_dao.salvar(carro)
return redirect(url_for('index'))
@app.route('/deletar/<int:id>')
def deletar(id):
    if 'usuario_logado' not in session or session['usuario_logado'] is None:
return redirect(url_for('login', proxima=url_for('deletar', id=id)))
carro_dao.deletar(id)
flash("O carro foi removido com sucesso!")
return redirect(url_for('index'))
@app.route('/login')
def login():
proxima = request.args.get('proxima')
return render_template('login.html', titulo='login', proxima=proxima)
@app.route('/autenticar', methods=['POST',])
def autenticar():
usuario = usuario_dao.autenticar(request.form['usuario'], request.form['senha'])
if usuario:
session['usuario_logado'] = usuario.id
        flash(usuario.nome + ' logged in successfully!')
        proxima_pagina = request.form['proxima']
        return redirect(proxima_pagina)
else:
        flash('Invalid username or password, try again!')
return redirect(url_for('login'))
@app.route('/logout')
def logout():
session['usuario_logado'] = None
    flash('No user logged in!')
return redirect(url_for('index'))
@app.route('/carro/<int:id>')
def carro(id):
carro = carro_dao.busca_por_id(id)
return render_template('carros.html', titulo='Carro', carro=carro)
|
{"/venv/views.py": ["/venv/carro.py"]}
|
16,323
|
LuennyEvelly/Projeto_Flask
|
refs/heads/master
|
/venv/models.py
|
class Carro:
    def __init__(self, marca, modelo, cor, combustivel, ano, id=None):
self.id = id
self.marca = marca
self.modelo = modelo
self.cor = cor
self.combustivel = combustivel
self.ano = ano
class Usuario:
    def __init__(self, id, nome, senha):
self.id = id
self.nome = nome
self.senha = senha
|
{"/venv/views.py": ["/venv/carro.py"]}
|
16,324
|
LuennyEvelly/Projeto_Flask
|
refs/heads/master
|
/venv/dao.py
|
from models import Carro, Usuario
import psycopg2.extras
SQL_DELETA_CARRO = 'delete from carro where id = %s'
SQL_CARRO_POR_ID = 'SELECT id,marca, modelo, cor, combustivel, ano from carro where id = %s'
SQL_USUARIO_POR_ID = 'SELECT id, nome, senha from usuario where id = %s'
SQL_ATUALIZA_CARRO = 'UPDATE carro SET marca=%s, modelo=%s, cor=%s, combustivel=%s, ano=%s where id = %s'
SQL_BUSCA_CARRO = 'SELECT id, marca, modelo, cor, combustivel, ano from carro'
SQL_CRIA_CARRO = 'INSERT into carro ( marca, modelo, cor, combustivel, ano) values (%s, %s, %s, %s, %s) RETURNING id'
SQL_CRIA_USUARIO = 'INSERT into usuario (id, nome, senha) values (%s, %s, %s)'
SQL_ATUALIZA_USUARIO = 'UPDATE usuario SET id=%s, nome=%s, senha=%s where id = %s'
SQL_AUTENTICAR_USUARIO = 'SELECT id, nome, senha from usuario where id = %s AND senha = %s'
class CarroDao:
    def __init__(self, db):
self.__db = db
def salvar(self, carro):
cursor = self.__db.cursor()
if (carro.id):
cursor.execute(SQL_ATUALIZA_CARRO, (carro.marca, carro.modelo, carro.cor, carro.combustivel, carro.ano, carro.id))
else:
cursor.execute(SQL_CRIA_CARRO, (carro.marca, carro.modelo, carro.cor, carro.combustivel, carro.ano))
carro.id = cursor.fetchone()[0]
self.__db.commit()
cursor.close()
return carro
def listar(self):
cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(SQL_BUSCA_CARRO)
carros = traduz_carros(cursor.fetchall())
cursor.close()
return carros
def busca_por_id(self, id):
cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(SQL_CARRO_POR_ID, (id,))
tupla = cursor.fetchone()
cursor.close()
return Carro(tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], id=tupla[0])
def deletar(self, id):
cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(SQL_DELETA_CARRO, (id,))
self.__db.commit()
cursor.close()
class UsuarioDao:
    def __init__(self, db):
self.__db = db
    def buscar_por_id(self, id):
        cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute(SQL_USUARIO_POR_ID, (id,))
        dados = cursor.fetchone()
        cursor.close()
        usuario = traduz_usuario(dados) if dados else None
        return usuario
    def autenticar(self, id, senha):
        cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute(SQL_AUTENTICAR_USUARIO, (id, senha))
        dados = cursor.fetchone()
        cursor.close()
        usuario = traduz_usuario(dados) if dados else None
        return usuario
    def salvar(self, usuario):
        cursor = self.__db.cursor()
        # SQL_ATUALIZA_USUARIO expects four parameters (the trailing id feeds the
        # WHERE clause): update existing users, insert new ones.
        if self.buscar_por_id(usuario.id):
            cursor.execute(SQL_ATUALIZA_USUARIO, (usuario.id, usuario.nome, usuario.senha, usuario.id))
        else:
            cursor.execute(SQL_CRIA_USUARIO, (usuario.id, usuario.nome, usuario.senha))
        self.__db.commit()
        cursor.close()
        return usuario
def traduz_usuario(tupla):
return Usuario(tupla[0], tupla[1], tupla[2])
def traduz_carros(carros):
def cria_carro_com_tupla(tupla):
return Carro( tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], id=tupla[0])
return list(map(cria_carro_com_tupla, carros))
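# Usage sketch (assumes a live PostgreSQL instance with the credentials used in
# carro.py; the car attributes below are illustrative):
# import psycopg2
# db = psycopg2.connect(database='carros', user='postgres', password='postgres', host='127.0.0.1')
# dao = CarroDao(db)
# carro = dao.salvar(Carro('Fiat', 'Uno', 'vermelho', 'gasolina', 1995))
# print(dao.busca_por_id(carro.id).modelo)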
|
{"/venv/views.py": ["/venv/carro.py"]}
|
16,325
|
LuennyEvelly/Projeto_Flask
|
refs/heads/master
|
/venv/carro.py
|
from flask import Flask
import psycopg2
app = Flask(__name__)
app.secret_key = '4intin'
db = psycopg2.connect(database='carros', user='postgres', password='postgres', host='127.0.0.1')
from views import *
if __name__ == '__main__':
app.run(debug=True)
|
{"/venv/views.py": ["/venv/carro.py"]}
|
16,326
|
he-xu/TransferRNN
|
refs/heads/master
|
/run_rnn_in_ctxctl.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 22:23:18 2019
@author: dzenn
"""
#import TransferRNN.run_rnn_in_ctxctl
import sys
sys.path.append('/home/dzenn/anaconda3/envs/ctxctl/lib/python3.7/site-packages')
from TransferRNN.MainRNNLoop import MainRNNLoop
from TransferRNN.bias_tuning_tools import BiasTools
print("Successful import")
bt = BiasTools()
exec(open("TransferRNN/biases/256_biases.py").read())
#bt.copy_biases(4,5)
#bt.copy_biases(4,12)
print("Clearing CAMs...")
bt.clear_cams(1)
bt.clear_cams(3)
print("Initializing the main loop")
MainLoop = MainRNNLoop(backend = "ctxctl")
print("Loading the dataset")
MainLoop.prepare_dataset("TransferRNN/data/")
print("Loading complete. Starting...")
MainLoop.run_loop(100)
#
MainLoop.export_error()
MainLoop.export_conn_log()
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,327
|
he-xu/TransferRNN
|
refs/heads/master
|
/force.py
|
import numpy as np
def ternarize(w_new, cam_num):
w_order = np.argsort(np.abs(w_new.T), axis=0)
w_sorted = w_new.T[w_order, np.arange(w_order.shape[1])]
w_sorted[:-cam_num, :]=0
w_order_order = np.argsort(w_order, axis=0)
w_undone = w_sorted[w_order_order, np.arange(w_order_order.shape[1])].T
w_undone[w_undone>0] = 1
w_undone[w_undone<0] = -1
return w_undone
def update_weight(rate_psc, rate_teacher, w_real, w_ternary, P_prev=None, n=6, m=1, cam_num=63, learning_rate=0.1):
    # P_prev (the RLS inverse-correlation estimate) and num_timesteps were
    # previously undefined globals; P_prev is now an explicit argument and
    # num_timesteps is taken from the rate array. w_real is kept for interface
    # compatibility but is not used here.
    if P_prev is None:
        P_prev = np.eye(rate_psc.shape[0])
    num_timesteps = rate_psc.shape[1]  # one column of rates per timestep
    rate_recurrent = w_ternary.dot(rate_psc)
    rate_teacher_tile = np.tile(rate_teacher, (n, m))
    error = rate_recurrent - rate_teacher_tile
    d_w = 0
    for t in range(num_timesteps):
        r_t = rate_psc[:, t][:, np.newaxis]
        P_up = P_prev.dot(r_t.dot(r_t.T.dot(P_prev)))
        P_down = 1 + r_t.T.dot(P_prev.dot(r_t))
        P_t = P_prev - P_up / P_down
        P_prev = P_t  # carry the running RLS estimate forward to the next timestep
        e_t = error[:, t][:, np.newaxis]
        d_w += e_t.dot(r_t.T.dot(P_t))
    d_w = d_w / num_timesteps
w_new = w_ternary - learning_rate*d_w
w_ternary = ternarize(w_new, cam_num)
norm_ratio = np.linalg.norm(w_new, 'fro')/np.linalg.norm(w_ternary, 'fro')
if norm_ratio > 1:
c_grad = 1
else:
c_grad = -1
return w_ternary, c_grad
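# Minimal smoke test for the two functions above (a sketch: the shapes, seed and
# rates are illustrative assumptions, not recordings from the chip).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_neurons, n_steps = 6, 28
    rate_psc = rng.random((n_neurons, n_steps))   # per-neuron rates, one column per timestep
    rate_teacher = rng.random((1, n_steps))       # tiled to n rows inside update_weight
    w_start = ternarize(rng.standard_normal((n_neurons, n_neurons)), cam_num=3)
    w_next, c_grad = update_weight(rate_psc, rate_teacher, None, w_start, n=n_neurons, m=1, cam_num=3)
    print("c_grad:", c_grad)
    print("ternary weights:\n", w_next)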
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,328
|
he-xu/TransferRNN
|
refs/heads/master
|
/NeuronNeuronConnector.py
|
import CtxDynapse
class DynapseConnector:
"""
Connector for DYNAP-se chip
"""
def __init__(self):
"""
Initialize the class with empty connections
sending_connections_to: map of (key, value), where key is the neuron sending connection and value is a list of all connections that start with the key neuron
receiving_connections_from: map of (key, value), where key is the neuron receiving connection and value is a list of all connections that end with the key neuron
"""
self.sending_connections_to = {}
self.receiving_connections_from = {}
self.sending_virtual_connections_to = {}
self.receiving_virtual_connections_from = {}
def _save_connection(self, pre_neuron, post_neuron):
self._add_to_list(self.sending_connections_to, pre_neuron, post_neuron)
self._add_to_list(self.receiving_connections_from, post_neuron, pre_neuron)
def _save_virtual_connection(self, pre_neuron, post_neuron):
self._add_to_list(self.sending_virtual_connections_to, pre_neuron, post_neuron)
self._add_to_list(
self.receiving_virtual_connections_from, post_neuron, pre_neuron
)
def _add_to_list(self, list_name, key, value):
if key in list_name:
list_name[key].append(value)
else:
list_name[key] = [value]
def _remove_connection(self, pre_neuron, post_neuron):
self._remove_from_list(self.sending_connections_to, pre_neuron, post_neuron)
self._remove_from_list(self.receiving_connections_from, post_neuron, pre_neuron)
def _remove_virtual_connection(self, pre_neuron, post_neuron):
self._remove_from_list(
self.sending_virtual_connections_to, pre_neuron, post_neuron
)
self._remove_from_list(
self.receiving_virtual_connections_from, post_neuron, pre_neuron
)
def _remove_from_list(self, list_name, key, value):
if key in list_name:
try:
list_name[key].remove(value)
if not list_name[key]:
list_name.pop(key)
except (ValueError, KeyError):
print(
"The neuron {} has no connection with neuron {}".format(key, value)
)
else:
raise IndexError("The neuron {} has no connections to remove".format(key))
def add_connection(self, pre, post, synapse_type):
"""
Connect two neurons.
Attributes:
pre: neuron that sends the connection
post: neuron that receives the connection
synapse_type: one of the four expected synapse types
"""
# check if one of the neurons is virtual
pre_virtual = pre.is_virtual()
post_virtual = post.is_virtual()
# if post_neuron is virtual, raise an error.
# Virtual neurons cannot receive connections
if post_virtual:
raise Exception(
"post neuron cannot be virtual. Virtual neurons do not receive connections."
)
if pre_virtual:
self.add_virtual_connection(pre, post, synapse_type)
return
pre_srams = pre.get_srams()
pre_core_id = pre.get_core_id()
pre_chip_id = pre.get_chip_id()
pre_neuron_id = pre.get_neuron_id()
post_cams = post.get_cams()
post_core_id = post.get_core_id()
post_chip_id = post.get_chip_id()
post_neuron_id = post.get_neuron_id()
target_chip = post_chip_id
# check if the pre can send a connection to target chip
pre_available = False
pre_sram = 0
for i in range(len(pre_srams)):
if pre_srams[i].get_target_chip_id() == target_chip:
pre_available = True
pre_sram = i
break
if not pre_available:
for i in range(len(pre_srams)):
if not pre_srams[i].is_used():
pre_available = True
pre_sram = i
break
if not pre_available:
raise Exception("pre neuron has no available outputs")
# check if the post can receive a connection
post_available = False
for i in range(len(post_cams)):
# if there is space left on post_cams
if (
post_cams[i].get_pre_neuron_id()
+ post_cams[i].get_pre_neuron_core_id() * 256
) == 0:
post_available = True
post_cam = i
break
if not post_available:
raise Exception("post neuron has no available inputs")
# connect
pre_neuron_address = pre_core_id * 256 + pre_neuron_id
post_neuron_address = post_core_id * 256 + post_neuron_id
virtual_core_id = 0
if pre_srams[pre_sram].is_used():
virtual_core_id = pre_srams[pre_sram].get_virtual_core_id()
else:
virtual_core_id = pre_core_id
core_mask = pre_srams[pre_sram].get_core_mask() | (1 << post_core_id)
d = (post_chip_id & 1) - (pre_chip_id & 1)
if d < 0:
sx = True
else:
sx = False
dx = abs(d)
d = ((post_chip_id & 2)>>1) - ((pre_chip_id & 2)>>1)
if d < 0:
sy = False
else:
sy = True
dy = abs(d)
pre_srams[pre_sram].set_virtual_core_id(virtual_core_id)
pre_srams[pre_sram].set_target_chip_id(post_chip_id)
pre_srams[pre_sram].set_sx(sx)
pre_srams[pre_sram].set_sy(sy)
pre_srams[pre_sram].set_dx(dx)
pre_srams[pre_sram].set_dy(dy)
pre_srams[pre_sram].set_used(True)
pre_srams[pre_sram].set_core_mask(core_mask)
post_cams[post_cam].set_pre_neuron_id(pre_neuron_id)
post_cams[post_cam].set_pre_neuron_core_id(pre_core_id)
post_cams[post_cam].set_type(synapse_type)
#CtxDynapse.dynapse.set_config_chip_id(pre_chip_id)
#CtxDynapse.dynapse.write_sram(
# pre_neuron_address, pre_sram, virtual_core_id, sx, dx, sy, dy, core_mask
#)
#
#if pre_chip_id != post_chip_id:
# CtxDynapse.dynapse.set_config_chip_id(post_chip_id)
#CtxDynapse.dynapse.write_cam(
# pre_neuron_address, post_neuron_address, post_cam, synapse_type
#)
self._save_connection(pre, post)
def add_connection_from_list(
self, pre_neurons_list, post_neuron_list, synapse_types
):
"""
Connect neurons using a python list of pre, post and synapse types.
Attributes:
pre_neurons_list: list with neurons that send the connection
post_neuron_list: list with neurons that receive the connection
synapse_types: list with the connection type between the neurons. It can be a list with one single
element indicating all the connections are of the same type, otherwise, the size of the synapse types list must
match the size of pre and post neurons list.
"""
if len(pre_neurons_list) != len(post_neuron_list):
print(
"The number of pre and post neurons must be the same. No connection will be created."
)
return
same_synapse_type = False
if len(synapse_types) == 1:
same_synapse_type = True
if (len(pre_neurons_list) != len(synapse_types)) and (not same_synapse_type):
print(
"The number of synapses type must match the number of connections. No connection will be created."
)
return
for i in range(len(pre_neurons_list)):
self.add_connection(
pre_neurons_list[i],
post_neuron_list[i],
synapse_types[0] if same_synapse_type else synapse_types[i],
)
def add_connection_from_file(self, connection_file_path):
"""
Connects neurons reading the values from a file. The file must contain three elements per line: pre neuron, post neuron and synapse type.
Attributes:
connection_file: file that contains three elements in each row: pre neuron, post neuron and synapse type.
"""
number_of_connections = 0
with open(connection_file_path, "r") as fp:
for i in fp.readlines():
tmp = i.split(" ")
                # verify that there are 3 elements in the line
if len(tmp) == 3:
# connect
self.add_connection(tmp[0], tmp[1], tmp[2])
number_of_connections += 1
else:
print(
"Bad format error. Error in the line {}. The connections before this point were created.".format(
number_of_connections + 1
)
)
def add_virtual_connection(self, pre, post, synapse_type):
"""
Connect a virtual neuron with a real (on chip) neuron.
Attributes:
pre: neuron that sends the connection, it must be virtual
post: neuron that receives the connection, it must not be virtual
synapse_type: one of the four expected synapse types
"""
if not pre.is_virtual():
raise Exception("pre neuron must be virtual")
if post.is_virtual():
raise Exception("post neuron must not be virtual")
pre_core_id = pre.get_core_id()
pre_chip_id = pre.get_chip_id()
pre_neuron_id = pre.get_neuron_id()
post_cams = post.get_cams()
post_core_id = post.get_core_id()
post_chip_id = post.get_chip_id()
post_neuron_id = post.get_neuron_id()
# check if the post can receive a connection
post_available = False
for i in range(len(post_cams)):
# if there is space left on post_cams
if (
post_cams[i].get_pre_neuron_id()
+ post_cams[i].get_pre_neuron_core_id() * 256
) == 0:
post_available = True
post_cam = i
break
if not post_available:
raise Exception("post neuron has no available inputs")
# connect
pre_neuron_address = pre_core_id * 256 + pre_neuron_id
post_neuron_address = post_core_id * 256 + post_neuron_id
virtual_core_id = pre_core_id
post_cams[post_cam].set_pre_neuron_id(pre_neuron_id)
post_cams[post_cam].set_pre_neuron_core_id(pre_core_id)
post_cams[post_cam].set_type(synapse_type)
#CtxDynapse.dynapse.set_config_chip_id(post_chip_id)
#CtxDynapse.dynapse.write_cam(
# pre_neuron_address, post_neuron_address, post_cam, synapse_type
#)
self._save_virtual_connection(pre, post)
def remove_connection(self, pre_neuron, post_neuron):
"""
Delete the connection between two neurons.
Attributes:
pre_neuron: neuron that sends the connection
post_neuron: neuron that receives the connection
"""
# check if one of the neurons is virtual
pre_virtual = pre_neuron.is_virtual()
post_virtual = post_neuron.is_virtual()
# if post_neuron is virtual, raise an error.
# Virtual neurons do not receive connections, thus there is no connection to remove
if post_virtual:
raise Exception("post neuron is virtual, there is no connection to remove.")
if pre_virtual:
self.remove_virtual_connection(pre_neuron, post_neuron)
return
# first, try to remove the neurons from the lists. This will raise an exception if the neurons aren't connected.
# todo: handle exception
self._remove_connection(pre_neuron, post_neuron)
# now, we can remove the connections on chip
# get info about pre and post neurons
pre_srams = pre_neuron.get_srams()
pre_core_id = pre_neuron.get_core_id()
pre_chip_id = pre_neuron.get_chip_id()
pre_neuron_id = pre_neuron.get_neuron_id()
post_cams = post_neuron.get_cams()
post_core_id = post_neuron.get_core_id()
post_chip_id = post_neuron.get_chip_id()
post_neuron_id = post_neuron.get_neuron_id()
pre_neuron_address = pre_core_id * 256 + pre_neuron_id
post_neuron_address = post_core_id * 256 + post_neuron_id
# check what sram sends a connection to post neuron
pre_sram = 0
for i in range(len(pre_srams)):
if pre_srams[i].get_target_chip_id() == post_chip_id:
pre_sram = i
break
pre_virtual_core_id = pre_srams[pre_sram].get_virtual_core_id()
# check what cam receives a connection from pre neuron
post_cam = 0
for i in range(len(post_cams)):
if (post_cams[i].get_pre_neuron_core_id() * 256 + post_cams[i].get_pre_neuron_id()) == pre_neuron_address:
post_cam = i
break
#CtxDynapse.dynapse.set_config_chip_id(post_chip_id)
## information of post-synaptic neuron, setting the address of pre-synaptic neuron to zero
#CtxDynapse.dynapse.write_cam(0, post_neuron_address, post_cam, 0)
post_cams[post_cam].set_pre_neuron_id(0)
post_cams[post_cam].set_pre_neuron_core_id(0)
## updating pre-synaptic neuron
#if pre_chip_id != post_chip_id:
# CtxDynapse.dynapse.set_config_chip_id(pre_chip_id)
        # if there are no other connections from the pre neuron, set it to zero and mark it as unused
if pre_neuron not in self.sending_connections_to:
# information of pre-synaptic neuron, setting the address of post-synaptic neuron to zero
#CtxDynapse.dynapse.write_sram(
# pre_neuron_address, pre_sram, 0, 0, 0, 0, 0, 0
#)
pre_srams[pre_sram].set_used(False)
pre_srams[pre_sram].set_virtual_core_id(0)
pre_srams[pre_sram].set_target_chip_id(0)
pre_srams[pre_sram].set_sx(0)
pre_srams[pre_sram].set_sy(0)
pre_srams[pre_sram].set_dx(0)
pre_srams[pre_sram].set_dy(0)
pre_srams[pre_sram].set_core_mask(0)
# if there are other connections, check if they are projecting to the same core as the post-neuron
else:
post_list = self.sending_connections_to[pre_neuron]
found_post_same_core = False
for element in post_list:
if element.get_core_id() == post_core_id:
found_post_same_core = True
            # if none of the connections go to the same core as the post_neuron, we clear the corresponding bit of the core mask
            if not found_post_same_core:
                core_mask = pre_srams[pre_sram].get_core_mask() & ~(1 << post_core_id)
#CtxDynapse.dynapse.write_sram(
# pre_neuron_address, pre_sram, 0, 0, 0, 0, 0, core_mask
#)
pre_srams[pre_sram].set_core_mask(core_mask)
def remove_connection_from_list(self, pre_neurons_list, post_neuron_list):
"""
Delete the connection between two list of neurons. The number of elements in each list must be the same.
Attributes:
pre_neurons_list: list of neurons that send the connection
post_neurons_list: list of neuron that receive the connection
"""
if len(pre_neurons_list) != len(post_neuron_list):
print(
"The number of pre and post neurons must be the same. No connection was removed."
)
return
for i in range(len(pre_neurons_list)):
# todo: handle exception
self.remove_connection(pre_neurons_list[i], post_neuron_list[i])
def remove_connection_from_file(self, unconnect_file_path):
"""
Delete the connection between neurons reading the values from a file. The file must contain two elements per line: pre neuron and post neuron.
Attributes:
connection_file: file that contains two elements in each row: pre neuron and post neuron.
"""
number_of_connections_removed = 0
with open(unconnect_file_path, "r") as fp:
for i in fp.readlines():
tmp = i.split(" ")
                # verify that there are 2 elements in the line
if len(tmp) == 2:
# unconnect
# todo: catch exception
self.remove_connection(tmp[0], tmp[1])
number_of_connections_removed += 1
else:
print(
"Bad format error. Error in the line {}. The connections before this point were removed.".format(
number_of_connections_removed + 1
)
)
def remove_virtual_connection(self, pre_neuron, post_neuron):
"""
Delete the connection between a virtual neuron and a real (on chip) neuron.
Attributes:
pre_neuron: neuron that sends the connection - must be virtual
post_neuron: neuron that receives the connection - must not be virtual
"""
if not pre_neuron.is_virtual():
raise Exception("pre neuron must be virtual")
if post_neuron.is_virtual():
raise Exception("post neuron must not be virtual")
# check if this connection is on the lists.
# This will raise an exception if the neurons aren't connected.
# todo: handle exception
self._remove_virtual_connection(pre_neuron, post_neuron)
# now, we can remove the connection on chip
# we just need to clean the cam of the post, pre neuron is virtual
# get info about pre and post neurons
pre_core_id = pre_neuron.get_core_id()
pre_neuron_id = pre_neuron.get_neuron_id()
post_cams = post_neuron.get_cams()
post_core_id = post_neuron.get_core_id()
post_chip_id = post_neuron.get_chip_id()
post_neuron_id = post_neuron.get_neuron_id()
pre_neuron_address = pre_core_id * 256 + pre_neuron_id
post_neuron_address = post_core_id * 256 + post_neuron_id
# check what cam receives a connection from pre neuron
post_cam = 0
for i in range(len(post_cams)):
            if (post_cams[i].get_pre_neuron_core_id() * 256 + post_cams[i].get_pre_neuron_id()) == pre_neuron_address:
post_cam = i
break
#CtxDynapse.dynapse.set_config_chip_id(post_chip_id)
# information of post-synaptic neuron, setting the address of pre-synaptic neuron to zero
#CtxDynapse.dynapse.write_cam(0, post_neuron_address, post_cam, 0)
post_cams[post_cam].set_pre_neuron_id(0)
post_cams[post_cam].set_pre_neuron_core_id(0)
def remove_sending_connections(self, neuron):
"""
Remove all connections leaving the informed neuron
Attributes:
neuron: the neuron passed as parameter will be considered the pre-synaptic neuron, and all the connections that leave this neuron will be removed.
"""
# todo: handle exception
        if neuron in self.sending_connections_to:
            # iterate over a copy, since remove_connection mutates the underlying list
            for i in list(self.sending_connections_to[neuron]):
                self.remove_connection(neuron, i)
def remove_receiving_connections(self, neuron):
"""
Remove all connections arriving in the informed neuron
Attributes:
neuron: the neuron passed as parameter will be considered the post-synaptic neuron, and all the connections that are sent to this neuron will be removed.
"""
        if neuron.is_virtual():
raise Exception(
"neuron {} is virtual and receives no connection".format(neuron)
)
# todo: handle exception
        if neuron in self.receiving_connections_from:
            # iterate over a copy, since remove_connection mutates the underlying list
            for i in list(self.receiving_connections_from[neuron]):
                self.remove_connection(i, neuron)
def remove_all_connections(self, neuron):
"""
Remove all connections of a neuron, i.e., all the connections that the neuron send and receive will be removed.
Attributes:
neuron: the neuron that will have all its connections removed.
"""
self.remove_sending_connections(neuron)
        if not neuron.is_virtual():
self.remove_receiving_connections(neuron)
if __name__ == "__main__":
model = CtxDynapse.model
neurons = model.get_shadow_state_neurons()
dynapse_connector = DynapseConnector()
if len(neurons) > 2:
dynapse_connector.add_connection(neurons[0], neurons[1], 3)
dynapse_connector.add_connection(neurons[0], neurons[2], 3)
else:
print("missing neurons to connect")
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,329
|
he-xu/TransferRNN
|
refs/heads/master
|
/MainRNNLoop.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 15:21:22 2019
@author: dzenn
"""
#exec(open("MainRNNLoop.py").read())
try:
from DynapseRNN import DynapseRNN
from matplotlib import pyplot as plt
from random import sample
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
except ModuleNotFoundError:
print("Trying to load Dynapse RNN from outside TransferRNN directory")
from TransferRNN.DynapseRNN import DynapseRNN
#try:
# from bias_tuning_tools import BiasTools
#except ModuleNotFoundError:
# from TransferRNN.bias_tuning_tools import BiasTools
import numpy as np
import datetime
import pickle
class MainRNNLoop():
def __init__(self, num_inputs = 128, timesteps = 28, multiplex_factor = 2, backend = "ctxctl", c = None):
self.RNNController = DynapseRNN(num_inputs=num_inputs, timesteps=timesteps, multiplex_factor=multiplex_factor, backend=backend, c=c, debug=True)
self.backend = backend
self.num_inputs = num_inputs
self.timesteps = timesteps
self.multiplex_factor = multiplex_factor
self.c = c
self.recorded_error = []
def prepare_dataset(self, dataset_path = ""):
"""
Loads datasets (input, teacher and mnist) into MainRNNLoop internal variables
Args:
dataset_path (str, optional) : path to datasets
"""
projection_train = open(dataset_path + 'x_projection_train.pkl', 'rb')
state_train = open(dataset_path + 'state_train.pkl', 'rb')
mnist = open(dataset_path + 'mnist.pkl','rb')
u_1 = pickle._Unpickler(projection_train)
u_2 = pickle._Unpickler(state_train)
u_3 = pickle._Unpickler(mnist)
u_1.encoding = 'latin1'
u_2.encoding = 'latin1'
u_3.encoding = 'latin1'
self.projection_train_data = u_1.load()
self.state_train_data = u_2.load()
# self.mnist_train, self.mnist_test, self.mnist_validate = u_3.load() # load validation set
def export_error(self, filename = "RNNError.dat"):
"""
Export mean error log to a file
Args:
filename (str, optional)
"""
# f_out = open(str(datetime.datetime.now()) + " " + filename, "wb")
f_out = open(filename, "w")
for err in self.recorded_error:
f_out.write(str(err) + "\n")
f_out.close()
def export_conn_log(self, filename = "RNNConnLog.dat"):
"""
Export recorded connectivity changes
Args:
filename (str, optional)
"""
# f_out = open(str(datetime.datetime.now()) + " " + filename, "wb")
f_out = open(filename, "w")
for item in self.RNNController.conn_log:
f_out.write(str(item[0]) + " " + str(item[1]) + "\n")
f_out.close()
def start(self):
pass
def run_loop(self, num_images):
"""
Run the image presentation loop num_images times.
The loop consists of 6 steps:
1. Start recording spikes (initialize event filters)
        2. Present the input spike trains (actual realtime image presentation)
3. Stop recording (clear event filters)
4. Compute rates based on recorded events by binning spikes by neuron indices and timesteps
5. Compute the new ternary weight matrix based on the rates and the state_train_data (takes top cam_num largest gradients per neuron with stochastic rounding)
6. Apply the new weight matrix to the chip
Note:
Steps 2 and 6 take the most amount of time
"""
for image_idx in range(num_images):
print("Start recording")
self.RNNController.start_recording_spikes()
print("Showing digit %d" % (image_idx))
self.RNNController.present_stimulus(self.projection_train_data[image_idx], 2/6)
print("Stopping the recording")
self.RNNController.stop_recording_spikes()
print("Processing recorded evts...")
rates = self.RNNController.process_recorded_evts()
print(np.array(rates)/100)
if self.backend == 'rpyc':
ids = sample(range(self.num_inputs), 4)
rate_recurrent = self.RNNController.w_ternary.dot(np.array(rates)/100)
rate_teacher = self.state_train_data[0]
rate_teacher_tile = np.tile(rate_teacher.T, (self.multiplex_factor,1))
for idx in ids:
plt.figure(idx)
plt.plot(range(len(rate_recurrent[idx])), rate_recurrent[idx], 'r--', range(len(rate_teacher_tile[idx])), rate_teacher_tile[idx], 'b--')
plt.show()
print("Computing gradients...")
c_grad, mean_error = self.RNNController.update_weight(np.array(rates)/100, (self.state_train_data[0]), learning_rate = 0.01)
self.recorded_error.append(mean_error)
self.RNNController.apply_new_matrix(self.RNNController.w_ternary, False)
print("C_grad: %g, mean_error %g" % (c_grad, mean_error))
print("Done")
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,330
|
he-xu/TransferRNN
|
refs/heads/master
|
/bias_tuning_tools.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 22 12:51:51 2018
Set of tools to simplify the bias tuning process of the DYNAP-SE chip
using aiCTX's cortexcontrol.
Load and instantiate Bias Tuning Tools (BiTTs) in cortexcontrol console:
>>> import bias_tuning_tools
>>> bt = bias_tuning_tools.BiasTools()
Several actions available:
Monitoring I_mem:
Use bt.monitor(core_id, neuron_id) to output I_mem to the respective DYNAP-SE output
Stimulating a core with steps of DC current:
Use bt.dc_steps(core_id, period, time_interval, coarse_val) to run steps of current with
specified period and amplitude for the duration of the specified time interval.
Stimulating a chip with a periodic spike train:
Step 1:
Use bt.connect_spikegen(chip_id, syn_type) to select the chip and the synapse to use.
For convenience, syn_type is an integer: 0 is SLOW_INH, 1 is FAST_INH, 2 is SLOW_EXC, 3 is FAST_EXC
Step 2:
Use bt.set_rate(rate) to set the rate of the spike train and start spikegen.
You can always start and stop the spikegen with bt.spikegen.start() and bt.spikegen.stop()
and check the status with bt.spikegen.is_running()
Saving and loading biases with wrapper functions:
bt.save_biases(filename) and bt.load_biases(filename).
IMPORTANT: Make sure the filename ends with *.py
##TODO: Add a check for "*.py"
Copying biases:
Use bt.copy_biases(source_core, target_core) to copy the full set of biases from one
core to another.
@author: dzenn
"""
# import bias_tuning_tools; bt = bias_tuning_tools.BiasTools();
# import imp; imp.reload(bias_tuning_tools);
#import sys
#sys.path.append('/home/dzenn/anaconda3/envs/ctxctl3.7/lib/python3.7/site-packages')
#sys.path.append('/home/dzenn/anaconda3/envs/ctxctl/lib/python3.7/site-packages')
#sys.path.append('/home/theiera/gitlab/NCS/CTXCTL/cortexcontrol')
#import numpy as np
from time import sleep, clock
import CtxDynapse
from NeuronNeuronConnector import DynapseConnector
import PyCtxUtils
from CtxDynapse import DynapseCamType as SynTypes
#import numpy as np
class BiasTools(object):
def __init__(self):
self.model = CtxDynapse.model
self.virtual_model = CtxDynapse.VirtualModel()
self.groups = self.model.get_bias_groups()
self.neurons = self.model.get_shadow_state_neurons()
self.virtual_neurons = self.virtual_model.get_neurons()
self.buf_evt_filter = None
self.spikes = []
self.event_filters = []
self.core_rate = None
self.coarse_value = None
self.coarse_value_set = False
self.connector = DynapseConnector()
self.poisson_spike_gen = self.model.get_fpga_modules()[0]
self.spikegen = self.model.get_fpga_modules()[1]
self.syn_types = SynTypes
self.spikegen_target_chip = None
def off_core(self, core_id):
"""
        Switch the core off by setting TAU1 to a large value
Args:
core_id (int): core index [0-15]
"""
self.groups[core_id].set_bias("IF_TAU1_N", 100, 7)
def save_biases(self, filename):
"""
A wrapper to save the biases to a file
Args:
            filename (str): filename should end with '.py'
"""
PyCtxUtils.save_biases(filename)
def load_biases(self, filename):
"""
A wrapper to load the biases from file
Args:
            filename (str): filename should end with '.py'
"""
exec(open(filename).read())
def clear_all_cams(self):
"""
Clear cams of the whole board
"""
for chip_id in range(4):
CtxDynapse.dynapse.clear_cam(chip_id)
def clear_cams(self, chip_id):
"""
Clear cams of the specified chip
Args:
chip_id (int): chip index
"""
CtxDynapse.dynapse.clear_cam(chip_id)
def clear_all_srams(self):
"""
Clear srams of the whole board
"""
for chip_id in range(4):
CtxDynapse.dynapse.clear_sram(chip_id)
def clear_sram(self, chip_id):
"""
Clear srams of the specified chip
Args:
chip_id (int): chip index
"""
CtxDynapse.dynapse.clear_sram(chip_id)
def copy_biases(self, source_core, target_core):
"""
Copies the full set of biases from one core to another
Args:
source_core (int): core_id from 0 to 15 from where to copy the biases
target_core (int): core_id from 0 to 15 where to write the biases
"""
source_biases = self.groups[source_core].get_biases()
for bias in source_biases:
self.groups[target_core].set_bias(bias.get_bias_name(), bias.get_fine_value(), bias.get_coarse_value())
def connect_spikegen(self, chip_id, syn_type_int):
""" Creates connections from virtual neuron 1 to all neurons of the selected chip
using selected syn_type.
Args:
chip_id (int): stimulated chip ID
            syn_type_int (int): integer synapse type to be converted to DynapseCamType within the method
"""
self.spikegen_target_chip = chip_id
if syn_type_int == 0:
syn_type = SynTypes.SLOW_INH
elif syn_type_int == 1:
syn_type = SynTypes.FAST_INH
elif syn_type_int == 2:
syn_type = SynTypes.SLOW_EXC
elif syn_type_int == 3:
syn_type = SynTypes.FAST_EXC
else:
print("Unable syn type, please try again")
return
for n in range(1024):
self.connector.add_connection(pre=self.virtual_neurons[1],
post=self.neurons[n + 1024*chip_id],
synapse_type=syn_type)
self.model.apply_diff_state()
def disconnect_spikegen(self):
"""
Removes all connections from the spikegen to physical neurons.
##TODO: Seems not to work, needs fixing
"""
self.spikegen_target_chip = None
for n in range(1024):
self.connector.remove_sending_connections(self.virtual_neurons[n])
self.model.apply_diff_state()
def set_rate(self, rate):
"""
Sets spiking rate of the spikegen and starts it.
Args:
            rate (int): Spiking rate in Hz, can't be lower than ~5 Hz
"""
self.spikegen.stop()
isi_base = 900
unit_mult = isi_base/90
sleep((rate**(-1)))
fpga_event = CtxDynapse.FpgaSpikeEvent()
fpga_event.core_mask = 15
fpga_event.target_chip = self.spikegen_target_chip
fpga_event.neuron_id = 1
fpga_event.isi = int(((rate*1e-6)**(-1))/unit_mult)
self.spikegen.set_variable_isi(False)
self.spikegen.preload_stimulus([fpga_event])
self.spikegen.set_isi(int(((rate*1e-6)**(-1))/unit_mult))
self.spikegen.set_isi_multiplier(isi_base)
self.spikegen.set_repeat_mode(True)
self.spikegen.start()
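    # Worked example of the ISI arithmetic above (the FPGA tick semantics are
    # assumed, not verified): rate = 100 Hz gives (rate * 1e-6) ** -1 = 10000,
    # unit_mult = 900 / 90 = 10, so isi = int(10000 / 10) = 1000, paired with
    # isi_multiplier = isi_base = 900.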
def dc_steps(self, core_id, period, time_interval, coarse_val):
""" Create square steps of DC current using the IF_DC_P bias with
the specified period and within the specified time_interval, with coarse_val amplitude.
Args:
core_id (int): target core
period (int): period of DC steps, in seconds
time_interval (int): time interval for the method to run, in seconds
coarse_val (int): amplitude of DC steps set by the coarse value of the IF_DC_P bias (fine value is set to 128)
"""
start_time = clock()
while (clock() - start_time < time_interval):
self.groups[core_id].set_bias("IF_DC_P", 128, coarse_val)
sleep(period)
self.groups[core_id].set_bias("IF_DC_P", 0, 0)
sleep(period)
def get_core_rate(self, core_id, time_interval):
""" Create square steps of DC current using the IF_DC_P bias with
the specified period and within the specified time_interval, with coarse_val amplitude.
Args:
core_id (int): target core
period (int): period of DC steps, in seconds
time_interval (int): time interval for the method to run, in seconds
coarse_val (int): amplitude of DC steps set by the coarse value of the IF_DC_P bias (fine value is set to 128)
"""
buf_evt_filter = CtxDynapse.BufferedEventFilter(self.model, [idx for idx in range(core_id*256, core_id*256 + 256)])
start_time = clock()
while (clock() - start_time < time_interval):
evts = buf_evt_filter.get_events()
print("Core %d average rate is %g Hz" % (core_id, len(evts)/256))
sleep(1)
buf_evt_filter.clear()
def get_rates(self, n_ids = None, measurement_period = 1):
if n_ids is None:
n_ids = [l for l in range(len(self.neurons))]
buf_evt_filter = CtxDynapse.BufferedEventFilter(self.model, n_ids)
evts = buf_evt_filter.get_events()
sleep(measurement_period)
evts = buf_evt_filter.get_events()
buf_evt_filter.clear()
rates = [0 for l in range(len(self.neurons))]
if len(evts) != 0:
for spike in evts:
                rates[spike.neuron.get_neuron_id() + 256*spike.neuron.get_core_id() + 1024*spike.neuron.get_chip_id()] += 1
for l in range(len(self.neurons)):
rates[l] = rates[l]/measurement_period
return rates
def r(self):
print(self.get_rates([idx for idx in range(0, 10)]))
def get_core_rate_ts(self, core_id, time_interval):
buf_evt_filter = CtxDynapse.BufferedEventFilter(self.model, [idx for idx in range(core_id*256, core_id*256 + 25)])
start_time = clock()
while (clock() - start_time < time_interval):
evts = buf_evt_filter.get_events()
if len(evts) != 0:
print("Core %d average rate is %g Hz" % (core_id, len(evts)/(((evts[len(evts)-1].timestamp - evts[0].timestamp) / 1e6)*25)))
sleep(1)
buf_evt_filter.clear()
def monitor(self, core_id, neuron_id):
"""
Wrapper function for monitoring I_mem. Accepts core_id and neuron_id from 0 to 255 instead
of chip_id and neuron_id from 0 to 1023.
Args:
core_id (int): core ID to be monitored
neuron_id (int): neuron index within the selected core
"""
CtxDynapse.dynapse.monitor_neuron(int(core_id / 4), neuron_id + 256*(core_id % 4))
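    # Worked example: monitor(core_id=6, neuron_id=10) maps to chip_id 6 // 4 = 1
    # and on-chip neuron index 10 + 256 * (6 % 4) = 522, matching the
    # four-cores-per-chip, 256-neurons-per-core layout used throughout this file.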
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,331
|
he-xu/TransferRNN
|
refs/heads/master
|
/run_rnn_with_rpyc.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 23:45:13 2019
@author: dzenn
"""
#import PyCtxUtils; PyCtxUtils.start_rpyc_server()
import MainRNNLoop
import rpyc
print("Successful import")
self_path = MainRNNLoop.__file__
self_path = self_path[0:self_path.rfind('/')]
execute_bias_load_string = 'exec(open("' + self_path + '/biases/256_biases.py").read())'
execute_rate_tracker_load_string = 'exec(open("' + self_path + '/ctxctl_rate_tracker.py").read())'
c = rpyc.classic.connect("localhost", 1300)
RPYC_TIMEOUT = 1000 #defines a higher timeout
c._config["sync_request_timeout"] = RPYC_TIMEOUT # Set timeout to higher level
if c:
print("RPyC connection established")
c.execute(execute_bias_load_string)
c.execute(execute_rate_tracker_load_string)
print("Clearing CAMs...")
c.execute("CtxDynapse.dynapse.clear_cam(1)")
c.execute("CtxDynapse.dynapse.clear_cam(3)")
print("Initializing the main loop")
MainLoop = MainRNNLoop.MainRNNLoop(backend = "rpyc", c = c)
c.namespace['neuron_ids'] = MainLoop.RNNController.neuron_ids
c.namespace['lookup'] = MainLoop.RNNController.rnn_neurons_idx_lookup
c.namespace['timesteps'] = MainLoop.RNNController.timesteps
c.execute("rt = RateTracker(neuron_ids, lookup, timesteps, debug = True)")
print("Loading the dataset")
MainLoop.prepare_dataset("data/")
print("Loading complete. Starting...")
MainLoop.run_loop(100)
#
MainLoop.export_error()
MainLoop.export_conn_log()
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,332
|
he-xu/TransferRNN
|
refs/heads/master
|
/ctxctl_rate_tracker.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 19:14:16 2019
@author: dzenn
"""
import CtxDynapse
from time import time
class RateTracker(object):
def __init__(self, neuron_ids, lookup, timesteps, debug = False):
self.timesteps = timesteps
self.neuron_ids = neuron_ids
self.num_neurons = len(neuron_ids)
self.lookup = lookup
self.debug = debug
def start_recording_spikes(self):
"""
Initializes the event filter
"""
model = CtxDynapse.model
self.buf_evt_filter = CtxDynapse.BufferedEventFilter(model, self.neuron_ids)
evts = self.buf_evt_filter.get_events() # flush the event filter to start a new recording
self.recording_start_time = time()
def stop_recording_spikes(self):
"""
Stores all recorded events and clears the event filter
"""
self.evts = self.buf_evt_filter.get_events()
self.recording_stop_time = time()
self.buf_evt_filter.clear()
def process_recorded_evts(self):
"""
Returns firing rates AND input rates based on recorded events and current
weight matrix
"""
lookup = self.lookup
rates = []
input_rates = []
if self.debug:
print("Counting the spikes...")
# Preparing arrays and helper variables
for i in range(self.num_neurons):
rates.append([])
input_rates.append([])
for ts in range(self.timesteps):
rates[i].append(0)
input_rates[i].append(0)
if len(self.evts) != 0:
ref_timestamp = self.evts[0].timestamp
time_bin_size = int((self.recording_stop_time - self.recording_start_time)*1e+06/self.timesteps)
if self.debug:
print("Binning...")
# Placing spikes in bins
for evt in self.evts:
n_id = evt.neuron.get_neuron_id() + 256*evt.neuron.get_core_id() + 1024*evt.neuron.get_chip_id()
idx = lookup[n_id]
time_bin = (evt.timestamp - ref_timestamp)//time_bin_size
# print(idx, time_bin)
if time_bin < self.timesteps:
rates[idx][time_bin] += 1
if self.debug:
print("Normalizing...")
# Normalizing spike counts to rates
for i in range(self.num_neurons):
for ts in range(self.timesteps):
rates[i][ts] = rates[i][ts]/(time_bin_size/1e+06)
# # Computing weighted input rate sums
# for i in range(self.num_neurons):
# pre_id = self.neuron_ids[i]
# for post_id in self.post_lookup[pre_id]:
# for ts in range(self.timesteps):
# input_rates[self.neuron_ids.index(post_id)][ts] += rates[i][ts]*self.current_weight_matrix[(pre_id, post_id)]
if self.debug:
print("Returning rates...")
return rates
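# Minimal usage sketch (assumes a running cortexcontrol session; the ids, lookup
# table and timestep count are illustrative, compare run_rnn_with_rpyc.py):
# rt = RateTracker(neuron_ids=[0, 1, 2], lookup={0: 0, 1: 1, 2: 2}, timesteps=28)
# rt.start_recording_spikes()
# ... present the stimulus ...
# rt.stop_recording_spikes()
# rates = rt.process_recorded_evts()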
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,333
|
he-xu/TransferRNN
|
refs/heads/master
|
/DynapseRNN.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 22 16:09:09 2019
@author: dzenn
"""
from time import clock, sleep, time
from math import exp
try:
import CtxDynapse
import NeuronNeuronConnector
from CtxDynapse import DynapseCamType as SynapseType
except ModuleNotFoundError:
import rpyc
print("CtxDynapse module not imported. Expecting to run in RPyC mode")
import numpy as np
class DynapseRNN(object):
"""
A controller class for learning-in-the-loop RNN on the Dynapse chip using
CortexControl Python API.
    Weight update is defined by the FORCE algorithm and performed by
    generating a ternary weight matrix after gradient descent with
    stochastic rounding, within the limit on the number of input CAMs per neuron.
    The controller places the RNN on one chip and a proxy input population on another.
    The controller can be used either through RPyC or directly in the CTXCTL console.
"""
def __init__(self, num_inputs, timesteps, multiplex_factor = 1, chip_id_proxy = 3, chip_id = 1, backend = "ctxctl", c = None, debug = False):
"""
Args:
num_inputs (int) : number of input channels (i.e. spike generators)
timesteps (int) : length of each input sequence
multiplex_factor (int, optional) : number of silicon neurons on the chip
which represent a single logical neuron
(i.e. multiplies the size of the RNN)
chip_id_proxy (int, optional) : index of the chip where the proxy input
population is located (should not be the same
as the RNN chip)
chip_id (int, optional) : index of the chip where the RNN is located
backend (str, optional) : sets whether the controller will use RPyC
or try to import modules directly in the console
c (RPyC connection, optional) : RPyC connection object, expected if "rpyc" is
set as backend
debug (bool, optional) : print additional debug info
"""
self.creation_time = time()
if backend == "ctxctl":
self.model = CtxDynapse.model
self.v_model = CtxDynapse.VirtualModel()
self.connector = NeuronNeuronConnector.DynapseConnector()
self.SynapseType = SynapseType
elif backend == "rpyc":
if c is not None:
self.c = c
self.model = c.modules.CtxDynapse.model
self.v_model = c.modules.CtxDynapse.VirtualModel()
self.connector = c.modules.NeuronNeuronConnector.DynapseConnector()
self.SynapseType = c.modules.CtxDynapse.DynapseCamType
# c.execute("from time import time, clock, sleep")
# c.execute("import CtxDynapse")
else:
raise ValueError("Selected backend is 'rpyc' but no connection object is given to DynapseRNN")
else:
raise ValueError("Unknown backend identifier '" + backend + "'. Use 'ctxctl' or 'rpyc' only.")
self.backend = backend
self.neurons = self.model.get_shadow_state_neurons()
self.virtual_neurons = self.v_model.get_neurons()
self.neuron_ids = []
self.poisson_spike_gen = self.model.get_fpga_modules()[0]
self.buf_evt_filter = None
self.evts = []
self.recording_start_time = None
self.recording_stop_time = None
self.teleported_functions = [] # list of functions teleported with rpyc
self.num_inputs = num_inputs
self.timesteps = timesteps
self.pre_lookup = {}
self.post_lookup = {}
self.print = debug
self.last_pre_timestamp = {}
self.last_post_timestamp = {}
self.last_timestamp = {}
self.last_clock_time = {}
self.learn = True
self.debug = debug
self.current_weight_matrix = {}
self.conns_created = 0
self.pre_evts = []
self.post_evts = []
self.pre_times = []
self.post_times = []
self.da_evts = []
self.pre_diff = []
self.post_diff = []
self.conn_log = []
### GD Learning parameters
self.error = None
self.learning_rate = 0.1
self.regularizer = 1
self.multiplex_factor = multiplex_factor
### Allocating neurons
if self.debug:
print("Allocating populations")
self.rnn_neurons = [n for n in self.neurons if n.get_chip_id()==chip_id
and (n.get_neuron_id() + n.get_core_id()*256) < self.num_inputs * self.multiplex_factor]
self.proxy_neurons = [n for n in self.neurons if n.get_chip_id()==chip_id_proxy
and (n.get_neuron_id() + n.get_core_id()*256) < self.num_inputs*2]
self.rnn_neurons_idx_lookup = {}
i = 0
for neuron in self.rnn_neurons:
self.rnn_neurons_idx_lookup.update({neuron.get_neuron_id() + 256*neuron.get_core_id() + 1024*neuron.get_chip_id() : i})
i += 1
### Creating spikegen -> proxy_neurons connections
for n in self.proxy_neurons:
self.connector.add_connection(self.virtual_neurons[(n.get_neuron_id())],
n,
self.SynapseType.SLOW_EXC)
self.connector.add_connection(self.virtual_neurons[(n.get_neuron_id())],
n,
self.SynapseType.SLOW_EXC)
if self.debug:
print("Connected spikegen")
### Creating proxy_neurons -> rnn_neurons connections
for n in self.rnn_neurons:
self.neuron_ids.append(n.get_neuron_id() + n.get_core_id()*256 + n.get_chip_id()*1024)
self.connector.add_connection(self.proxy_neurons[(n.get_neuron_id()) % self.num_inputs],
n,
self.SynapseType.SLOW_EXC)
self.connector.add_connection(self.proxy_neurons[(n.get_neuron_id()) % self.num_inputs],
n,
self.SynapseType.SLOW_EXC)
self.connector.add_connection(self.proxy_neurons[((n.get_neuron_id()) % self.num_inputs) + self.num_inputs],
n,
self.SynapseType.FAST_INH)
self.connector.add_connection(self.proxy_neurons[((n.get_neuron_id()) % self.num_inputs) + self.num_inputs],
n,
self.SynapseType.FAST_INH)
self.model.apply_diff_state()
self.num_neurons = len(self.neuron_ids)
# self.P_prev = self.regularizer*np.eye(self.num_neurons)
self.poisson_spike_gen.set_chip_id(chip_id_proxy)
if self.debug:
print("Connected proxy population")
pre_id_list = []
post_id_list = []
for i in range(self.num_inputs * self.multiplex_factor):
for j in range(self.num_inputs * self.multiplex_factor):
pre_id_list.append(self.neuron_ids[i])
post_id_list.append(self.neuron_ids[j])
## Prepare connectivity matrix
for pre_id, post_id in zip(pre_id_list, post_id_list):
add_to_dict(self.post_lookup, pre_id, post_id)
add_to_dict(self.pre_lookup, post_id, pre_id)
self.last_pre_timestamp[(pre_id, post_id)] = 0
self.last_post_timestamp[(pre_id, post_id)] = 0
self.last_timestamp[(pre_id, post_id)] = 0
self.last_clock_time[(pre_id, post_id)] = 0
self.current_weight_matrix[(pre_id, post_id)] = 0
# self.init_weights()
self.w_ternary = np.zeros([self.num_neurons, self.num_neurons])
self.apply_new_matrix(self.w_ternary)
# if self.backend == 'rpyc':
# self.teleported_functions.append(rpyc.utils.classic.teleport_function(c, self.start_recording_spikes))
# self.teleported_functions.append(rpyc.utils.classic.teleport_function(c, self.stop_recording_spikes))
# self.teleported_functions.append(rpyc.utils.classic.teleport_function(c, self.process_recorded_evts))
# self.start_recording_spikes = lambda: self.teleported_functions[0](self)
# self.stop_recording_spikes = lambda: self.teleported_functions[1](self)
# self.process_recorded_evts = lambda: self.teleported_functions[2](self)
if self.debug:
print("RNN Init Complete")
def init_weights(self, cam_num=60):
"""
Random ternary weight initialization
"""
w_ternary = np.zeros([self.num_neurons, self.num_neurons])
w_ternary[:,:cam_num//2]=1
w_ternary[:,cam_num//2:cam_num]=-1
w_rand = np.random.rand(self.num_neurons, self.num_neurons)
w_order = np.argsort(w_rand, axis=0)
self.w_ternary = w_ternary.T[w_order, np.arange(w_order.shape[1])].T
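# Illustrative sketch (not part of the original class): the initialization above
# gives every row cam_num//2 excitatory (+1) and cam_num//2 inhibitory (-1)
# entries at random column positions, e.g. for a 4-neuron toy case:
#   >>> import numpy as np
#   >>> num_neurons, cam_num = 4, 2
#   >>> w = np.zeros([num_neurons, num_neurons])
#   >>> w[:, :cam_num//2] = 1; w[:, cam_num//2:cam_num] = -1
#   >>> order = np.argsort(np.random.rand(num_neurons, num_neurons), axis=0)
#   >>> w_shuffled = w.T[order, np.arange(order.shape[1])].T
#   >>> np.abs(w_shuffled).sum(axis=1)   # every row uses exactly cam_num CAMs
#   array([2., 2., 2., 2.])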
def start_recording_spikes(self):
"""
Initializes the event filter
"""
if self.backend == 'rpyc':
self.c.execute('rt.start_recording_spikes()')
else:
model = CtxDynapse.model
self.buf_evt_filter = CtxDynapse.BufferedEventFilter(model, self.neuron_ids)
evts = self.buf_evt_filter.get_events() # flush the event filter to start a new recording
self.recording_start_time = time()
def stop_recording_spikes(self):
"""
Stores all recorded events and clears the event filter
"""
if self.backend == 'rpyc':
self.c.execute('rt.stop_recording_spikes()')
else:
self.evts = self.buf_evt_filter.get_events()
self.recording_stop_time = time()
self.buf_evt_filter.clear()
def process_recorded_evts(self):
"""
Returns firing rates AND input rates based on recorded events and current
weight matrix
"""
if self.backend == 'rpyc':
self.c.execute('rates = rt.process_recorded_evts()')
return self.c.namespace['rates']
else:
lookup = self.rnn_neurons_idx_lookup
evts = self.evts
rates = []
input_rates = []
if self.debug:
print("Counting the spikes...")
# Preparing arrays and helper variables
for i in range(self.num_neurons):
rates.append([])
input_rates.append([])
for ts in range(self.timesteps):
rates[i].append(0)
input_rates[i].append(0)
if len(evts) != 0:
ref_timestamp = evts[0].timestamp
time_bin_size = int((self.recording_stop_time - self.recording_start_time)*1e+06/self.timesteps)
if self.debug:
print("Binning...")
# Placing spikes in bins
for evt in evts:
n_id = evt.neuron.get_neuron_id() + 256*evt.neuron.get_core_id() + 1024*evt.neuron.get_chip_id()
idx = lookup[n_id]
time_bin = (evt.timestamp - ref_timestamp)//time_bin_size
# print(idx, time_bin)
if time_bin < self.timesteps:
rates[idx][time_bin] += 1
if self.debug:
print("Normalizing...")
# Normalizing spike counts to rates
for i in range(self.num_neurons):
for ts in range(self.timesteps):
rates[i][ts] = rates[i][ts]/(time_bin_size/1e+06)
# # Computing weighted input rate sums
# for i in range(self.num_neurons):
# pre_id = self.neuron_ids[i]
# for post_id in self.post_lookup[pre_id]:
# for ts in range(self.timesteps):
# input_rates[self.neuron_ids.index(post_id)][ts] += rates[i][ts]*self.current_weight_matrix[(pre_id, post_id)]
if self.debug:
print("Returning rates...")
return rates #, input_rates
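# Binning sketch (illustrative numbers): a 2 s recording with timesteps == 10
# gives time_bin_size == int(2 * 1e6 / 10) == 200000 us; a spike stamped
# 450000 us after the first event lands in bin 450000 // 200000 == 2, and a
# count of 5 spikes in one bin normalizes to 5 / 0.2 s == 25 Hz.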
def apply_new_matrix(self, w_ternary, print_w = False):
"""
Applies the new weight matrix to the chip
"""
if self.debug:
print("Applying connectivity changes...")
num_conns_removed = 0
num_conns_created = 0
for i in range(len(self.pre_lookup)):
for j in range(len(self.post_lookup)):
pre_id = self.neuron_ids[i]
post_id = self.neuron_ids[j]
current_w = self.current_weight_matrix[(pre_id, post_id)]
delta_w = w_ternary[j][i] - current_w
# if self.debug:
# print(w_ternary[j][i], current_w)
# print("Delta: ", int(abs(delta_w)))
for conn_idx in range(int(abs(delta_w))):
if print_w:
print("removal phase")
print(delta_w, current_w, w_ternary[j][i], i, j)
if delta_w > 0:
if current_w < 0:
self.connector.remove_connection(self.neurons[pre_id], self.neurons[post_id])
num_conns_removed += 1
self.current_weight_matrix[(pre_id, post_id)] += 1
elif delta_w < 0:
if current_w > 0:
self.connector.remove_connection(self.neurons[pre_id], self.neurons[post_id])
num_conns_removed += 1
self.current_weight_matrix[(pre_id, post_id)] -= 1
current_w = self.current_weight_matrix[(pre_id, post_id)]
for i in range(len(self.pre_lookup)):
for j in range(len(self.post_lookup)):
pre_id = self.neuron_ids[i]
post_id = self.neuron_ids[j]
current_w = self.current_weight_matrix[(pre_id, post_id)]
delta_w = w_ternary[j][i] - current_w
# if self.debug:
# print(w_ternary[j][i], current_w)
# print("Delta: ", int(abs(delta_w)))
for conn_idx in range(int(abs(delta_w))):
if print_w:
print("addition phase")
print(delta_w, current_w, w_ternary[j][i], i, j)
if delta_w > 0:
if current_w >= 0:
self.connector.add_connection(self.neurons[pre_id], self.neurons[post_id], self.SynapseType.FAST_EXC)
num_conns_created += 1
self.current_weight_matrix[(pre_id, post_id)] += 1
elif delta_w < 0:
if current_w <= 0:
self.connector.add_connection(self.neurons[pre_id], self.neurons[post_id], self.SynapseType.FAST_INH)
num_conns_created += 1
self.current_weight_matrix[(pre_id, post_id)] -= 1
current_w = self.current_weight_matrix[(pre_id, post_id)]
self.model.apply_diff_state()
if self.debug:
print("Done.")
print("Neuron 0 matrix sum", np.abs(w_ternary[0, :]).sum())
print("%d conns removed, %d conns created" % (num_conns_removed, num_conns_created))
self.conn_log.append([num_conns_removed, num_conns_created])
def present_stimulus(self, stim_array, timestep_length):
"""
Presents the array of rates to the virtual neuron population
"""
if self.debug:
print("Presenting the digit...")
self.poisson_spike_gen.start()
for ts in range(self.timesteps):
for i in range(self.num_inputs):
rate = stim_array[ts, i]*100
if rate >= 0:
self.poisson_spike_gen.write_poisson_rate_hz(i, rate)
else:
self.poisson_spike_gen.write_poisson_rate_hz(i + self.num_inputs, abs(rate))
sleep(timestep_length)
for i in range(self.num_inputs):
self.poisson_spike_gen.write_poisson_rate_hz(i, 0)
self.poisson_spike_gen.stop()
if self.debug:
print("Done.")
def ternarize(self, w_new, cam_num):
"""
Maps a real-valued weight matrix to {-1, 0, +1}, keeping only the
cam_num largest-magnitude incoming weights per neuron
"""
w_order = np.argsort(np.abs(w_new.T), axis=0)
w_sorted = w_new.T[w_order, np.arange(w_order.shape[1])]
w_sorted[:-cam_num, :]=0
w_order_order = np.argsort(w_order, axis=0)
w_undone = w_sorted[w_order_order, np.arange(w_order_order.shape[1])].T
w_undone[w_undone>0] = 1
w_undone[w_undone<0] = -1
return w_undone
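# Illustrative sketch: with cam_num == 2 each row keeps its two
# largest-magnitude entries and maps them to +/-1, e.g.:
#   >>> import numpy as np
#   >>> w = np.array([[0.9, -0.2, 0.05], [-0.7, 0.6, 0.1], [0.3, -0.4, 0.8]])
#   >>> # ternarize(w, cam_num=2) would give
#   >>> # [[ 1, -1, 0], [-1, 1, 0], [0, -1, 1]]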
def stochastic_round(self, w_ternary, d_w, cam_num):
"""
Stochastically rounds the ternary connectivity matrix
"""
w_new = w_ternary - d_w
w_uniform = np.random.uniform(size=d_w.shape)
d_w_rounded = ((w_uniform < np.abs(d_w))*np.sign(d_w)).astype(int)
w_new_rounded = w_ternary - d_w_rounded
w_new_rounded[w_new_rounded>1] = 1
w_new_rounded[w_new_rounded<-1] = -1
w_order = np.argsort(np.abs(w_new.T), axis=0)
w_new_rounded_sorted = w_new_rounded.T[w_order, np.arange(w_order.shape[1])]
num_neuron = w_order.shape[1]
for idx_post in range(num_neuron):
cam_used = 0
for idx_pre in range(num_neuron):
# walk from the largest magnitude downward (sorted ascending, so index -(idx_pre + 1))
w_ij = w_new_rounded_sorted[-(idx_pre + 1), idx_post]
if np.abs(w_ij) > 0.1:
cam_used += 1
if cam_used >= cam_num:
# zero out every remaining smaller-magnitude weight for this neuron
w_new_rounded_sorted[:-(idx_pre + 1), idx_post] = 0
break
w_order_order = np.argsort(w_order, axis=0)
w_undone = w_new_rounded_sorted[w_order_order, np.arange(w_order_order.shape[1])].T
return w_undone
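# Stochastic-rounding sketch (illustrative): a fractional update d_w becomes a
# ternary step with probability proportional to |d_w|:
#   >>> import numpy as np
#   >>> d_w = np.array([[0.3, -0.8]])
#   >>> u = np.random.uniform(size=d_w.shape)
#   >>> step = ((u < np.abs(d_w)) * np.sign(d_w)).astype(int)
#   >>> # step[0, 0] is +1 with p = 0.3 else 0; step[0, 1] is -1 with p = 0.8 else 0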
def update_weight(self, rate_psc, rate_teacher, cam_num=60, learning_rate=0.1):
"""
Generates the new ternary connectivity matrix based on measured on-chip rates and teacher signal
Args:
rate_psc (numpy array) : array of rates of shape (num_neurons, timesteps)
rate_teacher (numpy array) : array of teacher rates of shape (num_inputs, timesteps)
cam_num (int, optional) : maximum number of CAMs used by each neuron
learning_rate (float, optional) : scaling factor for the weight-change gradient
Returns:
w_ternary : new on-chip connectivity matrix
c_grad : direction (+1 or -1) in which to adjust the global activity level
"""
rate_recurrent = self.w_ternary.dot(rate_psc)
rate_teacher_tile = np.tile(rate_teacher.T, (self.multiplex_factor,1))
self.error = rate_recurrent - rate_teacher_tile
d_w = 0
for t in range(self.timesteps):
r_t = rate_psc[:, t][:,np.newaxis]
# P_up = self.P_prev.dot(r_t.dot(r_t.T.dot(self.P_prev)))
# P_down = 1 + r_t.T.dot(self.P_prev.dot(r_t))
# self.P_prev = self.P_prev - P_up / P_down
e_t = self.error[:, t][:,np.newaxis]
# d_w += e_t.dot(r_t.T.dot(self.P_prev))
d_w += e_t.dot(r_t.T)
d_w = d_w / self.timesteps
w_new = self.w_ternary - learning_rate*d_w
norm_ratio = np.linalg.norm(w_new, 'fro')/np.linalg.norm(self.w_ternary, 'fro')
self.w_ternary = self.stochastic_round(self.w_ternary, learning_rate*d_w, cam_num)
#self.w_ternary = self.ternarize(w_new, cam_num)
print(d_w.mean(), d_w.max(), d_w.min())
print(rate_recurrent.mean(), rate_teacher.mean())
if norm_ratio > 1:
c_grad = 1
else:
c_grad = -1
return c_grad, np.abs(self.error).mean()
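# Learning-rule sketch (illustrative): with error e_t = W r_t - r_teacher_t the
# accumulated gradient is d_w = (1/T) * sum_t e_t r_t^T, i.e. a plain
# least-squares gradient; stochastic_round then projects W - lr * d_w back onto
# ternary {-1, 0, +1} weights under the per-neuron CAM budget.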
def relaxate(A, tau, delta_t):
"""
Computes the exponential decay A * exp(-delta_t / tau)
"""
return A*exp(-delta_t/tau)
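# e.g. relaxate(1.0, tau=0.02, delta_t=0.02) ~= 0.368 (decays by 1/e per tau)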
def add_to_dict(dct, key, value):
"""
A tool to add elements to dictionaries of lists.
Appends a value to the list dct[key], otherwise creates it.
"""
if key in dct:
dct[key].append(value)
else:
dct[key] = [value]
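# e.g. d = {}; add_to_dict(d, 'a', 1); add_to_dict(d, 'a', 2)  ->  d == {'a': [1, 2]}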
|
{"/MainRNNLoop.py": ["/DynapseRNN.py"], "/bias_tuning_tools.py": ["/NeuronNeuronConnector.py"], "/run_rnn_with_rpyc.py": ["/MainRNNLoop.py"], "/DynapseRNN.py": ["/NeuronNeuronConnector.py"]}
|
16,334
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/ContentScraper.py
|
import abc
import ScraperHelper as helper
from bs4 import BeautifulSoup
import json
from urllib.parse import urlparse
from datetime import date, datetime
import re
class ContentScraperAbstract(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def getArticleDatePublished(self):
"""Required Method"""
@abc.abstractmethod
def getArticleDateModified(self):
"""Required Method"""
@abc.abstractmethod
def getArticleTitle(self):
"""Required Method"""
@abc.abstractmethod
def getArticleCategory(self):
"""Required Method"""
@abc.abstractmethod
def getArticleAuthor(self):
"""Required Method"""
@abc.abstractmethod
def getArticleAuthorPosition(self):
"""Required Method"""
@abc.abstractmethod
def getArticleScope(self):
"""Required Method"""
@abc.abstractmethod
def getNotFoundValue(self):
"""Required Method"""
@abc.abstractmethod
def isArticleCompliant(self):
"""Required Method"""
@abc.abstractmethod
def createParser(self, pageContent):
"""Required Method"""
class FifteenContentScraper(ContentScraperAbstract):
def __init__(self, url):
self._url = url
def getArticleDatePublished(self):
datePublishedTag = self._soup.find("meta", attrs={"itemprop":"datePublished"})
datePublished = None
if datePublishedTag:
datePublished = datePublishedTag.get("content")
return datePublished
def getArticleDateModified(self):
dateModifiedTag = self._soup.find("meta", attrs={"itemprop":"dateModified"})
dateModified = None
if dateModifiedTag:
dateModified = dateModifiedTag.get("content")
return dateModified
def getArticleTitle(self):
articleTitleTag = self._soup.find("h1", attrs={"itemprop":"headline"})
articleTitle = articleTitleTag.text.strip() if articleTitleTag else self.getNotFoundValue()
return articleTitle
def getArticleCategory(self):
categoryTags = self._soup.findAll("li", attrs={"itemprop":"itemListElement"})
categoryName = ""
for categoryTag in categoryTags:
categoryNameTag = categoryTag.find("span", attrs={"itemprop":"name"})
if categoryNameTag is not None:
if len(categoryName) != 0:
categoryName += " > "
categoryName += categoryNameTag.text.strip()
return categoryName
def getArticleAuthor(self): #TODO: optimize - only perform find of authorTag once per url, reuse authorTag in getArticleAuthorPosition etc.
authorTag = self._soup.find("div", attrs={"class":"author-name-block"}) #this tag represents author block if it's 15min employee and the source is 15min
if authorTag is None:
authorTag = self._soup.find("div", attrs={"class":"author-info author-text"}) #this tag represents author from another source
if authorTag is not None:
authorName = authorTag.find("span", attrs={"itemprop":"name"}).text.strip()
else:
authorName = self.getNotFoundValue()
return authorName
def getArticleAuthorPosition(self):
authorTag = self._soup.find("div", attrs={"class":"author-name-block"})
if authorTag is not None:
authorPosition = authorTag.find("div", attrs={"class":"author-position"}).text.strip()
else:
authorPosition = self.getNotFoundValue()
return authorPosition
def getArticleScope(self):
articleTitleTag = self._soup.find("h1", attrs={"itemprop":"headline"})
articleIntroTag = self._soup.find("h4", attrs={"class":"intro"})
articleContentTag = self._soup.find("div", attrs={"class": "article-content"})
scopes = [articleTitleTag, articleIntroTag, articleContentTag]
return scopes
def getNotFoundValue(self):
return "n/a"
def isArticleCompliant(self):
return True
def createParser(self, pageContent):
self._pageContent = pageContent
self._soup = BeautifulSoup(pageContent, "html.parser")
class DelfiContentScraper(ContentScraperAbstract):
def __init__(self, url):
self._url = url
def getArticleDatePublished(self):
datePublishedTag = self._soup.find("div", attrs={"class":"source-date"})
if (datePublishedTag is None):
datePublishedTag = self._soup.find("div", attrs={"class":"delfi-source-date"})
datePublished = ""
if datePublishedTag is not None:
datePublished = datePublishedTag.text.strip()
else:
datePublished = None
return datePublished
def getArticleDateModified(self):
dateModified = None
return dateModified
def getArticleTitle(self):
articleTitle = self._getRegularArticleTitle()
if len(articleTitle) == 0:
articleTitle = self._getMultimediaArticleTitle()
if len(articleTitle) == 0:
articleTitle = self.getNotFoundValue()
return articleTitle
def _getRegularArticleTitle(self):
articleTitleTag = self._soup.find("div", attrs={"class": "article-title"})
articleTitle = ""
if articleTitleTag is not None:
articleTitle = articleTitleTag.find("h1").text.strip()
articleTitle = articleTitle.replace("\n", " ")
articleTitle = articleTitle.replace("\t", "")
return articleTitle
def _getMultimediaArticleTitle(self):
articleTitleTag = self._soup.find("h1", attrs={"itemprop": "headline"})
articleTitle = ""
if articleTitleTag is not None:
articleTitle = articleTitleTag.text.strip()
articleTitle = articleTitle.replace("\n", " ")
articleTitle = articleTitle.replace("\t", "")
return articleTitle
def getArticleCategory(self):
categoryName = ""
categoryFatherTag = self._soup.find("div", attrs={"class":"delfi-breadcrumbs delfi-category-location"})
if categoryFatherTag is not None:
categoryTags = categoryFatherTag.findAll("span", attrs={"itemprop":"itemListElement"})
if categoryTags is not None:
for categoryTag in categoryTags:
if len(categoryName) != 0:
categoryName += " > "
categoryName += categoryTag.text.strip()
if len(categoryName) == 0:
categoryName = self.getNotFoundValue()
return categoryName
def getArticleAuthor(self):
authorTag = self._soup.find("div", attrs={"class":"delfi-author-name"})
if authorTag is None:
authorTag = self._soup.find("div", attrs={"class":"delfi-source-name"})
if authorTag is not None:
authorName = authorTag.text.strip()
else:
authorName = self.getNotFoundValue()
return authorName
def getArticleAuthorPosition(self):
authorTag = self._soup.find("div", attrs={"class":"delfi-author-name"})
authorPosition = ""
if authorTag is not None:
authorBioLinkTag = authorTag.find("a")
if authorBioLinkTag is not None:
authorBioLink = authorBioLinkTag.get("href")
bioPageContent = helper.httpget(authorBioLink)
if bioPageContent is not None:
babySoup = BeautifulSoup(bioPageContent, "html.parser")
authorPosition = babySoup.find("div", attrs={"class":"title"}).text.strip()
if len(authorPosition) == 0:
authorPosition = self.getNotFoundValue()
else:
authorPosition = self.getNotFoundValue()
return authorPosition
def getArticleScope(self):
articleTitleTag = self._soup.find("div", attrs={"class":"article-title"})
articleIntroTag = self._soup.find("div", attrs={"class":"delfi-article-lead"})
bigColumnTag = self._soup.find("div", attrs={"class": "col-xs-8"})
articleContentTag = bigColumnTag.find("div") #or bigColumnTag.div (finds the first <div> in bigColumnTag)
scopes = [articleTitleTag, articleIntroTag, articleContentTag]
return scopes
def getNotFoundValue(self):
return "n/a"
def isArticleCompliant(self):
return True
def createParser(self, pageContent):
self._pageContent = pageContent
self._soup = BeautifulSoup(pageContent, "html.parser")
class LrytasContentScraper(ContentScraperAbstract):
def __init__(self, url, webDriverPath):
self._url = url
self._webDriverPath = webDriverPath
def getArticleDatePublished(self):
datePublishedTag = self._soup.find("meta", attrs={"itemprop":"datePublished"})
datePublished = None
if datePublishedTag:
datePublished = datePublishedTag.get("content")
return datePublished
def getArticleDateModified(self):
dateModified = None
return dateModified
def getArticleTitle(self):
articleTitle = self.getNotFoundValue()
aboutBlockJson = self._soup.find("script", type="application/ld+json").contents
if aboutBlockJson:
jsonText = aboutBlockJson[0]
jsonText = jsonText.strip().replace("\t", "").replace("\n", "").replace("\r", "")
jsonText = self._cleanHtml(jsonText)
about = json.loads(jsonText)
articleTitle = about["headline"].strip()
return articleTitle
def _cleanHtml(self, raw_html):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
def getArticleCategory(self):
categoryName = ""
aboutBlockJson = self._soup.findAll("script", attrs={"type": "application/ld+json"})[-1].contents
if aboutBlockJson:
jsonText = aboutBlockJson[0]
jsonText = jsonText.strip().replace("\t", "").replace("\n", "").replace("\r", "")
jsonText = self._cleanHtml(jsonText)
about = json.loads(jsonText)
for itemListElement in about["itemListElement"]:
if len(categoryName) != 0:
categoryName += " > "
categoryName += itemListElement["item"]["name"]
if len(categoryName) == 0:
categoryName = self.getNotFoundValue()
return categoryName
def getArticleAuthor(self):
authorName = self.getNotFoundValue()
aboutBlockJson = self._soup.find("script", type="application/ld+json").contents
if aboutBlockJson:
jsonText = aboutBlockJson[0]
jsonText = jsonText.strip().replace("\t", "").replace("\n", "").replace("\r", "")
jsonText = self._cleanHtml(jsonText)
about = json.loads(jsonText)
authorName = about["publisher"]["name"]
return authorName
def getArticleAuthorPosition(self):
return self.getNotFoundValue()
def getArticleScope(self):
script = self._soup.find("body").find("script").contents[0]
script = script.strip().replace("\t", "").replace("\n", "")
script = self._cleanHtml(script)
pos = script.find("{")
pos2 = script.find("};")
jsonbby = script[pos:pos2 + 1]
articleJsonObj = json.loads(jsonbby)
scopes = [articleJsonObj["clearContent"], articleJsonObj["title"]]
return scopes
def getNotFoundValue(self):
return "n/a"
def isArticleCompliant(self):
isCompliant = True
articleDate = self._getArticleDate()
if articleDate and (articleDate < date(2019, 1, 1) or articleDate > date(2019, 12, 31)):
isCompliant = False
return isCompliant
def _getArticleDate(self):
url = self._url
parsedUrl = urlparse(url)
sectorPosition = 0
pathParts = parsedUrl.path[1:].split("/")
articleDate = None
for sector in pathParts:
try:
value = int(sector)
except ValueError:
value = 0
if value != 0:
year = value
try:
month = int(pathParts[sectorPosition + 1])
day = int(pathParts[sectorPosition + 2])
articleDate = date(year, month, day)
except (ValueError, IndexError):
articleDate = None
break
sectorPosition += 1
return articleDate
def createParser(self, pageContent):
self._pageContent = pageContent
self._soup = BeautifulSoup(pageContent, "html.parser")
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,335
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/Scripts/ScrapeIt.py
|
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
import urllib3 as urllib
import sys
import os
from datetime import datetime
def getCurrentDateTime():
now = datetime.now()
return now.strftime("%d_%m_%Y_%H_%M_%S")
def createWorkSessionFolder(createInPath):
createdFolder = createInPath + "\\" + "session_" + getCurrentDateTime()
os.mkdir(createdFolder)
return createdFolder
workFolder = "C:\Data\GetLinks"
workSessionFolder = createWorkSessionFolder(workFolder)
def httpget(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of the response is some kind of HTML/XML, return the
raw content (bytes), otherwise return None.
"""
try:
with closing(get(url, stream=True)) as resp:
if isResponseOK(resp):
return resp.content
else:
return None
except RequestException as e:
print('Error during requests to {0} : {1}'.format(url, str(e)))
return None
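# Usage sketch (hypothetical URL): httpget("https://example.com") returns the
# raw page bytes when the response is 200 with an HTML content type, else None.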
def isResponseOK(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200
and content_type is not None
and content_type.find('html') > -1)
def getIncrementalUrl(url, i):
return url.replace("{0}", str(i))
def isIncrementalUrl(url):
if (url.find("{0}") != -1):
return True
else:
return False
def getLinksFromPageContent(pageContent):
soup = BeautifulSoup(pageContent, 'html.parser')
links = []
for a in soup.find_all('a'):
links.append(a.get('href'))
return links
def saveToFile(path, links):
fileNameWithPath = path + "\\" + "result.csv"
file = open(fileNameWithPath, "a+")
for link in links:
if (link is not None):
file.write(link + "\n")
file.close()
def getLinks(regularUrls):
for url in regularUrls:
pageContent = httpget(url)
links = getLinksFromPageContent(pageContent)
saveToFile(workSessionFolder, links)
def getLinksFromIncrementalUrls(incrementalUrls, pagesCount):
for i in range(1, pagesCount + 1):
for url in incrementalUrls:
urlForRequest = getIncrementalUrl(url, i)
pageContent = httpget(urlForRequest)
links = getLinksFromPageContent(pageContent)
saveToFile(workSessionFolder, links)
def validateArgs(args):
if len(args) < 3 or args[1] is None or args[2] is None:
print("Wrong arguments. 1st argument is the file of links, 2nd argument is the incremental value of how many pages to view.")
return False
return True
def main(args):
if (validateArgs(args) == False):
return
linksFilePath = str(args[1])
pagesCount = int(args[2])
file = open(linksFilePath, "r")
fileLines = file.readlines()
file.close()
incrementalUrls = []
regularUrls = []
for line in fileLines:
if (isIncrementalUrl(line)):
incrementalUrls.append(line)
else:
regularUrls.append(line)
getLinksFromIncrementalUrls(incrementalUrls, pagesCount)
getLinks(regularUrls)
if __name__ == '__main__':
main(sys.argv)
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,336
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/ParallelTest.py
|
import multiprocessing
from joblib import Parallel, delayed
from tqdm import tqdm
import time
def myfunction(i, parameters):
time.sleep(4)
print(i)
return i
num_cores = multiprocessing.cpu_count()
print(num_cores)
myList = ["a", "b", "c", "d"]
parameters = ["param1", "param2"]
inputs = tqdm(myList)
if __name__ == "__main__":
processed_list = Parallel(n_jobs = num_cores)(delayed(myfunction)(i, parameters) for i in inputs)
print(processed_list)
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,337
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/driverWrapper.py
|
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
import time
service = Service('c:\\data\\chromedriver\\chromedriver.exe')
service.start()
cdi = webdriver.Remote(service.service_url)
cdi.get('http://www.google.com/')
time.sleep(5) # Let the user actually see something!
cdi.quit()
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,338
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/AnalyzeLinks.py
|
import ScraperHelper as helper
from ContentFetcher import FileContentFetcher, HttpContentFetcher
from ContentScraper import ContentScraperAbstract, FifteenContentScraper, DelfiContentScraper, LrytasContentScraper
import os
import multiprocessing
from joblib import Parallel, delayed
from tqdm import tqdm
import csv
import re
class SimpleContentScraper:
def __init__(self, contentFetcherStrategy, workSessionFolder, cpuCount, regexCompliancePatterns):
self._contentFetcherStrategy = contentFetcherStrategy
self._workSessionFolder = workSessionFolder
self._cpuCount = cpuCount
self._regexCompliancePatterns = regexCompliancePatterns
def scrape(self):
workList = tqdm(self._contentFetcherStrategy.getWorkList())
results = Parallel(n_jobs=self._cpuCount)(delayed(self._processResource)(resource) for resource in workList)
results.insert(0, self._getDataSetHeader())
return self._removeEmptyEntries(results)
def _processResource(self, resource):
contentScraperStrategy = self.getContentScraperStrategy(self._contentFetcherStrategy.getContentScraperSuggestion(resource), resource)
pageContent = self._contentFetcherStrategy.getContent(resource)
result = []
try:
if pageContent is not None:
contentScraperStrategy.createParser(pageContent) #TODO: rename to init
allMatches = self._getPatternMatches(contentScraperStrategy.getArticleScope())
if self._isArticleCompliant(allMatches) and contentScraperStrategy.isArticleCompliant():
result.append(self._contentFetcherStrategy.getResourceName(resource))
result.append(contentScraperStrategy.getArticleTitle())
result.append(contentScraperStrategy.getArticleCategory())
result.append(contentScraperStrategy.getArticleDatePublished())
result.append(contentScraperStrategy.getArticleDateModified())
result.append(contentScraperStrategy.getArticleAuthor())
result.append(contentScraperStrategy.getArticleAuthorPosition())
result = self._getPatternMatchesColumns(result, allMatches)
result.append(resource)
savedContentFileName = self._savePageContentToFile(resource, pageContent)
result.append(savedContentFileName)
except Exception as ex:
result.clear()
print(str(os.getpid()) + " failed to process: " + resource)
self._log(ex, resource)
return result
def _removeEmptyEntries(self, entries):
cleanedEntries = [x for x in entries if x != []]
return cleanedEntries
def _getDataSetHeader(self):
dataSetHeader = ["Source", "Title", "Category", "Date published", "Date modified", "Author", "Author position"]
for pattern in self._regexCompliancePatterns:
dataSetHeader.append("Count of {0}".format(pattern))
dataSetHeader.append("Url")
dataSetHeader.append("Path to local source")
return dataSetHeader
def _savePageContentToFile(self, resource, pageContent):
resourceName = self._contentFetcherStrategy.getFullSafeResourceName(resource)
outputFileName = self._workSessionFolder + "\\" + str(os.getpid()) + "_" + helper.getCurrentDateTime() + resourceName + ".htm"
pageContentFile = open(outputFileName, "w+", encoding="utf-8")
pageContentFile.writelines(pageContent)
pageContentFile.close()
return outputFileName
def _log(self, exception, url):
try:
logFile = open(self._workSessionFolder + "\\" + "log.txt", "a+")
logFile.write("Exception has occured: " + str(url) + "\n")
logFile.write(str(exception) + "\n")
logFile.write(str(exception.args) + "\n")
logFile.write(str(os.getpid()) + " tried to process it at " + str(helper.getCurrentDateTime()) + " but failed." + "\n\n")
logFile.close()
except:
print("lol failed to write to the log file but please do continue: " + url)
def _getPatternMatchesColumns(self, currentResult, allPatternMatches):
for i in range(0, len(self._regexCompliancePatterns)):
count = 0
for matches in allPatternMatches[i]:
count += len(matches)
currentResult.append(count)
return currentResult
@staticmethod
def getContentScraperStrategy(suggestion, resource):
contentScraperStrategy = None
if suggestion == "www.15min.lt":
contentScraperStrategy = FifteenContentScraper(resource)
elif suggestion == "www.delfi.lt":
contentScraperStrategy = DelfiContentScraper(resource)
elif suggestion == "www.lrytas.lt":
contentScraperStrategy = LrytasContentScraper(resource, "c:\\data\\chromedriver\\chromedriver.exe")
else:
raise Exception("Could not pick content scraper strategy for " + resource)
return contentScraperStrategy
def _getPatternMatches(self, articleScopes):
allMatches = []
for pattern in self._regexCompliancePatterns:
matches = []
for scope in articleScopes:
try:
scopeText = scope.text
except AttributeError:
scopeText = str(scope)
matches.append(re.findall(pattern, scopeText, flags=re.IGNORECASE))
allMatches.append(matches)
return allMatches
def _isArticleCompliant(self, allMatches):
isCompliant = False
for i in range(0, len(self._regexCompliancePatterns)):
for matches in allMatches[i]:
if len(matches) > 0:
isCompliant = True
break
return isCompliant
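# Compliance sketch (illustrative): with regexCompliancePatterns == [r"(skandal.*?\b)"],
# a scope containing "skandalas" twice yields allMatches == [[["skandalas", "skandalas"]]],
# so _isArticleCompliant returns True and the "Count of (skandal.*?\b)" column gets 2.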
def main():
linksFile = "C:\\Data\\AnalyzeLinks\\links.csv"
linksFile = "C:\\Data\\AnalyzeLinks\\linksTest.csv" #Test
workFolder = "C:\Data\AnalyzeLinks"
workSessionFolder = helper.createWorkSessionFolder(workFolder)
resultFile = workSessionFolder + "\\" + "result.csv"
filesPathFileContentFetcher = "C:\Data\deliverables\iteration3\sources"
filesPathFileContentFetcher = workSessionFolder #Test
cpuCount = multiprocessing.cpu_count()
#cpuCount = 1 #Test
regexCompliancePatterns = [r"(skandal.*?\b)"]
simpleContentScraper = SimpleContentScraper(HttpContentFetcher(linksFile), workSessionFolder, cpuCount, regexCompliancePatterns)
scrapeResult = simpleContentScraper.scrape()
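# The HTTP-based scrape result above is overwritten by the file-based scrape
# below; this appears to be an intentional test toggle, so only the second
# result is written to result.csv.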
simpleContentScraper = SimpleContentScraper(FileContentFetcher(filesPathFileContentFetcher), workSessionFolder, cpuCount, regexCompliancePatterns)
scrapeResult = simpleContentScraper.scrape()
with open(resultFile, "w+", encoding="utf-8", newline='') as resultFile:
writer = csv.writer(resultFile)
writer.writerows(scrapeResult)
if __name__ == '__main__':
main()
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,339
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/test.py
|
from urllib.parse import urlparse
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
import os
from datetime import datetime
from bs4 import BeautifulSoup
def httpget(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
try:
with closing(get(url, stream=True)) as resp:
if isResponseOK(resp):
return resp.text
else:
return None
except RequestException as e:
print('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def isResponseOK(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200
and content_type is not None
and content_type.find('html') > -1)
def getCurrentDateTime():
now = datetime.now()
return now.strftime("%d_%m_%Y_%H_%M_%S")
def createWorkSessionFolder(createInPath):
createdFolder = createInPath + "\\" + "session_" + getCurrentDateTime()
os.mkdir(createdFolder)
return createdFolder
def to_unicode_or_bust(obj, encoding='utf-8'):
# Python 3 equivalent of the classic Python 2 helper: decode bytes to str if needed
if isinstance(obj, bytes):
obj = obj.decode(encoding)
return obj
def cleanPageContent(html):
soup = BeautifulSoup(html, "html.parser") # create a new bs4 object from the html data loaded
for script in soup(["script", "style"]): # remove all javascript and stylesheet code
script.extract()
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
return text
def main():
url = "https://www.delfi.lt/news/daily/world/naujas-skandalas-vokietijoje-tarsi-sprogimas-isikiso-ir-merkel.d?id=77357427"
pageContent = httpget(url)
pageText = cleanPageContent(pageContent)
writeFile = open("test.txt", "w+", encoding='utf-8')
writeFile.writelines(pageText)
writeFile.close()
writeFile = open("test.htm", "w+", encoding='utf-8')
writeFile.writelines(pageContent)
writeFile.close()
if __name__ == '__main__':
main()
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,340
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/ContentFetcher.py
|
import abc
from urllib.parse import urlparse
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from os import listdir
from os.path import isfile, join
class ContentFetcherAbstract(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def getWorkList(self):
"""Required Method"""
@abc.abstractmethod
def getContentScraperSuggestion(self, resource):
"""Required Method"""
@abc.abstractmethod
def getContent(self, resource):
"""Required Method"""
@abc.abstractmethod
def getResourceName(self, resource):
"""Required Method"""
@abc.abstractmethod
def getFullSafeResourceName(self, resource):
"""Required Method"""
class FileContentFetcher(ContentFetcherAbstract):
def __init__(self, filesPath):
self._filesPath = filesPath
def getWorkList(self):
onlyFiles = [self._filesPath + "\\" + f for f in listdir(self._filesPath) if isfile(join(self._filesPath, f))]
return onlyFiles
def getContentScraperSuggestion(self, resource):
contentScraperSuggestion = ""
if resource.find("15minlt") != -1:
contentScraperSuggestion = "www.15min.lt"
elif resource.find("delfilt") != -1:
contentScraperSuggestion = "www.delfi.lt"
elif resource.find("lrytaslt") != -1:
contentScraperSuggestion = "www.lrytas.lt"
else:
raise Exception("Could not pick content scraper strategy for " + resource)
return contentScraperSuggestion
def getContent(self, resource):
with open(resource, "r", encoding="utf-8") as resourceFile:
fileContent = resourceFile.readlines()
mergedFileContent = self._getMergedFileContent(fileContent)
return mergedFileContent
def getResourceName(self, resource):
return self.getContentScraperSuggestion(resource)
def getFullSafeResourceName(self, resource):
return self._getFullSafeResourceName(resource)
def _getFullSafeResourceName(self, resource):
translationTable = dict.fromkeys(map(ord, '!@#$%^&?*()_+=[];/\\,.:'), None)
stripped = resource.translate(translationTable)[:40]
safeUrl = stripped
return safeUrl
def _getMergedFileContent(self, fileContent):
mergedFileContent = ""
for line in fileContent:
mergedFileContent += str(line)
return mergedFileContent
class HttpContentFetcher(ContentFetcherAbstract):
def __init__(self, inputFilePath):
self._inputFilePath = inputFilePath
def getWorkList(self):
return self._getContentFromInputFile()
def getContentScraperSuggestion(self, resource):
return self._getContentScraperSuggestion(resource)
def getResourceName(self, resource):
return self._getSourceHostname(resource)
def getContent(self, resource):
return self._httpget(resource)
def getFullSafeResourceName(self, resource):
return self._getFullSafeResourceName(resource)
def _getFullSafeResourceName(self, url):
parsedUrl = urlparse(url)
translationTable = dict.fromkeys(map(ord, '!@#$%^&?*()_+=[];/\\,.:'), None)
strippedPath = parsedUrl.path.translate(translationTable)[:40]
strippedHostname = parsedUrl.hostname.replace("w", "").replace(".", "")
safeUrl = strippedHostname + "_" + strippedPath
return safeUrl
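# e.g. (illustrative) "https://www.delfi.lt/news/daily/lithuania/article.d?id=1"
# -> hostname "delfilt" + "_" + the path with punctuation stripped and cut to
# 40 chars: "delfilt_newsdailylithuaniaarticled"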
def _getSourceHostname(self, url):
return urlparse(url).hostname
def _getContentScraperSuggestion(self, url):
parsedUrl = urlparse(url)
contentScraperSuggestion = ""
if parsedUrl.hostname == "www.15min.lt":
contentScraperSuggestion = "www.15min.lt"
elif parsedUrl.hostname == "www.delfi.lt":
contentScraperSuggestion = "www.delfi.lt"
elif parsedUrl.hostname == "www.lrytas.lt":
contentScraperSuggestion = "www.lrytas.lt"
else:
raise Exception("Could not pick content scraper strategy for " + url)
return contentScraperSuggestion
def _getContentFromInputFile(self):
inputFile = open(self._inputFilePath, "r")
fileContent = inputFile.readlines()
inputFile.close()
fileContent = self._filterFile(fileContent)
fileContentCleansed = []
for line in fileContent:
fileContentCleansed.append(self._cleanurl(line))
return fileContentCleansed
def _filterFile(self, fileContent):
doNotIncludeTheseLinksPlease = ["video", "multimedija"]
filteredFileContent = []
for url in fileContent:
parsedUrl = urlparse(url)
firstPathInUrl = parsedUrl.path[1:parsedUrl.path.find("/", 1)]
if firstPathInUrl in doNotIncludeTheseLinksPlease:
continue
filteredFileContent.append(url)
return filteredFileContent
def _cleanurl(self, url):
return str(url).strip()
def _httpget(self, url):
try:
with closing(get(url, stream=True)) as resp:
if self._isResponseOK(resp):
return resp.text
else:
return None
except RequestException as e:
print(str(e))
return None
def _isResponseOK(self, resp):
content_type = resp.headers['Content-Type'].lower()
if resp.status_code != 200:
# no file logger is available in this module; report failed responses to stdout
print(resp.status_code, resp.url)
return (resp.status_code == 200
and content_type is not None
and content_type.find('html') > -1)
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,341
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/LinkScraper.py
|
import abc
from datetime import date, timedelta
import time
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import ScraperHelper as helper
class LinkScraperAbstract(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def getWorkUrls(self):
"""Required Method"""
@abc.abstractmethod
def getLinksFromPage(self, pageContent):
"""Required Method"""
@abc.abstractmethod
def getPageContent(self, resourceLink):
"""Required Method"""
@abc.abstractmethod
def getCpuCount(self):
"""Required Method"""
class FifteenLinkScraper(LinkScraperAbstract):
def __init__(self, cpuCount, fromDate, toDate, seedUrl, params):
self._cpuCount = cpuCount
self._fromDate = fromDate
self._toDate = toDate
self._seedUrl = seedUrl
self._params = params
def getWorkUrls(self):
workUrls = []
iterationsCount = self.getIterationsCount()
for i in range(0, iterationsCount):
url = self._seedUrl
newDate = self._toDate - timedelta(i)
urlParams = str(self._params).format(self.formatDate(newDate))
url += urlParams
workUrls.append(url)
return workUrls
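# e.g. (illustrative, params taken from GetLinks.py): with seedUrl
# "https://www.15min.lt/naujienos/aktualu/lietuva", params
# "?offset={0}%2023:59:59" and toDate 2019-12-31, iteration i == 0 yields
# "https://www.15min.lt/naujienos/aktualu/lietuva?offset=2019-12-31%2023:59:59"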
def formatDate(self, date = date(2000, 1, 1)):
return date.strftime("%Y-%m-%d")
def getIterationsCount(self):
dateDelta = self._toDate - self._fromDate
iterationsCount = dateDelta.days
iterationsCount += 1
return iterationsCount
def getLinksFromPage(self, pageContent):
soup = BeautifulSoup(pageContent, 'html.parser')
links = set()
for article in soup.findAll("article"):
for a in article.findAll("a", attrs={"class":"vl-img-container"}):
links.add(a.get('href'))
return links
def getPageContent(self, resourceLink):
return helper.httpget(resourceLink)
def getCpuCount(self):
return self._cpuCount
class DelfiLinkScraper(LinkScraperAbstract):
def __init__(self, cpuCount, fromDate, toDate, seedUrl, params, iterationsCount = 0):
self._cpuCount = cpuCount
self._fromDate = fromDate
self._toDate = toDate
self._seedUrl = seedUrl
self._params = params
self._iterationsCount = iterationsCount
def getWorkUrls(self):
workUrls = []
iterationsCount = self.getIterationsCount()
fromDateText = self.formatDate(self._fromDate)
toDateText = self.formatDate(self._toDate)
for i in range(1, iterationsCount):
url = self._seedUrl
urlParams = str(self._params).format(fromDateText, toDateText, i)
url += urlParams
workUrls.append(url)
return workUrls
def formatDate(self, date=date(2000, 1, 1)):
return date.strftime("%d.%m.%Y")
def getIterationsCount(self):
return self._iterationsCount + 1
def getLinksFromPage(self, pageContent):
soup = BeautifulSoup(pageContent, 'html.parser')
links = set()
for div in soup.findAll("div", attrs={"class":"row"}):
for a in div.findAll("a", attrs={"class":"img-link"}):
links.add(a.get('href'))
return links
def getPageContent(self, resourceLink):
return helper.httpget(resourceLink)
def getCpuCount(self):
return self._cpuCount
class LrytasLinkScraper(LinkScraperAbstract):
def __init__(self, fromDate, toDate, seedUrl, webDriverPath):
self._fromDate = fromDate
self._toDate = toDate
self._seedUrl = seedUrl
self._webDriverPath = webDriverPath
def getWorkUrls(self):
workUrls = []
workUrls.append(self._seedUrl)
return workUrls
def getIterationsCount(self):
# the lrytas scraper works through a single seed page with repeated
# "load more" clicks, so one iteration suffices
return 1
def getLinksFromPage(self, pageContent):
soup = BeautifulSoup(pageContent, 'html.parser')
links = set()
if soup:
for article in soup.findAll("article", attrs={"class":"post"}):
articleLinkTag = article.find("a")
if articleLinkTag:
articleLink = articleLinkTag.get("href")
if self._isLinkValid(articleLink):
links.add(articleLink)
return links
def getPageContent(self, resourceLink):
chrome_options = Options()
#chrome_options.add_argument("--headless")
cdi = webdriver.Chrome(self._webDriverPath, options=chrome_options)
loadMoreElement = (By.ID, "loadMore")
cdi.get(resourceLink)
timesLoaded = 1
pageContent = None
try:
continueLoading = True
while continueLoading:
time.sleep(1)
WebDriverWait(cdi, 10).until(EC.element_to_be_clickable(loadMoreElement)).click()
timesLoaded += 1
if (timesLoaded % 25) == 0:
partialPageContent = cdi.page_source
continueLoading = self._continueLoading(partialPageContent)
pageContent = cdi.page_source
except Exception as ex:
print("Exception has occured on iteration " + str(timesLoaded) + ": " + str(ex))
cdi.quit()
return pageContent
def _continueLoading(self, pageContent):
continueLoading = True
soup = BeautifulSoup(pageContent, 'html.parser')
lastArticleTag = soup.findAll("article", attrs={"class":"post"})[-1]
lastArticleLinkTag = lastArticleTag.find("a")
lastArticleDate = None
if lastArticleLinkTag:
url = lastArticleLinkTag.get("href")
parsedUrl = urlparse(url)
sectorPosition = 0
pathParts = parsedUrl.path[1:].split("/")
for sector in pathParts:
try:
value = int(sector)
except ValueError:
value = 0
if value != 0:
year = value
try:
month = int(pathParts[sectorPosition + 1])
day = int(pathParts[sectorPosition + 2])
lastArticleDate = date(year, month, day)
except (ValueError, IndexError):
lastArticleDate = None
break
sectorPosition += 1
if lastArticleDate and lastArticleDate < self._fromDate:
continueLoading = False
if lastArticleDate:
print("Currently on date: " + str(lastArticleDate))
return continueLoading
def _isLinkValid(self, link):
isValid = True
if link is None or len(link) == 0:
isValid = False
return isValid
def getCpuCount(self):
return 1
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,342
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/GetLinks.py
|
from LinkScraper import FifteenLinkScraper, DelfiLinkScraper, LrytasLinkScraper
import ScraperHelper as helper
from datetime import date
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import multiprocessing
from joblib import Parallel, delayed
from tqdm import tqdm
class SimpleLinkScraper:
def __init__(self, linkScraperStrategy):
self._linkScraperStrategy = linkScraperStrategy
def getLinks(self):
workUrls = self._linkScraperStrategy.getWorkUrls()
inputs = tqdm(workUrls)
links = Parallel(n_jobs=self._linkScraperStrategy.getCpuCount())(delayed(self._processUrl)(url) for url in inputs)
return self._mergeResults(links)
def _processUrl(self, url):
pageContent = self._linkScraperStrategy.getPageContent(url)
linksFromPage = self._linkScraperStrategy.getLinksFromPage(pageContent)
return linksFromPage
def _mergeResults(self, setsOfLinks):
merged = set()
for eachSet in setsOfLinks:
merged = merged.union(eachSet)
return merged
def main():
workFolder = "C:\Data\GetLinks"
workSessionFolder = helper.createWorkSessionFolder(workFolder)
fromDate = date(2019, 1, 1)
toDate = date(2019, 12, 31)
fifteenSeedUrl = "https://www.15min.lt/naujienos/aktualu/lietuva"
fifteenParams = "?offset={0}%2023:59:59" #15min date format: year-month-day
delfiSeedUrl = "https://www.delfi.lt/archive/index.php"
delfiParams = "?fromd={0}&tod={1}&channel=1&category=0&query=&page={2}" #delfi date in format: day.month.year
delfiIterationsCount = 866
#delfiIterationsCount = 2 #Test
# Iterations justification: each click on "Daugiau" loads 24 unique articles. We keep loading, and on every 25th
# load we check the last article's date from its url; if it is still newer than fromDate, we continue loading.
# This strategy was set up this way because there is no trivial way to access the archive on the lrytas.lt portal.
lrytasSeedUrl = "https://www.lrytas.lt/lietuvosdiena/aktualijos/"
lrytasWebDriverPath = ChromeDriverManager().install()
cpuCount = multiprocessing.cpu_count()
fifteenLinkScraper = SimpleLinkScraper(FifteenLinkScraper(cpuCount, fromDate, toDate, fifteenSeedUrl, fifteenParams))
fifteenLinks = fifteenLinkScraper.getLinks()
helper.saveToFile(workSessionFolder, fifteenLinks)
delfiLinkScraper = SimpleLinkScraper(DelfiLinkScraper(cpuCount, fromDate, toDate, delfiSeedUrl, delfiParams, delfiIterationsCount))
delfiLinks = delfiLinkScraper.getLinks()
helper.saveToFile(workSessionFolder, delfiLinks)
lrytasLinkScraper = SimpleLinkScraper(LrytasLinkScraper(fromDate, toDate, lrytasSeedUrl, lrytasWebDriverPath))
lrytasLinks = lrytasLinkScraper.getLinks()
helper.saveToFile(workSessionFolder, lrytasLinks)
if __name__ == '__main__':
main()
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,343
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/ScraperHelper.py
|
import os
from datetime import datetime
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
def getCurrentDateTime():
now = datetime.now()
return now.strftime("%d_%m_%Y_%H_%M_%S")
def createWorkSessionFolder(createInPath):
createdFolder = createInPath + "\\" + "session_" + getCurrentDateTime()
os.mkdir(createdFolder)
return createdFolder
def saveToFile(path, links):
fileNameWithPath = path + "\\" + "result.csv" #TODO: result.csv is hardcoded, make it dynamic
file = open(fileNameWithPath, "a+")
for link in links:
if (link is not None):
file.write(link + "\n")
file.close()
def httpget(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
try:
with closing(get(url, stream=True)) as resp:
if isResponseOK(resp):
return resp.text
else:
return None
except RequestException as e:
print('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def isResponseOK(resp):
"""
Returns True if the response seems to be HTML, False otherwise.
"""
content_type = resp.headers['Content-Type'].lower()
if resp.status_code != 200:
saveToFile("C:\Data\GetLinks", [resp.status_code, resp.url]) #TODO: make it dynamic
return (resp.status_code == 200
and content_type is not None
and content_type.find('html') > -1)
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,344
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/regex.py
|
import re
print(re.findall( r'all (.*?) are', 'all cats are smarter than dogs, all dogs are dumber than cats'))
# Output: ['cats', 'dogs']
print([x.group() for x in re.finditer( r'all (.*?) are', 'all cats are smarter than dogs, all dogs are dumber than cats')])
# Output: ['all cats are', 'all dogs are']
print("stringidfngidfs"[:5])
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,345
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/Dictionary.py
|
import ScraperHelper as helper
from AnalyzeLinks import SimpleContentScraper
from ContentFetcher import FileContentFetcher
from ContentScraper import FifteenContentScraper, DelfiContentScraper, LrytasContentScraper
from bs4 import BeautifulSoup
import multiprocessing
from joblib import Parallel, delayed
from tqdm import tqdm
def cleanPageContent(html):
soup = BeautifulSoup(html, "html.parser") # create a new bs4 object from the html data loaded
for script in soup(["script", "style"]): # remove all javascript and stylesheet code
script.extract()
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
return text
def createDictionary(work, fetcher):
scraper = SimpleContentScraper.getContentScraperStrategy(fetcher.getContentScraperSuggestion(work), work)
scraper.createParser(fetcher.getContent(work))
articleScopes = scraper.getArticleScope()
wordsDict = {}
text = ""
for scope in articleScopes:
try:
text += " " + scope.text
except AttributeError:
text += " " + str(scope)
translation_table = dict.fromkeys(map(ord, '!@#$%^&?*()_+=[]-;<>/\|~1234567890\,.:"„“'), None)
text = text.translate(translation_table)
text = text.replace("\n", " ")
text = text.replace("\t", "")
text = text.replace("–", " ")
text = text.strip()
for word in text.split(" "):
word = word.strip()
if len(word) == 0:
continue
if word not in wordsDict:
wordsDict[word] = 1
else:
wordsDict[word] = wordsDict[word] + 1
return wordsDict
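# e.g. (illustrative): scopes containing "Skandalas mieste. Skandalas!" yield
# {'Skandalas': 2, 'mieste': 1} after punctuation stripping and splitting.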
def processWork(work, fetcher):
wordsDict = {}
try:
wordsDict = createDictionary(work, fetcher)
except Exception as ex:
print("Could not process, exception caught: " + str(ex))
return wordsDict
def main():
mypath = "C:\Data\deliverables\iteration3\sources"
#mypath = "C:\Data\AnalyzeLinks\session_10_05_2020_17_48_14" #Test
dictionariesPath = "C:\Data\Dictionary"
cpuCount = multiprocessing.cpu_count()
fetcher = FileContentFetcher(mypath)
workList = tqdm(fetcher.getWorkList())
wordDictionaries = Parallel(n_jobs=cpuCount)(delayed(processWork)(work, fetcher) for work in workList)
wordDict = {}
for dictionary in wordDictionaries:
for key in dictionary:
if key not in wordDict:
wordDict[key] = dictionary[key]
else:
wordDict[key] = wordDict[key] + dictionary[key]
dictFileName = dictionariesPath + "\\" + "dictionary_" + helper.getCurrentDateTime() + ".csv"
resultFile = open(dictFileName, "w+", encoding="utf-8")
for key in wordDict:
resultFile.write(key + "," + str(wordDict[key]) + "\n")
resultFile.close()
if __name__ == '__main__':
main()
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,346
|
maeof/ScrapeNewsAgencies
|
refs/heads/master
|
/oldGetLinks2.py
|
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
import time
chrome_options = Options()
chrome_options.add_argument("--headless")
#service = Service('c:\\data\\chromedriver\\chromedriver.exe')
#service.start()
cdi = webdriver.Chrome("c:\\data\\chromedriver\\chromedriver.exe", options=chrome_options)
#cdi = webdriver.Remote(service.service_url)
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
import urllib3 as urllib
from urllib.parse import urlparse
import sys
import os
from datetime import datetime
def getCurrentDateTime():
now = datetime.now()
return now.strftime("%d_%m_%Y_%H_%M_%S")
def createWorkSessionFolder(createInPath):
createdFolder = createInPath + "\\" + "session_" + getCurrentDateTime()
os.mkdir(createdFolder)
return createdFolder
workFolder = "C:\Data\GetLinks"
workSessionFolder = createWorkSessionFolder(workFolder)
def httpget(url):
cdi.get(url)
time.sleep(3)
return cdi.page_source
def getIncrementalUrl(url, i):
return url.replace("{0}", str(i))
def isIncrementalUrl(url):
if (url.find("{0}") != -1):
return True
else:
return False
def getLinksFromPageContent(pageContent):
soup = BeautifulSoup(pageContent, 'html.parser')
links = set()
for a in soup.find_all('a'):
url = a.get('href')
links.add(url)
return links
def saveToFile(path, links):
fileNameWithPath = path + "\\" + "result.csv"
file = open(fileNameWithPath, "a+")
for link in links:
if (link is not None):
file.write(link + "\n")
file.close()
def getLinks(regularUrls):
allLinks = set()
for url in regularUrls:
pageContent = httpget(url)
links = getLinksFromPageContent(pageContent)
allLinks = allLinks.union(links)
saveToFile(workSessionFolder, allLinks)
def getLinksFromIncrementalUrls(incrementalUrls, pagesCount):
allLinks = set()
for url in incrementalUrls:
for i in range(1, pagesCount + 1):
urlForRequest = getIncrementalUrl(url, i)
print(urlForRequest)
startTime = time.time()
pageContent = httpget(urlForRequest)
endTime = time.time()
print("httpget: {0}".format(endTime - startTime))
startTime = time.time()
links = getLinksFromPageContent(pageContent)
endTime = time.time()
print("getLinksFromPageContent: {0}".format(endTime - startTime))
startTime = time.time()
allLinks = allLinks.union(links)
print("allLinks.union(links): {0}".format(endTime - startTime))
endTime = time.time()
saveToFile(workSessionFolder, allLinks)
def validateArgs(args):
if len(args) < 3 or args[1] is None or args[2] is None:
print("Wrong arguments. 1st argument is the file of links, 2nd argument is the incremental value of how many pages to view.")
return False
return True
def main(args):
#if (validateArgs(args) == False):
#return
#linksFilePath = str(args[1])
#pagesCount = int(args[2])
#linksFilePath = "links2.txt"
linksFilePath = "links.txt"
pagesCount = 10
file = open(linksFilePath, "r")
fileLines = file.readlines()
file.close()
incrementalUrls = []
regularUrls = []
    for line in fileLines:
        line = line.strip()  # drop the trailing newline readlines() keeps
        if not line:
            continue
        if isIncrementalUrl(line):
            incrementalUrls.append(line)
        else:
            regularUrls.append(line)
getLinksFromIncrementalUrls(incrementalUrls, pagesCount)
#getLinks(regularUrls)
cdi.quit()
if __name__ == '__main__':
main(sys.argv)
|
{"/ContentScraper.py": ["/ScraperHelper.py"], "/AnalyzeLinks.py": ["/ScraperHelper.py", "/ContentFetcher.py", "/ContentScraper.py"], "/LinkScraper.py": ["/ScraperHelper.py"], "/GetLinks.py": ["/LinkScraper.py", "/ScraperHelper.py"], "/Dictionary.py": ["/ScraperHelper.py", "/AnalyzeLinks.py", "/ContentFetcher.py", "/ContentScraper.py"]}
|
16,348
|
Swetha-14/opinion-mining
|
refs/heads/master
|
/network/models.py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.core.validators import MinValueValidator, MaxLengthValidator
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from profanity.validators import validate_is_profane
class User(AbstractUser):
    followers = models.ManyToManyField('self', symmetrical=False, related_name='following', blank=True)
class Post(models.Model):
user = models.ForeignKey('User', on_delete=models.CASCADE, related_name="posts")
content = models.TextField(blank=True)
    image = models.ImageField(upload_to='images/', blank=True, null=True)
likes = models.ManyToManyField('User', related_name="liked_posts", blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
def serialize(self):
return {
"id": self.id,
"user": self.user.username,
"content": self.content,
"likes": self.likes,
"timestamp": self.timestamp.strftime("%b %-d %Y, %-I:%M %p")
}
class Comment(models.Model):
user = models.ForeignKey("User", on_delete=models.CASCADE, related_name="commented_user")
post = models.ForeignKey("Post", on_delete=models.CASCADE, related_name="comments")
comment = models.TextField(max_length=500, validators=[validate_is_profane])
|
{"/network/views.py": ["/network/models.py"]}
|
16,349
|
Swetha-14/opinion-mining
|
refs/heads/master
|
/network/migrations/0010_auto_20210606_1750.py
|
# Generated by Django 3.2.3 on 2021-06-06 12:20
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('network', '0009_auto_20210519_1336'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='comments',
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(max_length=500, validators=[django.core.validators.MaxLengthValidator(1000)])),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='network.post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='commented_user', to=settings.AUTH_USER_MODEL)),
],
),
]
|
{"/network/views.py": ["/network/models.py"]}
|
16,350
|
Swetha-14/opinion-mining
|
refs/heads/master
|
/network/migrations/0014_alter_comment_comment.py
|
# Generated by Django 3.2.3 on 2021-07-23 08:09
from django.db import migrations, models
import profanity.validators
class Migration(migrations.Migration):
dependencies = [
('network', '0013_alter_post_image'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='comment',
field=models.TextField(max_length=500, validators=[profanity.validators.validate_is_profane]),
),
]
|
{"/network/views.py": ["/network/models.py"]}
|
16,351
|
Swetha-14/opinion-mining
|
refs/heads/master
|
/network/migrations/0009_auto_20210519_1336.py
|
# Generated by Django 3.2.3 on 2021-05-19 08:06
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('network', '0008_alter_post_image'),
]
operations = [
migrations.AddField(
model_name='post',
name='comments',
field=models.ManyToManyField(blank=True, related_name='commented_posts', to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Comment',
),
]
|
{"/network/views.py": ["/network/models.py"]}
|
16,352
|
Swetha-14/opinion-mining
|
refs/heads/master
|
/network/views.py
|
import json
import csv
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator
from django.contrib import messages
from better_profanity import profanity
import re
import string
from .models import User, Post, Comment
from .forms import *
# Create Post
def index(request):
if request.method == 'POST':
form = PostForm(request.POST, request.FILES or None)
if form.is_valid():
post = Post(
user=User.objects.get(pk=request.user.id),
content=form.cleaned_data.get("content"),
image = form.cleaned_data.get("image")
)
post.save()
return redirect('index')
else:
form = PostForm()
posts = Post.objects.all().order_by("-timestamp")
paginator = Paginator(posts, 10)
page_obj = paginator.get_page(request.GET.get('page'))
return render(request, "network/index.html", {
"page_obj": page_obj,
'form' : form
})
@login_required
def user_page(request, username):
target_user = User.objects.get(username=username)
posts = Post.objects.filter(user__username=username).order_by("-timestamp")
paginator = Paginator(posts, 10)
page_obj = paginator.get_page(request.GET.get('page'))
return render(request, "network/profile.html", {
"target_user": target_user,
"page_obj": page_obj
})
@login_required
def following(request):
posts = []
all_posts = Post.objects.order_by("-timestamp").all()
# Iterating each and every post and checking if that post's owner is in the loggedin user's following list
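    # (Equivalent single-query sketch, assuming the "following" related_name
    #  defined on User.followers:
    #  posts = Post.objects.filter(user__in=request.user.following.all()).order_by("-timestamp"))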
for post in all_posts:
if post.user in request.user.following.all():
# Then append that post to the post list initialized above
posts.append(post)
paginator = Paginator(posts, 10)
page_obj = paginator.get_page(request.GET.get('page'))
return render(request, "network/following.html", {
"page_obj": page_obj
})
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
# Check if authentication successful
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "network/login.html", {
"message": "Invalid username and/or password."
})
else:
return render(request, "network/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("index"))
def register(request):
if request.method == "POST":
username = request.POST["username"]
email = request.POST["email"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "network/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user
try:
user = User.objects.create_user(username, email, password)
user.save()
except IntegrityError:
return render(request, "network/register.html", {
"message": "Username already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "network/register.html")
@csrf_exempt
@login_required
def follow_user(request, username):
target_user = User.objects.get(username=username)
# Unfollow the user if the user is already in the followers
if request.user in target_user.followers.all():
target_user.followers.remove(request.user)
target_user.save()
return JsonResponse({"message": f'{username} unfollowed!'})
# Follow the user if the user is not in the followers
target_user.followers.add(request.user)
target_user.save()
return JsonResponse({"message": f'{username} followed!'})
@csrf_exempt
@login_required
def edit_post(request):
    # Editing a post must be via PUT
if request.method == "PUT":
data = json.loads(request.body)
post_id = data.get("postId", "")
content = data.get("content", "")
post = Post.objects.get(pk=post_id)
# Ensure to edit only the user's own posts
if request.user.username != post.user.username:
return JsonResponse({"error": "Can't edit another user's post"}, status=403)
post.content = content
post.save()
return JsonResponse({"message": "Post edited!"}, status=200)
else:
return JsonResponse({"error": "Must be PUT method"}, status=400)
@csrf_exempt
@login_required
def like_post(request):
# Liking a new post must be via PUT
if request.method == "PUT":
data = json.loads(request.body)
post_id = data.get("postId", "")
post = Post.objects.get(pk=post_id)
# Unlike if the User already liked
if request.user in post.likes.all():
post.likes.remove(request.user)
post.save()
return JsonResponse({"liked": False}, status=200)
# Else Like it
post.likes.add(request.user)
post.save()
return JsonResponse({"liked": True}, status=200)
else:
return JsonResponse({"error": "Must be PUT method"}, status=400)
@csrf_exempt
@login_required
def comment_post(request, id):
if request.method == "POST":
if not request.user.is_authenticated:
messages.warning(request, 'Log in to submit comments, like posts and more!')
return HttpResponseRedirect(reverse("index"))
else:
if request.POST["type"] == "comment":
added_comment = request.POST["content"]
comment = Comment(
user = User.objects.get(pk=request.user.id),
post = Post.objects.get(pk=id),
comment = added_comment
)
comment.save()
return HttpResponseRedirect(reverse("index"))
|
{"/network/views.py": ["/network/models.py"]}
|
16,366
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/utils.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# Utils
#
# Basic Utilities File
#
# required libraries
import logging
import os
class Utils():
# Function to process the string to boolean
@staticmethod
def str_to_bool(string_val):
logging.debug("str_to_bool received string: %s"%string_val)
if not string_val or not isinstance(string_val, basestring):
logging.debug("returning None as string_val is not provided or is incorrect")
return None
string_val = string_val.capitalize()
if string_val == "True":
logging.debug("returning boolean True")
return True
elif string_val == "False":
logging.debug("returning boolean False")
return False
else:
logging.debug("returning as is as string_val cannot be processed")
return string_val
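    # Example behaviour (illustrative):
    #   Utils.str_to_bool("true")  -> True
    #   Utils.str_to_bool("FALSE") -> False
    #   Utils.str_to_bool("maybe") -> "maybe"  (returned unchanged)
    #   Utils.str_to_bool(1)       -> None    (not a string)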
# Function to get the file extension
@staticmethod
def get_file_type(file_name):
if not file_name:
logging.debug("returning None as file_name is not provided")
return None
file_ext = os.path.splitext(file_name)[1]
logging.debug("returning File extension: %s"%file_ext)
return file_ext
# Function to create output directory if missing
@staticmethod
def create_output_directory(file_name):
if not file_name:
return None
file_dir = os.path.dirname(file_name)
        if not os.path.exists(file_dir):
            logging.debug("Creating the output directory")
            os.makedirs(file_dir)
        else:
            logging.debug("Output directory already present")
        return True
# Function to get the absolute paths relative to the current working directory
@staticmethod
    def get_absolute_path(input_file_name, output_file_name, curr_working_dir=None):
        # resolve the default per call: a default argument of os.getcwd() is
        # evaluated only once, at import time
        root = curr_working_dir if curr_working_dir is not None else os.getcwd()
if input_file_name:
if not input_file_name.startswith("data"):
input_file_name = os.path.join(root,"data",input_file_name)
else:
input_file_name = os.path.join(root,input_file_name)
input_file_name = input_file_name.replace('\\',"/") # as we are on linux-ubuntu
if output_file_name:
if not output_file_name.startswith("results"):
output_file_name = os.path.join(root,"results",output_file_name)
else:
output_file_name = os.path.join(root,output_file_name)
output_file_name = output_file_name.replace('\\',"/") # as we are on linux-ubuntu
return input_file_name, output_file_name
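    # Example (illustrative, with a current working directory of /home/user/proj):
    #   Utils.get_absolute_path("test.xml", "out.csv")
    #   -> ("/home/user/proj/data/test.xml", "/home/user/proj/results/out.csv")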
|
{"/main.py": ["/modules/utils.py"]}
|
16,367
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/utils_test.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# Utils Unit Test
#
from utils import Utils
import os
# Test the str_to_bool function
def test_str_to_bool_with_no_args():
assert(Utils.str_to_bool(None) == None)
assert(Utils.str_to_bool("") == None)
def test_str_to_bool_with_incorrect_args():
assert(Utils.str_to_bool("Test") == "Test")
def test_str_to_bool_with_incorrect_args_type():
assert(Utils.str_to_bool(1) == None)
def test_get_file_type_with_no_args():
assert(Utils.get_file_type(None) == None)
assert(Utils.get_file_type("") == None)
def test_get_file_type_with_no_file_ext():
assert(Utils.get_file_type("Test") == "")
def test_get_file_type_with_file_ext():
assert(Utils.get_file_type("Test.txt") == ".txt")
assert(Utils.get_file_type("Test/Test.txt") == ".txt")
def test_create_output_directory_with_no_args():
assert(Utils.create_output_directory(None) == None)
def test_create_output_directory_with_no_existing_dir():
assert(Utils.create_output_directory("data/data1/test.txt") == True)
def test_create_output_directory_with_existing_dir():
assert(Utils.create_output_directory("data/data1/test.txt") == True)
os.rmdir(os.path.join(os.getcwd(),"data/data1"))
def test_get_absolute_path_with_no_args():
assert(Utils.get_absolute_path(None,None) == (None,None))
def test_get_absolute_path_with_input_args():
assert(Utils.get_absolute_path("",None) == ("",None))
def test_get_absolute_path_with_input_file():
path = os.path.join(os.getcwd(),"data","test.xml")
assert(Utils.get_absolute_path("test.xml",None) == (path,None))
assert(Utils.get_absolute_path("data/test.xml",None) == (path,None))
def test_get_absolute_path_with_output_file():
path = os.path.join(os.getcwd(),"results","test.xml")
assert(Utils.get_absolute_path(None,"test.xml") == (None,path))
assert(Utils.get_absolute_path(None,"results/test.xml") == (None,path))
|
{"/main.py": ["/modules/utils.py"]}
|
16,368
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/csv_parser_test.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# Csv Parser - Class Tests
#
from csv_parser import CsvParser
import os
test_file = os.path.join(os.getcwd(),"Test.csv")
def test_csv_parser_with_missing_values():
global test_file
# value missing
with open(test_file,"w")as f:
f.write("""name,active,value
John,true,
Mark,true,
Paul,false,100
Ben,true,150
""")
y = CsvParser(test_file)
y.load_file()
y.parse_file()
y.process_file()
assert(y.get_value() == 150)
def test_csv_parser_with_non_csv_values():
global test_file
    # rows are not comma-separated
    with open(test_file, "w") as f:
f.write("""name,active,value
John true
Mark true
Paul false 100
Ben true 150
""")
y = CsvParser(test_file)
y.load_file()
y.parse_file()
y.process_file()
assert(y.get_value() == 0)
def test_csv_parser_with_blank_file():
global test_file
    # blank file
    with open(test_file, "w") as f:
f.write("")
y = CsvParser(test_file)
y.load_file()
y.parse_file()
y.process_file()
assert(y.get_value() == 0)
def test_csv_parser_with_no_child_nodes():
global test_file
    # header row only, no data rows
    with open(test_file, "w") as f:
f.write("""name,active,value""")
y = CsvParser(test_file)
y.load_file()
y.parse_file()
y.process_file()
assert(y.get_value() == 0)
# delete the test file as not required
os.remove(test_file)
|
{"/main.py": ["/modules/utils.py"]}
|
16,369
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/file_handler_test.py
|
#
# Nimish Nayak
# 10-06-2017
#
# required libraries
from file_handler import FileHandler
# File Handler
# Tests for the module that handles all the file handling operations
def test_file_handler_non_std_file_types():
try:
fh = FileHandler("Data/Test.psv")
fh.process_file()
assert(False)
except IOError:
assert(True)
|
{"/main.py": ["/modules/utils.py"]}
|
16,370
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/parser_test.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# Parser - Base Class Tests
#
from parser import Parser
def test_parser_if_abstract():
try:
p = Parser()
assert(False)
except TypeError:
assert(True)
def test_parser_process_file_function():
class B(Parser):
def parse_file(self):
pass
try:
B = B("test")
B.process_file()
assert(isinstance(B.get_value(), int))
assert(B.get_value() == 0)
except:
assert(False)
|
{"/main.py": ["/modules/utils.py"]}
|
16,371
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/yml_parser_test.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# Yml Parser - Class Tests
#
from yml_parser import YmlParser
import os
test_file = os.path.join(os.getcwd(),"Test.yml")
def test_yml_parser_with_missing_values():
global test_file
# value missing
with open(test_file,"w")as f:
f.write("""users:
- name: Paul
active: false
value: 100
- name: Ben
active: true""")
y = YmlParser(test_file)
y.parse_file()
y.process_file()
assert(y.get_value() == 0)
def test_yml_parser_with_malformed_yml():
global test_file
    # malformed yaml entries
    with open(test_file, "w") as f:
f.write("""users:
- name Paul
active false
value 100
- name: Ben
active: true""")
y = YmlParser(test_file)
y.parse_file()
y.process_file()
assert(y.get_value() == 0)
def test_yml_parser_with_blank_file():
global test_file
    # blank file
    with open(test_file, "w") as f:
f.write("")
y = YmlParser(test_file)
y.parse_file()
y.process_file()
assert(y.get_value() == 0)
def test_yml_parser_with_just_headers():
global test_file
    # header only, no user entries
    with open(test_file, "w") as f:
f.write("users:")
y = YmlParser(test_file)
y.parse_file()
y.process_file()
assert(y.get_value() == 0)
# delete the test file as not required
os.remove(test_file)
|
{"/main.py": ["/modules/utils.py"]}
|
16,372
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/main.py
|
#
# Nimish Nayak
# 10-06-2017
#
# Create a command line tool
# which reads data from a file (csv, yml, xml), but they contain the same data
# performs simple operation on this data
# stores or prints a result. The result could be stored in a plain text file or printed on stdout
# Note: Developed using Python 2.7.13
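# Example invocation (hypothetical file names):
#   python main.py -i users.csv -o summary.txt
# reads <cwd>/data/users.csv and writes the result to <cwd>/results/summary.txt
# (paths are resolved by Utils.get_absolute_path below).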
# required libraries
import logging
import os
from optparse import OptionParser
from modules import file_handler
from modules.utils import Utils
# main function
if __name__ == "__main__":
# initialize the logger
file_name = os.path.basename(__file__).strip().replace(".py",".log")
# Note: Logging Level -> Production - INFO, Development - DEBUG
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', filename=file_name, level=logging.DEBUG, datefmt='%Y-%m-%d %I:%M:%S %p')
# Set the CL options
    usage = "usage: %prog [options] arg1 arg2"
    parser = OptionParser(usage)
parser.add_option("-i", "--input", type="string",
help="absolute path of the input file name with file extension",
dest="input_file")
parser.add_option("-o", "--output", type="string",
help="absolute path of the output file name with file extension",
dest="output_file")
options, arguments = parser.parse_args()
logging.info('Application Started')
# Raise an error if the input file is missing
if not options.input_file:
raise ValueError("The input file is missing")
# Debug Statements
logging.debug("Input File %s"%options.input_file)
logging.debug("Output File %s"%options.output_file)
logging.debug("Arguments: %s"%arguments)
input_file, output_file = Utils.get_absolute_path(options.input_file,options.output_file,os.getcwd())
logging.debug("Absolute Input File %s"%input_file)
logging.debug("Absolute Output File %s"%output_file)
fh = file_handler.FileHandler(input_file, output_file)
fh.process_file()
fh.write_output()
logging.info('Application Finished')
|
{"/main.py": ["/modules/utils.py"]}
|
16,373
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/yml_parser.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# YAML Parser
#
# required libraries
import logging
import yaml
from parser import Parser
class YmlParser(Parser):
# initialize the instance variables
def __init__(self, input_file):
# call the base class constructor
Parser.__init__(self, input_file)
logging.debug("Yml parser initialized")
    # parse the file using a yaml parser
def parse_file(self):
        with open(self.input_file, "r") as f:
            data = yaml.safe_load(f)  # safe_load: no arbitrary object construction
if data and "users" in data:
self.data = data["users"]
|
{"/main.py": ["/modules/utils.py"]}
|